Diffstat (limited to 'arch/blackfin/mach-common')
-rw-r--r--  arch/blackfin/mach-common/arch_checks.c    |    2
-rw-r--r--  arch/blackfin/mach-common/cache.S          |   38
-rw-r--r--  arch/blackfin/mach-common/cpufreq.c        |    8
-rw-r--r--  arch/blackfin/mach-common/dpmc.c           |   50
-rw-r--r--  arch/blackfin/mach-common/entry.S          |   19
-rw-r--r--  arch/blackfin/mach-common/head.S           |  112
-rw-r--r--  arch/blackfin/mach-common/interrupt.S      |    6
-rw-r--r--  arch/blackfin/mach-common/ints-priority.c  |  271
-rw-r--r--  arch/blackfin/mach-common/smp.c            |   35
9 files changed, 333 insertions, 208 deletions
diff --git a/arch/blackfin/mach-common/arch_checks.c b/arch/blackfin/mach-common/arch_checks.c
index bceb98126c21..d8643fdd0fcf 100644
--- a/arch/blackfin/mach-common/arch_checks.c
+++ b/arch/blackfin/mach-common/arch_checks.c
@@ -61,6 +61,6 @@
# error "Anomaly 05000220 does not allow you to use Write Back cache with L2 or External Memory"
#endif
-#if ANOMALY_05000491 && !defined(CONFIG_CACHE_FLUSH_L1)
+#if ANOMALY_05000491 && !defined(CONFIG_ICACHE_FLUSH_L1)
# error You need IFLUSH in L1 inst while Anomaly 05000491 applies
#endif
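
Editor's note: arch_checks.c exists purely to stop the build when a Kconfig
selection conflicts with a silicon anomaly; the anomaly macros expand to 0 or 1
per silicon revision, so the guard costs nothing at runtime. A minimal sketch of
the same pattern, with hypothetical names (ANOMALY_EXAMPLE and
CONFIG_EXAMPLE_WORKAROUND are placeholders, not real kernel symbols):

    /* Build-time guard: ANOMALY_EXAMPLE is 0 or 1 depending on the
     * silicon revision being compiled for, so a misconfiguration is
     * caught at compile time rather than as runtime corruption.
     */
    #if ANOMALY_EXAMPLE && !defined(CONFIG_EXAMPLE_WORKAROUND)
    # error "This silicon revision requires CONFIG_EXAMPLE_WORKAROUND"
    #endif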
diff --git a/arch/blackfin/mach-common/cache.S b/arch/blackfin/mach-common/cache.S
index ab4a925a443e..9f4dd35bfd74 100644
--- a/arch/blackfin/mach-common/cache.S
+++ b/arch/blackfin/mach-common/cache.S
@@ -11,12 +11,6 @@
#include <asm/cache.h>
#include <asm/page.h>
-#ifdef CONFIG_CACHE_FLUSH_L1
-.section .l1.text
-#else
-.text
-#endif
-
/* 05000443 - IFLUSH cannot be last instruction in hardware loop */
#if ANOMALY_05000443
# define BROK_FLUSH_INST "IFLUSH"
@@ -68,11 +62,43 @@
RTS;
.endm
+#ifdef CONFIG_ICACHE_FLUSH_L1
+.section .l1.text
+#else
+.text
+#endif
+
/* Invalidate all instruction cache lines associated with this memory area */
+#ifdef CONFIG_SMP
+# define _blackfin_icache_flush_range _blackfin_icache_flush_range_l1
+#endif
ENTRY(_blackfin_icache_flush_range)
do_flush IFLUSH
ENDPROC(_blackfin_icache_flush_range)
+#ifdef CONFIG_SMP
+.text
+# undef _blackfin_icache_flush_range
+ENTRY(_blackfin_icache_flush_range)
+ p0.L = LO(DSPID);
+ p0.H = HI(DSPID);
+ r3 = [p0];
+ r3 = r3.b (z);
+ p2 = r3;
+ p0.L = _blackfin_iflush_l1_entry;
+ p0.H = _blackfin_iflush_l1_entry;
+ p0 = p0 + (p2 << 2);
+ p1 = [p0];
+ jump (p1);
+ENDPROC(_blackfin_icache_flush_range)
+#endif
+
+#ifdef CONFIG_DCACHE_FLUSH_L1
+.section .l1.text
+#else
+.text
+#endif
+
/* Throw away all D-cached data in the specified region without any obligation to
* write them back. Since the Blackfin ISA does not have an "invalidate"
* instruction, we use flush/invalidate. Perhaps as a speed optimization we
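
Editor's note: the SMP stub added above is a small dispatcher. On multi-core
parts each core keeps its own copy of the flush routine in private L1
instruction SRAM, and the generic .text entry point merely looks up the copy
belonging to the executing core and jumps to it. Roughly what the assembly
does, rendered as C (the array is the blackfin_iflush_l1_entry[] declared in
smp.c later in this patch; the function type and accessor usage are
illustrative):

    typedef void (*iflush_fn)(unsigned long start, unsigned long end);
    extern unsigned long blackfin_iflush_l1_entry[NR_CPUS];

    static void icache_flush_range_dispatch(unsigned long start, unsigned long end)
    {
        /* The low byte of the DSPID MMR identifies the executing core. */
        unsigned int cpu = bfin_read_DSPID() & 0xff;

        /* Tail-call this core's private L1 copy of the flush code. */
        ((iflush_fn)blackfin_iflush_l1_entry[cpu])(start, end);
    }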
diff --git a/arch/blackfin/mach-common/cpufreq.c b/arch/blackfin/mach-common/cpufreq.c
index f4cf11d362e1..85dc6d69f9c0 100644
--- a/arch/blackfin/mach-common/cpufreq.c
+++ b/arch/blackfin/mach-common/cpufreq.c
@@ -1,7 +1,7 @@
/*
* Blackfin core clock scaling
*
- * Copyright 2008-2009 Analog Devices Inc.
+ * Copyright 2008-2011 Analog Devices Inc.
*
* Licensed under the GPL-2 or later.
*/
@@ -16,10 +16,8 @@
#include <asm/time.h>
#include <asm/dpmc.h>
-#define CPUFREQ_CPU 0
-
/* this is the table of CCLK frequencies, in Hz */
-/* .index is the entry in the auxillary dpm_state_table[] */
+/* .index is the entry in the auxiliary dpm_state_table[] */
static struct cpufreq_frequency_table bfin_freq_table[] = {
{
.frequency = CPUFREQ_TABLE_END,
@@ -46,7 +44,7 @@ static struct bfin_dpm_state {
#if defined(CONFIG_CYCLES_CLOCKSOURCE)
/*
- * normalized to maximum frequncy offset for CYCLES,
+ * normalized to maximum frequency offset for CYCLES,
* used in time-ts cycles clock source, but could be used
* somewhere also.
*/
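
Editor's note: only the table terminator is visible in this hunk;
bfin_freq_table is filled in at init time with one row per supported CCLK
divider. A hedged sketch of its shape (the frequencies are made up for
illustration):

    /* .frequency is the core clock in kHz for that operating point;
     * .index selects the matching row of dpm_state_table[].
     */
    static struct cpufreq_frequency_table example_freq_table[] = {
        { .index = 0, .frequency = 500000 },  /* CCLK / 1 */
        { .index = 1, .frequency = 250000 },  /* CCLK / 2 */
        { .index = 2, .frequency = 125000 },  /* CCLK / 4 */
        { .frequency = CPUFREQ_TABLE_END },
    };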
diff --git a/arch/blackfin/mach-common/dpmc.c b/arch/blackfin/mach-common/dpmc.c
index 02c7efd1bcf4..382099fd5561 100644
--- a/arch/blackfin/mach-common/dpmc.c
+++ b/arch/blackfin/mach-common/dpmc.c
@@ -61,17 +61,63 @@ err_out:
}
#ifdef CONFIG_CPU_FREQ
+# ifdef CONFIG_SMP
+static void bfin_idle_this_cpu(void *info)
+{
+ unsigned long flags = 0;
+ unsigned long iwr0, iwr1, iwr2;
+ unsigned int cpu = smp_processor_id();
+
+ local_irq_save_hw(flags);
+ bfin_iwr_set_sup0(&iwr0, &iwr1, &iwr2);
+
+ platform_clear_ipi(cpu, IRQ_SUPPLE_0);
+ SSYNC();
+ asm("IDLE;");
+ bfin_iwr_restore(iwr0, iwr1, iwr2);
+
+ local_irq_restore_hw(flags);
+}
+
+static void bfin_idle_cpu(void)
+{
+ smp_call_function(bfin_idle_this_cpu, NULL, 0);
+}
+
+static void bfin_wakeup_cpu(void)
+{
+ unsigned int cpu;
+ unsigned int this_cpu = smp_processor_id();
+ cpumask_t mask = cpu_online_map;
+
+ cpu_clear(this_cpu, mask);
+ for_each_cpu_mask(cpu, mask)
+ platform_send_ipi_cpu(cpu, IRQ_SUPPLE_0);
+}
+
+# else
+static void bfin_idle_cpu(void) {}
+static void bfin_wakeup_cpu(void) {}
+# endif
+
static int
vreg_cpufreq_notifier(struct notifier_block *nb, unsigned long val, void *data)
{
struct cpufreq_freqs *freq = data;
+ if (freq->cpu != CPUFREQ_CPU)
+ return 0;
+
if (val == CPUFREQ_PRECHANGE && freq->old < freq->new) {
+ bfin_idle_cpu();
bfin_set_vlev(bfin_get_vlev(freq->new));
udelay(pdata->vr_settling_time); /* Wait until voltage has settled */
-
- } else if (val == CPUFREQ_POSTCHANGE && freq->old > freq->new)
+ bfin_wakeup_cpu();
+ } else if (val == CPUFREQ_POSTCHANGE && freq->old > freq->new) {
+ bfin_idle_cpu();
bfin_set_vlev(bfin_get_vlev(freq->new));
+ bfin_wakeup_cpu();
+ }
return 0;
}
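
Editor's note: the ordering enforced by this notifier is the interesting part.
The regulator must already be at the higher VLEV before the core clock speeds
up (PRECHANGE on the way up), and may only drop once the clock has slowed
(POSTCHANGE on the way down), with the other cores parked in IDLE while the
level changes. For completeness, a sketch of how such a transition notifier
gets hooked up via the standard cpufreq API (the actual registration lives
elsewhere in dpmc.c; the block and function names here are illustrative):

    static struct notifier_block vreg_cpufreq_nb = {
        .notifier_call = vreg_cpufreq_notifier,
    };

    /* Called once at init: from then on, vreg_cpufreq_notifier() runs
     * around every frequency transition (PRECHANGE and POSTCHANGE).
     */
    static int __init vreg_notifier_register(void)
    {
        return cpufreq_register_notifier(&vreg_cpufreq_nb,
                                         CPUFREQ_TRANSITION_NOTIFIER);
    }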
diff --git a/arch/blackfin/mach-common/entry.S b/arch/blackfin/mach-common/entry.S
index bc08c98d008d..f96933f48a7f 100644
--- a/arch/blackfin/mach-common/entry.S
+++ b/arch/blackfin/mach-common/entry.S
@@ -268,7 +268,7 @@ ENTRY(_handle_bad_cplb)
/* To get here, we just tried and failed to change a CPLB
* so, handle things in trap_c (C code), by lowering to
* IRQ5, just like we normally do. Since this is not a
- * "normal" return path, we have a do alot of stuff to
+ * "normal" return path, we have a do a lot of stuff to
* the stack to get ready so, we can fall through - we
* need to make a CPLB exception look like a normal exception
*/
@@ -817,7 +817,7 @@ _new_old_task:
rets = [sp++];
/*
- * When we come out of resume, r0 carries "old" task, becuase we are
+ * When we come out of resume, r0 carries "old" task, because we are
* in "new" task.
*/
rts;
@@ -952,8 +952,17 @@ ENDPROC(_evt_up_evt14)
#ifdef CONFIG_IPIPE
_resume_kernel_from_int:
+ r1 = LO(~0x8000) (Z);
+ r1 = r0 & r1;
+ r0 = 1;
+ r0 = r1 - r0;
+ r2 = r1 & r0;
+ cc = r2 == 0;
+ /* Sync the root stage only from the outer interrupt level. */
+ if !cc jump .Lnosync;
r0.l = ___ipipe_sync_root;
r0.h = ___ipipe_sync_root;
+ [--sp] = reti;
[--sp] = rets;
[--sp] = ( r7:4, p5:3 );
SP += -12;
@@ -961,6 +970,8 @@ _resume_kernel_from_int:
SP += 12;
( r7:4, p5:3 ) = [sp++];
rets = [sp++];
+ reti = [sp++];
+.Lnosync:
rts
#elif defined(CONFIG_PREEMPT)
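
Editor's note: the new instructions at the top of _resume_kernel_from_int are
a classic "at most one bit set" test. After masking off bit 15, the sequence
computes r1 & (r1 - 1), which is zero exactly when no more than one interrupt
level is pending, i.e. we are returning from the outermost interrupt and it is
safe to sync the root stage. The same test in C, for clarity:

    /* Nonzero when at most one bit of 'pending' is set, i.e. no
     * nested interrupt levels remain above the one we are leaving.
     */
    static inline int at_outer_irq_level(unsigned long pending)
    {
        pending &= ~0x8000UL;  /* ignore bit 15, as the assembly does */
        return (pending & (pending - 1)) == 0;
    }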
@@ -1738,6 +1749,10 @@ ENTRY(_sys_call_table)
.long _sys_fanotify_mark
.long _sys_prlimit64
.long _sys_cacheflush
+ .long _sys_name_to_handle_at /* 375 */
+ .long _sys_open_by_handle_at
+ .long _sys_clock_adjtime
+ .long _sys_syncfs
.rept NR_syscalls-(.-_sys_call_table)/4
.long _sys_ni_syscall
diff --git a/arch/blackfin/mach-common/head.S b/arch/blackfin/mach-common/head.S
index 4391621d9048..76de5724c1e3 100644
--- a/arch/blackfin/mach-common/head.S
+++ b/arch/blackfin/mach-common/head.S
@@ -31,6 +31,7 @@ ENDPROC(__init_clear_bss)
ENTRY(__start)
/* R0: argument of command line string, passed from uboot, save it */
R7 = R0;
+
/* Enable Cycle Counter and Nesting Of Interrupts */
#ifdef CONFIG_BFIN_SCRATCH_REG_CYCLES
R0 = SYSCFG_SNEN;
@@ -38,76 +39,49 @@ ENTRY(__start)
R0 = SYSCFG_SNEN | SYSCFG_CCEN;
#endif
SYSCFG = R0;
- R0 = 0;
-
- /* Clear Out All the data and pointer Registers */
- R1 = R0;
- R2 = R0;
- R3 = R0;
- R4 = R0;
- R5 = R0;
- R6 = R0;
-
- P0 = R0;
- P1 = R0;
- P2 = R0;
- P3 = R0;
- P4 = R0;
- P5 = R0;
-
- LC0 = r0;
- LC1 = r0;
- L0 = r0;
- L1 = r0;
- L2 = r0;
- L3 = r0;
-
- /* Clear Out All the DAG Registers */
- B0 = r0;
- B1 = r0;
- B2 = r0;
- B3 = r0;
-
- I0 = r0;
- I1 = r0;
- I2 = r0;
- I3 = r0;
-
- M0 = r0;
- M1 = r0;
- M2 = r0;
- M3 = r0;
+
+ /* Optimization register tricks: keep a base value in the
+ * reserved P registers so we can use the load/store with an
+ * offset syntax. R0 = [P5 + <constant>];
+ * P5 - core MMR base
+ * R6 - 0
+ */
+ r6 = 0;
+ p5.l = 0;
+ p5.h = hi(COREMMR_BASE);
+
+ /* Zero out registers required by Blackfin ABI */
+
+ /* Disable circular buffers */
+ L0 = r6;
+ L1 = r6;
+ L2 = r6;
+ L3 = r6;
+
+ /* Disable hardware loops in case we were started by 'go' */
+ LC0 = r6;
+ LC1 = r6;
/*
* Clear ITEST_COMMAND and DTEST_COMMAND registers.
* Leaving these as non-zero can confuse the emulator.
*/
- p0.L = LO(DTEST_COMMAND);
- p0.H = HI(DTEST_COMMAND);
- [p0] = R0;
- [p0 + (ITEST_COMMAND - DTEST_COMMAND)] = R0;
+ [p5 + (DTEST_COMMAND - COREMMR_BASE)] = r6;
+ [p5 + (ITEST_COMMAND - COREMMR_BASE)] = r6;
CSYNC;
trace_buffer_init(p0,r0);
- P0 = R1;
- R0 = R1;
/* Turn off the icache */
- p0.l = LO(IMEM_CONTROL);
- p0.h = HI(IMEM_CONTROL);
- R1 = [p0];
- R0 = ~ENICPLB;
- R0 = R0 & R1;
- [p0] = R0;
+ r1 = [p5 + (IMEM_CONTROL - COREMMR_BASE)];
+ BITCLR (r1, ENICPLB_P);
+ [p5 + (IMEM_CONTROL - COREMMR_BASE)] = r1;
SSYNC;
/* Turn off the dcache */
- p0.l = LO(DMEM_CONTROL);
- p0.h = HI(DMEM_CONTROL);
- R1 = [p0];
- R0 = ~ENDCPLB;
- R0 = R0 & R1;
- [p0] = R0;
+ r1 = [p5 + (DMEM_CONTROL - COREMMR_BASE)];
+ BITCLR (r1, ENDCPLB_P);
+ [p5 + (DMEM_CONTROL - COREMMR_BASE)] = r1;
SSYNC;
/* in case of double faults, save a few things */
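
Editor's note: the rewrite above replaces absolute MMR addressing with
base-plus-offset addressing. P5 is loaded once with the core MMR base, and
every later access encodes only a small constant offset, saving an instruction
pair per access. The C analogue of the before/after, as a rough sketch:

    /* Before: materialize each 32-bit MMR address separately. */
    *(volatile unsigned long *)DTEST_COMMAND = 0;
    *(volatile unsigned long *)ITEST_COMMAND = 0;

    /* After: load one base pointer, then use constant offsets. */
    volatile char *base = (volatile char *)COREMMR_BASE;
    *(volatile unsigned long *)(base + (DTEST_COMMAND - COREMMR_BASE)) = 0;
    *(volatile unsigned long *)(base + (ITEST_COMMAND - COREMMR_BASE)) = 0;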
@@ -122,25 +96,25 @@ ENTRY(__start)
* below
*/
GET_PDA(p0, r0);
- r6 = [p0 + PDA_DF_RETX];
+ r5 = [p0 + PDA_DF_RETX];
p1.l = _init_saved_retx;
p1.h = _init_saved_retx;
- [p1] = r6;
+ [p1] = r5;
- r6 = [p0 + PDA_DF_DCPLB];
+ r5 = [p0 + PDA_DF_DCPLB];
p1.l = _init_saved_dcplb_fault_addr;
p1.h = _init_saved_dcplb_fault_addr;
- [p1] = r6;
+ [p1] = r5;
- r6 = [p0 + PDA_DF_ICPLB];
+ r5 = [p0 + PDA_DF_ICPLB];
p1.l = _init_saved_icplb_fault_addr;
p1.h = _init_saved_icplb_fault_addr;
- [p1] = r6;
+ [p1] = r5;
- r6 = [p0 + PDA_DF_SEQSTAT];
+ r5 = [p0 + PDA_DF_SEQSTAT];
p1.l = _init_saved_seqstat;
p1.h = _init_saved_seqstat;
- [p1] = r6;
+ [p1] = r5;
#endif
/* Initialize stack pointer */
@@ -155,7 +129,7 @@ ENTRY(__start)
sti r0;
#endif
- r0 = 0 (x);
+ r0 = r6;
/* Zero out all of the fun bss regions */
#if L1_DATA_A_LENGTH > 0
r1.l = __sbss_l1;
@@ -200,7 +174,7 @@ ENTRY(__start)
sp.l = lo(KERNEL_CLOCK_STACK);
sp.h = hi(KERNEL_CLOCK_STACK);
call _init_clocks;
- sp = usp; /* usp hasnt been touched, so restore from there */
+ sp = usp; /* usp hasn't been touched, so restore from there */
#endif
/* This section keeps the processor in supervisor mode
@@ -210,11 +184,9 @@ ENTRY(__start)
/* EVT15 = _real_start */
- p0.l = lo(EVT15);
- p0.h = hi(EVT15);
p1.l = _real_start;
p1.h = _real_start;
- [p0] = p1;
+ [p5 + (EVT15 - COREMMR_BASE)] = p1;
csync;
#ifdef CONFIG_EARLY_PRINTK
diff --git a/arch/blackfin/mach-common/interrupt.S b/arch/blackfin/mach-common/interrupt.S
index 2df37db3b49b..469ce7282dc8 100644
--- a/arch/blackfin/mach-common/interrupt.S
+++ b/arch/blackfin/mach-common/interrupt.S
@@ -274,16 +274,16 @@ ENDPROC(_evt_system_call)
* level to EVT14 to prepare the caller for a normal interrupt
* return through RTI.
*
- * We currently use this facility in two occasions:
+ * We currently use this feature on two occasions:
*
- * - to branch to __ipipe_irq_tail_hook as requested by a high
+ * - before branching to __ipipe_irq_tail_hook as requested by a high
* priority domain after the pipeline delivered an interrupt,
* e.g. such as Xenomai, in order to start its rescheduling
* procedure, since we may not switch tasks when IRQ levels are
* nested on the Blackfin, so we have to fake an interrupt return
* so that we may reschedule immediately.
*
- * - to branch to sync_root_irqs, in order to play any interrupt
+ * - before branching to __ipipe_sync_root(), in order to play any interrupt
* pending for the root domain (i.e. the Linux kernel). This lowers
* the core priority level enough so that Linux IRQ handlers may
* never delay interrupts handled by high priority domains; we defer
diff --git a/arch/blackfin/mach-common/ints-priority.c b/arch/blackfin/mach-common/ints-priority.c
index a604f19d8dc3..43d9fb195c1e 100644
--- a/arch/blackfin/mach-common/ints-priority.c
+++ b/arch/blackfin/mach-common/ints-priority.c
@@ -15,6 +15,7 @@
#include <linux/kernel_stat.h>
#include <linux/seq_file.h>
#include <linux/irq.h>
+#include <linux/sched.h>
#ifdef CONFIG_IPIPE
#include <linux/ipipe.h>
#endif
@@ -124,21 +125,21 @@ static void __init search_IAR(void)
* This is for core internal IRQs
*/
-static void bfin_ack_noop(unsigned int irq)
+static void bfin_ack_noop(struct irq_data *d)
{
/* Dummy function. */
}
-static void bfin_core_mask_irq(unsigned int irq)
+static void bfin_core_mask_irq(struct irq_data *d)
{
- bfin_irq_flags &= ~(1 << irq);
+ bfin_irq_flags &= ~(1 << d->irq);
if (!hard_irqs_disabled())
hard_local_irq_enable();
}
-static void bfin_core_unmask_irq(unsigned int irq)
+static void bfin_core_unmask_irq(struct irq_data *d)
{
- bfin_irq_flags |= 1 << irq;
+ bfin_irq_flags |= 1 << d->irq;
/*
* If interrupts are enabled, IMASK must contain the same value
* as bfin_irq_flags. Make sure that invariant holds. If interrupts
@@ -176,6 +177,11 @@ static void bfin_internal_mask_irq(unsigned int irq)
hard_local_irq_restore(flags);
}
+static void bfin_internal_mask_irq_chip(struct irq_data *d)
+{
+ bfin_internal_mask_irq(d->irq);
+}
+
#ifdef CONFIG_SMP
static void bfin_internal_unmask_irq_affinity(unsigned int irq,
const struct cpumask *affinity)
@@ -211,19 +217,24 @@ static void bfin_internal_unmask_irq(unsigned int irq)
}
#ifdef CONFIG_SMP
-static void bfin_internal_unmask_irq(unsigned int irq)
+static void bfin_internal_unmask_irq_chip(struct irq_data *d)
{
- struct irq_desc *desc = irq_to_desc(irq);
- bfin_internal_unmask_irq_affinity(irq, desc->affinity);
+ bfin_internal_unmask_irq_affinity(d->irq, d->affinity);
}
-static int bfin_internal_set_affinity(unsigned int irq, const struct cpumask *mask)
+static int bfin_internal_set_affinity(struct irq_data *d,
+ const struct cpumask *mask, bool force)
{
- bfin_internal_mask_irq(irq);
- bfin_internal_unmask_irq_affinity(irq, mask);
+ bfin_internal_mask_irq(d->irq);
+ bfin_internal_unmask_irq_affinity(d->irq, mask);
return 0;
}
+#else
+static void bfin_internal_unmask_irq_chip(struct irq_data *d)
+{
+ bfin_internal_unmask_irq(d->irq);
+}
#endif
#ifdef CONFIG_PM
@@ -279,28 +290,33 @@ int bfin_internal_set_wake(unsigned int irq, unsigned int state)
return 0;
}
+
+static int bfin_internal_set_wake_chip(struct irq_data *d, unsigned int state)
+{
+ return bfin_internal_set_wake(d->irq, state);
+}
#endif
static struct irq_chip bfin_core_irqchip = {
.name = "CORE",
- .ack = bfin_ack_noop,
- .mask = bfin_core_mask_irq,
- .unmask = bfin_core_unmask_irq,
+ .irq_ack = bfin_ack_noop,
+ .irq_mask = bfin_core_mask_irq,
+ .irq_unmask = bfin_core_unmask_irq,
};
static struct irq_chip bfin_internal_irqchip = {
.name = "INTN",
- .ack = bfin_ack_noop,
- .mask = bfin_internal_mask_irq,
- .unmask = bfin_internal_unmask_irq,
- .mask_ack = bfin_internal_mask_irq,
- .disable = bfin_internal_mask_irq,
- .enable = bfin_internal_unmask_irq,
+ .irq_ack = bfin_ack_noop,
+ .irq_mask = bfin_internal_mask_irq_chip,
+ .irq_unmask = bfin_internal_unmask_irq_chip,
+ .irq_mask_ack = bfin_internal_mask_irq_chip,
+ .irq_disable = bfin_internal_mask_irq_chip,
+ .irq_enable = bfin_internal_unmask_irq_chip,
#ifdef CONFIG_SMP
- .set_affinity = bfin_internal_set_affinity,
+ .irq_set_affinity = bfin_internal_set_affinity,
#endif
#ifdef CONFIG_PM
- .set_wake = bfin_internal_set_wake,
+ .irq_set_wake = bfin_internal_set_wake_chip,
#endif
};
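
Editor's note: these hunks follow the kernel-wide genirq migration from
irq_chip callbacks that take a bare IRQ number to callbacks that take a
struct irq_data, from which the number (plus affinity, trigger type, and other
per-IRQ state) is recovered. The mechanical pattern, sketched with a
hypothetical hw_mask() helper:

    /* Old style: callbacks receive the bare IRQ number. */
    static void chip_mask_irq_old(unsigned int irq)
    {
        hw_mask(irq);
    }

    /* New style: struct irq_data carries the number and more. */
    static void chip_mask_irq(struct irq_data *d)
    {
        hw_mask(d->irq);  /* also: d->affinity, irqd_get_trigger_type(d) */
    }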
@@ -312,33 +328,32 @@ static void bfin_handle_irq(unsigned irq)
__ipipe_handle_irq(irq, &regs);
ipipe_trace_irq_exit(irq);
#else /* !CONFIG_IPIPE */
- struct irq_desc *desc = irq_desc + irq;
- desc->handle_irq(irq, desc);
+ generic_handle_irq(irq);
#endif /* !CONFIG_IPIPE */
}
#ifdef BF537_GENERIC_ERROR_INT_DEMUX
static int error_int_mask;
-static void bfin_generic_error_mask_irq(unsigned int irq)
+static void bfin_generic_error_mask_irq(struct irq_data *d)
{
- error_int_mask &= ~(1L << (irq - IRQ_PPI_ERROR));
+ error_int_mask &= ~(1L << (d->irq - IRQ_PPI_ERROR));
if (!error_int_mask)
bfin_internal_mask_irq(IRQ_GENERIC_ERROR);
}
-static void bfin_generic_error_unmask_irq(unsigned int irq)
+static void bfin_generic_error_unmask_irq(struct irq_data *d)
{
bfin_internal_unmask_irq(IRQ_GENERIC_ERROR);
- error_int_mask |= 1L << (irq - IRQ_PPI_ERROR);
+ error_int_mask |= 1L << (d->irq - IRQ_PPI_ERROR);
}
static struct irq_chip bfin_generic_error_irqchip = {
.name = "ERROR",
- .ack = bfin_ack_noop,
- .mask_ack = bfin_generic_error_mask_irq,
- .mask = bfin_generic_error_mask_irq,
- .unmask = bfin_generic_error_unmask_irq,
+ .irq_ack = bfin_ack_noop,
+ .irq_mask_ack = bfin_generic_error_mask_irq,
+ .irq_mask = bfin_generic_error_mask_irq,
+ .irq_unmask = bfin_generic_error_unmask_irq,
};
static void bfin_demux_error_irq(unsigned int int_err_irq,
@@ -448,8 +463,10 @@ static void bfin_mac_status_ack_irq(unsigned int irq)
}
}
-static void bfin_mac_status_mask_irq(unsigned int irq)
+static void bfin_mac_status_mask_irq(struct irq_data *d)
{
+ unsigned int irq = d->irq;
+
mac_stat_int_mask &= ~(1L << (irq - IRQ_MAC_PHYINT));
#ifdef BF537_GENERIC_ERROR_INT_DEMUX
switch (irq) {
@@ -466,8 +483,10 @@ static void bfin_mac_status_mask_irq(unsigned int irq)
bfin_mac_status_ack_irq(irq);
}
-static void bfin_mac_status_unmask_irq(unsigned int irq)
+static void bfin_mac_status_unmask_irq(struct irq_data *d)
{
+ unsigned int irq = d->irq;
+
#ifdef BF537_GENERIC_ERROR_INT_DEMUX
switch (irq) {
case IRQ_MAC_PHYINT:
@@ -484,7 +503,7 @@ static void bfin_mac_status_unmask_irq(unsigned int irq)
}
#ifdef CONFIG_PM
-int bfin_mac_status_set_wake(unsigned int irq, unsigned int state)
+int bfin_mac_status_set_wake(struct irq_data *d, unsigned int state)
{
#ifdef BF537_GENERIC_ERROR_INT_DEMUX
return bfin_internal_set_wake(IRQ_GENERIC_ERROR, state);
@@ -496,12 +515,12 @@ int bfin_mac_status_set_wake(unsigned int irq, unsigned int state)
static struct irq_chip bfin_mac_status_irqchip = {
.name = "MACST",
- .ack = bfin_ack_noop,
- .mask_ack = bfin_mac_status_mask_irq,
- .mask = bfin_mac_status_mask_irq,
- .unmask = bfin_mac_status_unmask_irq,
+ .irq_ack = bfin_ack_noop,
+ .irq_mask_ack = bfin_mac_status_mask_irq,
+ .irq_mask = bfin_mac_status_mask_irq,
+ .irq_unmask = bfin_mac_status_unmask_irq,
#ifdef CONFIG_PM
- .set_wake = bfin_mac_status_set_wake,
+ .irq_set_wake = bfin_mac_status_set_wake,
#endif
};
@@ -538,13 +557,9 @@ static void bfin_demux_mac_status_irq(unsigned int int_err_irq,
static inline void bfin_set_irq_handler(unsigned irq, irq_flow_handler_t handle)
{
#ifdef CONFIG_IPIPE
- _set_irq_handler(irq, handle_level_irq);
-#else
- struct irq_desc *desc = irq_desc + irq;
- /* May not call generic set_irq_handler() due to spinlock
- recursion. */
- desc->handle_irq = handle;
+ handle = handle_level_irq;
#endif
+ __irq_set_handler_locked(irq, handle);
}
static DECLARE_BITMAP(gpio_enabled, MAX_BLACKFIN_GPIOS);
@@ -552,58 +567,59 @@ extern void bfin_gpio_irq_prepare(unsigned gpio);
#if !defined(CONFIG_BF54x)
-static void bfin_gpio_ack_irq(unsigned int irq)
+static void bfin_gpio_ack_irq(struct irq_data *d)
{
/* AFAIK ack_irq, in case mask_ack is provided,
* gets called only for edge sense irqs
*/
- set_gpio_data(irq_to_gpio(irq), 0);
+ set_gpio_data(irq_to_gpio(d->irq), 0);
}
-static void bfin_gpio_mask_ack_irq(unsigned int irq)
+static void bfin_gpio_mask_ack_irq(struct irq_data *d)
{
- struct irq_desc *desc = irq_desc + irq;
+ unsigned int irq = d->irq;
u32 gpionr = irq_to_gpio(irq);
- if (desc->handle_irq == handle_edge_irq)
+ if (!irqd_is_level_type(d))
set_gpio_data(gpionr, 0);
set_gpio_maska(gpionr, 0);
}
-static void bfin_gpio_mask_irq(unsigned int irq)
+static void bfin_gpio_mask_irq(struct irq_data *d)
{
- set_gpio_maska(irq_to_gpio(irq), 0);
+ set_gpio_maska(irq_to_gpio(d->irq), 0);
}
-static void bfin_gpio_unmask_irq(unsigned int irq)
+static void bfin_gpio_unmask_irq(struct irq_data *d)
{
- set_gpio_maska(irq_to_gpio(irq), 1);
+ set_gpio_maska(irq_to_gpio(d->irq), 1);
}
-static unsigned int bfin_gpio_irq_startup(unsigned int irq)
+static unsigned int bfin_gpio_irq_startup(struct irq_data *d)
{
- u32 gpionr = irq_to_gpio(irq);
+ u32 gpionr = irq_to_gpio(d->irq);
if (__test_and_set_bit(gpionr, gpio_enabled))
bfin_gpio_irq_prepare(gpionr);
- bfin_gpio_unmask_irq(irq);
+ bfin_gpio_unmask_irq(d);
return 0;
}
-static void bfin_gpio_irq_shutdown(unsigned int irq)
+static void bfin_gpio_irq_shutdown(struct irq_data *d)
{
- u32 gpionr = irq_to_gpio(irq);
+ u32 gpionr = irq_to_gpio(d->irq);
- bfin_gpio_mask_irq(irq);
+ bfin_gpio_mask_irq(d);
__clear_bit(gpionr, gpio_enabled);
bfin_gpio_irq_free(gpionr);
}
-static int bfin_gpio_irq_type(unsigned int irq, unsigned int type)
+static int bfin_gpio_irq_type(struct irq_data *d, unsigned int type)
{
+ unsigned int irq = d->irq;
int ret;
char buf[16];
u32 gpionr = irq_to_gpio(irq);
@@ -664,9 +680,9 @@ static int bfin_gpio_irq_type(unsigned int irq, unsigned int type)
}
#ifdef CONFIG_PM
-int bfin_gpio_set_wake(unsigned int irq, unsigned int state)
+int bfin_gpio_set_wake(struct irq_data *d, unsigned int state)
{
- return gpio_pm_wakeup_ctrl(irq_to_gpio(irq), state);
+ return gpio_pm_wakeup_ctrl(irq_to_gpio(d->irq), state);
}
#endif
@@ -818,14 +834,13 @@ void init_pint_lut(void)
}
}
-static void bfin_gpio_ack_irq(unsigned int irq)
+static void bfin_gpio_ack_irq(struct irq_data *d)
{
- struct irq_desc *desc = irq_desc + irq;
- u32 pint_val = irq2pint_lut[irq - SYS_IRQS];
+ u32 pint_val = irq2pint_lut[d->irq - SYS_IRQS];
u32 pintbit = PINT_BIT(pint_val);
u32 bank = PINT_2_BANK(pint_val);
- if ((desc->status & IRQ_TYPE_SENSE_MASK) == IRQ_TYPE_EDGE_BOTH) {
+ if (irqd_get_trigger_type(d) == IRQ_TYPE_EDGE_BOTH) {
if (pint[bank]->invert_set & pintbit)
pint[bank]->invert_clear = pintbit;
else
@@ -835,14 +850,13 @@ static void bfin_gpio_ack_irq(unsigned int irq)
}
-static void bfin_gpio_mask_ack_irq(unsigned int irq)
+static void bfin_gpio_mask_ack_irq(struct irq_data *d)
{
- struct irq_desc *desc = irq_desc + irq;
- u32 pint_val = irq2pint_lut[irq - SYS_IRQS];
+ u32 pint_val = irq2pint_lut[d->irq - SYS_IRQS];
u32 pintbit = PINT_BIT(pint_val);
u32 bank = PINT_2_BANK(pint_val);
- if ((desc->status & IRQ_TYPE_SENSE_MASK) == IRQ_TYPE_EDGE_BOTH) {
+ if (irqd_get_trigger_type(d) == IRQ_TYPE_EDGE_BOTH) {
if (pint[bank]->invert_set & pintbit)
pint[bank]->invert_clear = pintbit;
else
@@ -853,24 +867,25 @@ static void bfin_gpio_mask_ack_irq(unsigned int irq)
pint[bank]->mask_clear = pintbit;
}
-static void bfin_gpio_mask_irq(unsigned int irq)
+static void bfin_gpio_mask_irq(struct irq_data *d)
{
- u32 pint_val = irq2pint_lut[irq - SYS_IRQS];
+ u32 pint_val = irq2pint_lut[d->irq - SYS_IRQS];
pint[PINT_2_BANK(pint_val)]->mask_clear = PINT_BIT(pint_val);
}
-static void bfin_gpio_unmask_irq(unsigned int irq)
+static void bfin_gpio_unmask_irq(struct irq_data *d)
{
- u32 pint_val = irq2pint_lut[irq - SYS_IRQS];
+ u32 pint_val = irq2pint_lut[d->irq - SYS_IRQS];
u32 pintbit = PINT_BIT(pint_val);
u32 bank = PINT_2_BANK(pint_val);
pint[bank]->mask_set = pintbit;
}
-static unsigned int bfin_gpio_irq_startup(unsigned int irq)
+static unsigned int bfin_gpio_irq_startup(struct irq_data *d)
{
+ unsigned int irq = d->irq;
u32 gpionr = irq_to_gpio(irq);
u32 pint_val = irq2pint_lut[irq - SYS_IRQS];
@@ -884,22 +899,23 @@ static unsigned int bfin_gpio_irq_startup(unsigned int irq)
if (__test_and_set_bit(gpionr, gpio_enabled))
bfin_gpio_irq_prepare(gpionr);
- bfin_gpio_unmask_irq(irq);
+ bfin_gpio_unmask_irq(d);
return 0;
}
-static void bfin_gpio_irq_shutdown(unsigned int irq)
+static void bfin_gpio_irq_shutdown(struct irq_data *d)
{
- u32 gpionr = irq_to_gpio(irq);
+ u32 gpionr = irq_to_gpio(d->irq);
- bfin_gpio_mask_irq(irq);
+ bfin_gpio_mask_irq(d);
__clear_bit(gpionr, gpio_enabled);
bfin_gpio_irq_free(gpionr);
}
-static int bfin_gpio_irq_type(unsigned int irq, unsigned int type)
+static int bfin_gpio_irq_type(struct irq_data *d, unsigned int type)
{
+ unsigned int irq = d->irq;
int ret;
char buf[16];
u32 gpionr = irq_to_gpio(irq);
@@ -961,10 +977,10 @@ static int bfin_gpio_irq_type(unsigned int irq, unsigned int type)
u32 pint_saved_masks[NR_PINT_SYS_IRQS];
u32 pint_wakeup_masks[NR_PINT_SYS_IRQS];
-int bfin_gpio_set_wake(unsigned int irq, unsigned int state)
+int bfin_gpio_set_wake(struct irq_data *d, unsigned int state)
{
u32 pint_irq;
- u32 pint_val = irq2pint_lut[irq - SYS_IRQS];
+ u32 pint_val = irq2pint_lut[d->irq - SYS_IRQS];
u32 bank = PINT_2_BANK(pint_val);
u32 pintbit = PINT_BIT(pint_val);
@@ -1066,17 +1082,17 @@ static void bfin_demux_gpio_irq(unsigned int inta_irq,
static struct irq_chip bfin_gpio_irqchip = {
.name = "GPIO",
- .ack = bfin_gpio_ack_irq,
- .mask = bfin_gpio_mask_irq,
- .mask_ack = bfin_gpio_mask_ack_irq,
- .unmask = bfin_gpio_unmask_irq,
- .disable = bfin_gpio_mask_irq,
- .enable = bfin_gpio_unmask_irq,
- .set_type = bfin_gpio_irq_type,
- .startup = bfin_gpio_irq_startup,
- .shutdown = bfin_gpio_irq_shutdown,
+ .irq_ack = bfin_gpio_ack_irq,
+ .irq_mask = bfin_gpio_mask_irq,
+ .irq_mask_ack = bfin_gpio_mask_ack_irq,
+ .irq_unmask = bfin_gpio_unmask_irq,
+ .irq_disable = bfin_gpio_mask_irq,
+ .irq_enable = bfin_gpio_unmask_irq,
+ .irq_set_type = bfin_gpio_irq_type,
+ .irq_startup = bfin_gpio_irq_startup,
+ .irq_shutdown = bfin_gpio_irq_shutdown,
#ifdef CONFIG_PM
- .set_wake = bfin_gpio_set_wake,
+ .irq_set_wake = bfin_gpio_set_wake,
#endif
};
@@ -1147,9 +1163,9 @@ int __init init_arch_irq(void)
for (irq = 0; irq <= SYS_IRQS; irq++) {
if (irq <= IRQ_CORETMR)
- set_irq_chip(irq, &bfin_core_irqchip);
+ irq_set_chip(irq, &bfin_core_irqchip);
else
- set_irq_chip(irq, &bfin_internal_irqchip);
+ irq_set_chip(irq, &bfin_internal_irqchip);
switch (irq) {
#if defined(CONFIG_BF53x)
@@ -1173,50 +1189,50 @@ int __init init_arch_irq(void)
#elif defined(CONFIG_BF538) || defined(CONFIG_BF539)
case IRQ_PORTF_INTA:
#endif
- set_irq_chained_handler(irq,
- bfin_demux_gpio_irq);
+ irq_set_chained_handler(irq, bfin_demux_gpio_irq);
break;
#ifdef BF537_GENERIC_ERROR_INT_DEMUX
case IRQ_GENERIC_ERROR:
- set_irq_chained_handler(irq, bfin_demux_error_irq);
+ irq_set_chained_handler(irq, bfin_demux_error_irq);
break;
#endif
#if defined(CONFIG_BFIN_MAC) || defined(CONFIG_BFIN_MAC_MODULE)
case IRQ_MAC_ERROR:
- set_irq_chained_handler(irq, bfin_demux_mac_status_irq);
+ irq_set_chained_handler(irq,
+ bfin_demux_mac_status_irq);
break;
#endif
#ifdef CONFIG_SMP
case IRQ_SUPPLE_0:
case IRQ_SUPPLE_1:
- set_irq_handler(irq, handle_percpu_irq);
+ irq_set_handler(irq, handle_percpu_irq);
break;
#endif
#ifdef CONFIG_TICKSOURCE_CORETMR
case IRQ_CORETMR:
# ifdef CONFIG_SMP
- set_irq_handler(irq, handle_percpu_irq);
+ irq_set_handler(irq, handle_percpu_irq);
break;
# else
- set_irq_handler(irq, handle_simple_irq);
+ irq_set_handler(irq, handle_simple_irq);
break;
# endif
#endif
#ifdef CONFIG_TICKSOURCE_GPTMR0
case IRQ_TIMER0:
- set_irq_handler(irq, handle_simple_irq);
+ irq_set_handler(irq, handle_simple_irq);
break;
#endif
#ifdef CONFIG_IPIPE
default:
- set_irq_handler(irq, handle_level_irq);
+ irq_set_handler(irq, handle_level_irq);
break;
#else /* !CONFIG_IPIPE */
default:
- set_irq_handler(irq, handle_simple_irq);
+ irq_set_handler(irq, handle_simple_irq);
break;
#endif /* !CONFIG_IPIPE */
}
@@ -1224,22 +1240,22 @@ int __init init_arch_irq(void)
#ifdef BF537_GENERIC_ERROR_INT_DEMUX
for (irq = IRQ_PPI_ERROR; irq <= IRQ_UART1_ERROR; irq++)
- set_irq_chip_and_handler(irq, &bfin_generic_error_irqchip,
+ irq_set_chip_and_handler(irq, &bfin_generic_error_irqchip,
handle_level_irq);
#if defined(CONFIG_BFIN_MAC) || defined(CONFIG_BFIN_MAC_MODULE)
- set_irq_chained_handler(IRQ_MAC_ERROR, bfin_demux_mac_status_irq);
+ irq_set_chained_handler(IRQ_MAC_ERROR, bfin_demux_mac_status_irq);
#endif
#endif
#if defined(CONFIG_BFIN_MAC) || defined(CONFIG_BFIN_MAC_MODULE)
for (irq = IRQ_MAC_PHYINT; irq <= IRQ_MAC_STMDONE; irq++)
- set_irq_chip_and_handler(irq, &bfin_mac_status_irqchip,
+ irq_set_chip_and_handler(irq, &bfin_mac_status_irqchip,
handle_level_irq);
#endif
/* if configured as edge, then will be changed to do_edge_IRQ */
for (irq = GPIO_IRQ_BASE;
irq < (GPIO_IRQ_BASE + MAX_BLACKFIN_GPIOS); irq++)
- set_irq_chip_and_handler(irq, &bfin_gpio_irqchip,
+ irq_set_chip_and_handler(irq, &bfin_gpio_irqchip,
handle_level_irq);
bfin_write_IMASK(0);
@@ -1373,7 +1389,7 @@ asmlinkage int __ipipe_grab_irq(int vec, struct pt_regs *regs)
struct ipipe_domain *this_domain = __ipipe_current_domain;
struct ivgx *ivg_stop = ivg7_13[vec-IVG7].istop;
struct ivgx *ivg = ivg7_13[vec-IVG7].ifirst;
- int irq, s;
+ int irq, s = 0;
if (likely(vec == EVT_IVTMR_P))
irq = IRQ_CORETMR;
@@ -1423,6 +1439,21 @@ asmlinkage int __ipipe_grab_irq(int vec, struct pt_regs *regs)
__raw_get_cpu_var(__ipipe_tick_regs).ipend |= 0x10;
}
+ /*
+ * We don't want Linux interrupt handlers to run at the
+ * current core priority level (i.e. < EVT15), since this
+ * might delay other interrupts handled by a high priority
+ * domain. Here is what we do instead:
+ *
+ * - we raise the SYNCDEFER bit to prevent
+ * __ipipe_handle_irq() from syncing the pipeline for the root
+ * stage for the incoming interrupt. Upon return, that IRQ is
+ * pending in the interrupt log.
+ *
+ * - we raise the TIF_IRQ_SYNC bit for the current thread, so
+ * that _schedule_and_signal_from_int will eventually sync the
+ * pipeline from EVT15.
+ */
if (this_domain == ipipe_root_domain) {
s = __test_and_set_bit(IPIPE_SYNCDEFER_FLAG, &p->status);
barrier();
@@ -1432,6 +1463,24 @@ asmlinkage int __ipipe_grab_irq(int vec, struct pt_regs *regs)
__ipipe_handle_irq(irq, regs);
ipipe_trace_irq_exit(irq);
+ if (user_mode(regs) &&
+ !ipipe_test_foreign_stack() &&
+ (current->ipipe_flags & PF_EVTRET) != 0) {
+ /*
+ * Testing for user_regs() does NOT fully eliminate
+ * foreign stack contexts, because of the forged
+ * interrupt returns we do through
+ * __ipipe_call_irqtail. In that case, we might have
+ * preempted a foreign stack context in a high
+ * priority domain, with a single interrupt level now
+ * pending after the irqtail unwinding is done. In
+ * which case user_mode() is now true, and the event
+ * gets dispatched spuriously.
+ */
+ current->ipipe_flags &= ~PF_EVTRET;
+ __ipipe_dispatch_event(IPIPE_EVENT_RETURN, regs);
+ }
+
if (this_domain == ipipe_root_domain) {
set_thread_flag(TIF_IRQ_SYNC);
if (!s) {
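
Editor's note: the SYNCDEFER handling above uses the usual test-and-set idiom
for a flag that may already be owned by an outer nesting level: record whether
the bit was set on entry, and clear it on exit only if this level set it. In
outline (condensed from the code in this hunk):

    /* 's' records whether an outer level already deferred syncing. */
    s = __test_and_set_bit(IPIPE_SYNCDEFER_FLAG, &p->status);

    __ipipe_handle_irq(irq, regs);  /* IRQ is only logged, not synced */

    set_thread_flag(TIF_IRQ_SYNC);  /* EVT15 will sync the root stage */
    if (!s)
        __clear_bit(IPIPE_SYNCDEFER_FLAG, &p->status);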
diff --git a/arch/blackfin/mach-common/smp.c b/arch/blackfin/mach-common/smp.c
index 9f251406a76a..8bce5ed031e4 100644
--- a/arch/blackfin/mach-common/smp.c
+++ b/arch/blackfin/mach-common/smp.c
@@ -40,6 +40,10 @@
*/
struct corelock_slot corelock __attribute__ ((__section__(".l2.bss")));
+#ifdef CONFIG_ICACHE_FLUSH_L1
+unsigned long blackfin_iflush_l1_entry[NR_CPUS];
+#endif
+
void __cpuinitdata *init_retx_coreb, *init_saved_retx_coreb,
*init_saved_seqstat_coreb, *init_saved_icplb_fault_addr_coreb,
*init_saved_dcplb_fault_addr_coreb;
@@ -108,6 +112,19 @@ static void ipi_flush_icache(void *info)
blackfin_dcache_invalidate_range((unsigned long)fdata,
(unsigned long)fdata + sizeof(*fdata));
+ /* Make sure all write buffers in the data side of the core
+ * are flushed before trying to invalidate the icache. This
+ * needs to be after the data flush and before the icache
+ * flush so that the SSYNC does the right thing in preventing
+ * the instruction prefetcher from hitting things in cached
+ * memory at the wrong time -- it runs much further ahead than
+ * the pipeline.
+ */
+ SSYNC();
+
+ /* ipi_flush_icache is invoked by generic flush_icache_range,
+ * so call blackfin arch icache flush directly here.
+ */
blackfin_icache_flush_range(fdata->start, fdata->end);
}
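
Editor's note: seen from the core that modified the code, the whole cross-core
sequence looks roughly like this (a sketch; smp_icache_flush_range_others() is
the arch helper that sends the IPI which lands in ipi_flush_icache() above):

    /* Publish new code, then make every core's icache coherent. */
    memcpy(dst, new_code, len);
    blackfin_dcache_flush_range((unsigned long)dst,
                                (unsigned long)dst + len);  /* push data out of L1 */
    blackfin_icache_flush_range((unsigned long)dst,
                                (unsigned long)dst + len);  /* local icache first */
    smp_icache_flush_range_others((unsigned long)dst,
                                  (unsigned long)dst + len); /* IPI the others */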
@@ -244,12 +261,13 @@ int smp_call_function(void (*func)(void *info), void *info, int wait)
{
cpumask_t callmap;
+ preempt_disable();
callmap = cpu_online_map;
cpu_clear(smp_processor_id(), callmap);
- if (cpus_empty(callmap))
- return 0;
+ if (!cpus_empty(callmap))
+ smp_send_message(callmap, BFIN_IPI_CALL_FUNC, func, info, wait);
- smp_send_message(callmap, BFIN_IPI_CALL_FUNC, func, info, wait);
+ preempt_enable();
return 0;
}
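
Editor's note: the preempt_disable()/preempt_enable() bracket added here closes
a classic race. Without it, the task could migrate to another CPU between
reading smp_processor_id() and using the computed mask, so the sender might IPI
itself or skip a core. The same rule in compact form, using the standard
get_cpu()/put_cpu() helpers (equivalent to the explicit bracket in the hunk):

    int cpu = get_cpu();          /* preempt_disable() + stable CPU id */
    cpumask_t map = cpu_online_map;
    cpu_clear(cpu, map);          /* never IPI ourselves */
    if (!cpus_empty(map))
        smp_send_message(map, BFIN_IPI_CALL_FUNC, func, info, wait);
    put_cpu();                    /* re-enable preemption */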
@@ -286,12 +304,13 @@ void smp_send_stop(void)
{
cpumask_t callmap;
+ preempt_disable();
callmap = cpu_online_map;
cpu_clear(smp_processor_id(), callmap);
- if (cpus_empty(callmap))
- return;
+ if (!cpus_empty(callmap))
+ smp_send_message(callmap, BFIN_IPI_CPU_STOP, NULL, NULL, 0);
- smp_send_message(callmap, BFIN_IPI_CPU_STOP, NULL, NULL, 0);
+ preempt_enable();
return;
}
@@ -361,8 +380,6 @@ void __cpuinit secondary_start_kernel(void)
*/
init_exception_vectors();
- bfin_setup_caches(cpu);
-
local_irq_disable();
/* Attach the new idle task to the global mm. */
@@ -381,6 +398,8 @@ void __cpuinit secondary_start_kernel(void)
local_irq_enable();
+ bfin_setup_caches(cpu);
+
/*
* Calibrate loops per jiffy value.
* IRQs need to be enabled here - D-cache can be invalidated