From 7e66cbc93f876e24c2f8434cf011b94be242005e Mon Sep 17 00:00:00 2001
From: Konstantin Khlebnikov
Date: Thu, 28 Aug 2014 12:49:36 +0100
Subject: ARM: 8132/1: LPAE: drop wrong carry flag correction after adding TTBR1_OFFSET

In commit 7fb00c2fca4b6c58be521eb3676cf4b4ba8dde3b ("ARM: 8114/1: LPAE:
load upper bits of early TTBR0/TTBR1"), the part that fixes up the carry
when adding TTBR1_OFFSET to TTBR1 was wrong:

	addls	ttbr1, ttbr1, #TTBR1_OFFSET
	adcls	tmp, tmp, #0

addls doesn't update the flags, so adcls adds the carry left over from
the cmp above:

	cmp	ttbr1, tmp			@ PHYS_OFFSET > PAGE_OFFSET?

The 'ls' condition means the carry flag is clear or the zero flag is
set, so only one case is affected: when PHYS_OFFSET == PAGE_OFFSET.

It seems safer to remove this fixup. The bug has been here for ages and
nobody has complained; let's fix it separately.

Reported-and-Tested-by: Jassi Brar
Signed-off-by: Konstantin Khlebnikov
Signed-off-by: Russell King
---
 arch/arm/mm/proc-v7-3level.S | 1 -
 1 file changed, 1 deletion(-)

diff --git a/arch/arm/mm/proc-v7-3level.S b/arch/arm/mm/proc-v7-3level.S
index 1a24e9232ec8..b64e67c7f176 100644
--- a/arch/arm/mm/proc-v7-3level.S
+++ b/arch/arm/mm/proc-v7-3level.S
@@ -146,7 +146,6 @@ ENDPROC(cpu_v7_set_pte_ext)
 	mov	\tmp, \ttbr1, lsr #(32 - ARCH_PGD_SHIFT)	@ upper bits
 	mov	\ttbr1, \ttbr1, lsl #ARCH_PGD_SHIFT		@ lower bits
 	addls	\ttbr1, \ttbr1, #TTBR1_OFFSET
-	adcls	\tmp, \tmp, #0
 	mcrr	p15, 1, \ttbr1, \tmp, c2		@ load TTBR1
 	mov	\tmp, \ttbr0, lsr #(32 - ARCH_PGD_SHIFT)	@ upper bits
 	mov	\ttbr0, \ttbr0, lsl #ARCH_PGD_SHIFT		@ lower bits
--
cgit v1.2.3
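For readers less familiar with the ARM condition flags, the carry
propagation the buggy sequence was aiming for can be sketched in C. This
is a minimal userspace model, not kernel code; the TTBR1_OFFSET value is
hypothetical, chosen only to force a low-word overflow:

    #include <stdint.h>
    #include <stdio.h>

    #define TTBR1_OFFSET 0x4000u    /* hypothetical, for illustration */

    int main(void)
    {
        /* a 64-bit TTBR held as two 32-bit halves, like ttbr1/tmp above */
        uint32_t lo = 0xffffc000u;  /* low word, about to overflow */
        uint32_t hi = 0x00000001u;  /* high word */

        lo += TTBR1_OFFSET;
        hi += (lo < TTBR1_OFFSET);  /* carry out of the low-word add */

        /* the buggy sequence instead consumed the stale carry from the
         * earlier cmp, because addls does not update the flags */
        printf("ttbr1 = %08x%08x\n", hi, lo);
        return 0;
    }

In the assembly the equivalent would need a flag-setting adds/adc pair;
rather than introduce that, the patch drops the adcls and defers any
real carry handling to a separate fix, as the commit message says.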
From a040803a9d6b8c1876d3487a5cb69602ebcbb82c Mon Sep 17 00:00:00 2001
From: Sudeep Holla
Date: Mon, 1 Sep 2014 17:14:29 +0100
Subject: ARM: 8133/1: use irq_set_affinity with force=false when migrating irqs

Since commit 1dbfa187dad ("ARM: irq migration: force migration off CPU
going down") the ARM interrupt migration code on cpu offline calls
irqchip.irq_set_affinity() with the argument force=true. At the point of
this change the argument had no effect because it was not used by any
interrupt chip driver and no semantics were defined for it.

This changed with commit 01f8fa4f01d8 ("genirq: Allow forcing cpu
affinity of interrupts"), which made the force argument useful for
routing interrupts to not yet online cpus without checking the target
cpu against the cpu online mask. The following commit ffde1de64012
("irqchip: gic: Support forced affinity setting") implemented this for
the GIC interrupt controller.

As a consequence the ARM cpu offline irq migration fails if CPU0 is
offlined, because CPU0 is still set in the affinity mask and the
validation against the cpu online mask is skipped due to the force
argument being true. The following first_cpu(mask) selection always
selects CPU0 as the target.

Solve the issue by calling irq_set_affinity() with force=false from the
CPU offline irq migration code, so the GIC driver validates the affinity
mask against the cpu online mask and therefore removes CPU0 from the
possible target candidates.

Tested on TC2 by hotplugging CPU0 in and out. Without this patch the
system locks up as the IRQs are not migrated away from CPU0.

Signed-off-by: Sudeep Holla
Acked-by: Thomas Gleixner
Acked-by: Mark Rutland
Cc: # 3.10.x
Signed-off-by: Russell King
---
 arch/arm/kernel/irq.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/arch/arm/kernel/irq.c b/arch/arm/kernel/irq.c
index 2c4257604513..5c4d38e32a51 100644
--- a/arch/arm/kernel/irq.c
+++ b/arch/arm/kernel/irq.c
@@ -175,7 +175,7 @@ static bool migrate_one_irq(struct irq_desc *desc)
 	c = irq_data_get_irq_chip(d);
 	if (!c->irq_set_affinity)
 		pr_debug("IRQ%u: unable to set affinity\n", d->irq);
-	else if (c->irq_set_affinity(d, affinity, true) == IRQ_SET_MASK_OK && ret)
+	else if (c->irq_set_affinity(d, affinity, false) == IRQ_SET_MASK_OK && ret)
 		cpumask_copy(d->affinity, affinity);

 	return ret;
--
cgit v1.2.3
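The force semantics the patch relies on can be sketched as follows: an
illustrative, GIC-style irq_set_affinity callback, not the actual driver
code; the function name is hypothetical:

    #include <linux/cpumask.h>
    #include <linux/irq.h>

    static int example_set_affinity(struct irq_data *d,
                                    const struct cpumask *mask_val, bool force)
    {
        unsigned int cpu;

        if (force)
            cpu = cpumask_first(mask_val);  /* may pick an offline cpu */
        else
            cpu = cpumask_any_and(mask_val, cpu_online_mask);

        if (cpu >= nr_cpu_ids)
            return -EINVAL;

        /* ... route the interrupt to 'cpu' in the distributor ... */
        return IRQ_SET_MASK_OK;
    }

With force=false the offline CPU0 is filtered out by the intersection
with cpu_online_mask, which is exactly why the migration code must not
pass true.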
From e918a62a2ba81d10a3cc2c513dc70034c9524a95 Mon Sep 17 00:00:00 2001
From: Punit Agrawal
Date: Mon, 1 Sep 2014 17:16:01 +0100
Subject: ARM: 8135/1: Fix incorrect barrier usage in SWP{B} emulation

According to the ARMv7 ARM, explicit barriers are necessary when using
synchronisation primitives such as SWP{B}: the use of these instructions
does not automatically imply a barrier, and any ordering requirements
must be explicitly expressed by the software with suitable barriers.

Based on this, remove the barriers from the SWP{B} emulation.

Acked-by: Will Deacon
Signed-off-by: Punit Agrawal
Signed-off-by: Russell King
---
 arch/arm/kernel/swp_emulate.c | 15 ---------------
 1 file changed, 15 deletions(-)

diff --git a/arch/arm/kernel/swp_emulate.c b/arch/arm/kernel/swp_emulate.c
index 67ca8578c6d8..587fdfe1a72c 100644
--- a/arch/arm/kernel/swp_emulate.c
+++ b/arch/arm/kernel/swp_emulate.c
@@ -142,14 +142,6 @@ static int emulate_swpX(unsigned int address, unsigned int *data,
 	while (1) {
 		unsigned long temp;

-		/*
-		 * Barrier required between accessing protected resource and
-		 * releasing a lock for it. Legacy code might not have done
-		 * this, and we cannot determine that this is not the case
-		 * being emulated, so insert always.
-		 */
-		smp_mb();
-
 		if (type == TYPE_SWPB)
 			__user_swpb_asm(*data, address, res, temp);
 		else
@@ -162,13 +154,6 @@ static int emulate_swpX(unsigned int address, unsigned int *data,
 	}

 	if (res == 0) {
-		/*
-		 * Barrier also required between acquiring a lock for a
-		 * protected resource and accessing the resource. Inserted for
-		 * same reason as above.
-		 */
-		smp_mb();
-
 		if (type == TYPE_SWPB)
 			swpbcounter++;
 		else
--
cgit v1.2.3
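To make the ordering responsibility concrete: software that builds a
lock out of a SWP-style atomic swap has to supply its own barriers. A
minimal C11 sketch of such a legacy-style lock (illustrative only; real
legacy code would issue the SWP instruction directly):

    #include <stdatomic.h>

    static atomic_int lock;

    static void legacy_lock(void)
    {
        /* spin: swap in 1 until we read back 0; the swap orders nothing */
        while (atomic_exchange_explicit(&lock, 1, memory_order_relaxed))
            ;
        atomic_thread_fence(memory_order_acquire); /* barrier after acquiring */
    }

    static void legacy_unlock(void)
    {
        atomic_thread_fence(memory_order_release); /* barrier before releasing */
        atomic_exchange_explicit(&lock, 0, memory_order_relaxed);
    }

    int main(void)
    {
        legacy_lock();
        /* ... critical section ... */
        legacy_unlock();
        return 0;
    }

Because the barriers belong to the caller, inserting smp_mb() on every
emulated SWP made the emulated instruction stronger than the
architectural one; the patch undoes that.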
From d9981380b49b839ecaffbbe131908a342db68980 Mon Sep 17 00:00:00 2001
From: Victor Kamensky
Date: Thu, 4 Sep 2014 06:07:33 +0100
Subject: ARM: 8137/1: fix get_user BE behavior for target variable with size of 8 bytes

Commit e38361d ("ARM: 8091/2: add get_user() support for 8 byte types")
broke the V7 BE get_user call when the target variable is 64 bit but
'*ptr' is 32 bit or smaller.

e38361d changed the type of __r2 from 'register unsigned long' to
'register typeof(x) __r2 asm("r2")', i.e. before the change __r2 was
still 32 bit even when the target variable was 64 bit. After e38361d,
for a 64 bit target variable, __r2 became 64 bit and now occupies two
registers, r2 and r3.

The issue in the BE case is that the r3 register is the least
significant word of __r2 and the r2 register is the most significant
word, but __get_user_4 still copies its result into r2 (the most
significant word of __r2). The subsequent code copies from __r2 into x,
but in the situation described it will pick up only garbage from the r3
register.

Special __get_user_64t_(124) functions are introduced. They are similar
to the corresponding __get_user_(124) functions but store the result in
the r3 register (the lsw of the 64 bit __r2 in a BE image). These
functions are used by the get_user macro in the BE case when the target
variable size is 64 bit.

Also, __get_user_lo8 is renamed to __get_user_32t_8 for consistent
naming across all cases.

Signed-off-by: Victor Kamensky
Suggested-by: Daniel Thompson
Reviewed-by: Daniel Thompson
Signed-off-by: Russell King
---
 arch/arm/include/asm/uaccess.h | 48 ++++++++++++++++++++++++++++++++++--------
 arch/arm/lib/getuser.S         | 38 +++++++++++++++++++++++++++++++++--
 2 files changed, 75 insertions(+), 11 deletions(-)

diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h
index a4cd7af475e9..4767eb9caa78 100644
--- a/arch/arm/include/asm/uaccess.h
+++ b/arch/arm/include/asm/uaccess.h
@@ -107,8 +107,11 @@ static inline void set_fs(mm_segment_t fs)
 extern int __get_user_1(void *);
 extern int __get_user_2(void *);
 extern int __get_user_4(void *);
-extern int __get_user_lo8(void *);
+extern int __get_user_32t_8(void *);
 extern int __get_user_8(void *);
+extern int __get_user_64t_1(void *);
+extern int __get_user_64t_2(void *);
+extern int __get_user_64t_4(void *);

 #define __GUP_CLOBBER_1	"lr", "cc"
 #ifdef CONFIG_CPU_USE_DOMAINS
@@ -117,7 +120,7 @@ extern int __get_user_8(void *);
 #define __GUP_CLOBBER_2	"lr", "cc"
 #endif
 #define __GUP_CLOBBER_4	"lr", "cc"
-#define __GUP_CLOBBER_lo8 "lr", "cc"
+#define __GUP_CLOBBER_32t_8 "lr", "cc"
 #define __GUP_CLOBBER_8	"lr", "cc"

 #define __get_user_x(__r2,__p,__e,__l,__s)				\
@@ -131,12 +134,30 @@ extern int __get_user_8(void *);

 /* narrowing a double-word get into a single 32bit word register: */
 #ifdef __ARMEB__
-#define __get_user_xb(__r2, __p, __e, __l, __s)			\
-	__get_user_x(__r2, __p, __e, __l, lo8)
+#define __get_user_x_32t(__r2, __p, __e, __l, __s)		\
+	__get_user_x(__r2, __p, __e, __l, 32t_8)
 #else
-#define __get_user_xb __get_user_x
+#define __get_user_x_32t __get_user_x
 #endif

+/*
+ * storing result into proper least significant word of 64bit target var,
+ * different only for big endian case where 64 bit __r2 lsw is r3:
+ */
+#ifdef __ARMEB__
+#define __get_user_x_64t(__r2, __p, __e, __l, __s)		\
+	__asm__ __volatile__ (					\
+		__asmeq("%0", "r0") __asmeq("%1", "r2")		\
+		__asmeq("%3", "r1")				\
+		"bl	__get_user_64t_" #__s			\
+		: "=&r" (__e), "=r" (__r2)			\
+		: "0" (__p), "r" (__l)				\
+		: __GUP_CLOBBER_##__s)
+#else
+#define __get_user_x_64t __get_user_x
+#endif
+
+
 #define __get_user_check(x,p)					\
 	({							\
		unsigned long __limit = current_thread_info()->addr_limit - 1; \
@@ -146,17 +167,26 @@ extern int __get_user_8(void *);
 		register int __e asm("r0");			\
 		switch (sizeof(*(__p))) {			\
 		case 1:						\
-			__get_user_x(__r2, __p, __e, __l, 1);	\
+			if (sizeof((x)) >= 8)			\
+				__get_user_x_64t(__r2, __p, __e, __l, 1); \
+			else					\
+				__get_user_x(__r2, __p, __e, __l, 1); \
 			break;					\
 		case 2:						\
-			__get_user_x(__r2, __p, __e, __l, 2);	\
+			if (sizeof((x)) >= 8)			\
+				__get_user_x_64t(__r2, __p, __e, __l, 2); \
+			else					\
+				__get_user_x(__r2, __p, __e, __l, 2); \
 			break;					\
 		case 4:						\
-			__get_user_x(__r2, __p, __e, __l, 4);	\
+			if (sizeof((x)) >= 8)			\
+				__get_user_x_64t(__r2, __p, __e, __l, 4); \
+			else					\
+				__get_user_x(__r2, __p, __e, __l, 4); \
 			break;					\
 		case 8:						\
 			if (sizeof((x)) < 8)			\
-				__get_user_xb(__r2, __p, __e, __l, 4); \
+				__get_user_x_32t(__r2, __p, __e, __l, 4); \
 			else					\
 				__get_user_x(__r2, __p, __e, __l, 8); \
 			break;					\

diff --git a/arch/arm/lib/getuser.S b/arch/arm/lib/getuser.S
index 938600098b88..8ecfd15c3a02 100644
--- a/arch/arm/lib/getuser.S
+++ b/arch/arm/lib/getuser.S
@@ -80,7 +80,7 @@ ENTRY(__get_user_8)
 ENDPROC(__get_user_8)

 #ifdef __ARMEB__
-ENTRY(__get_user_lo8)
+ENTRY(__get_user_32t_8)
 	check_uaccess r0, 8, r1, r2, __get_user_bad
 #ifdef CONFIG_CPU_USE_DOMAINS
 	add	r0, r0, #4
@@ -90,7 +90,37 @@ ENTRY(__get_user_lo8)
 #endif
 	mov	r0, #0
 	ret	lr
-ENDPROC(__get_user_lo8)
+ENDPROC(__get_user_32t_8)
+
+ENTRY(__get_user_64t_1)
+	check_uaccess r0, 1, r1, r2, __get_user_bad8
+8: TUSER(ldrb)	r3, [r0]
+	mov	r0, #0
+	ret	lr
+ENDPROC(__get_user_64t_1)
+
+ENTRY(__get_user_64t_2)
+	check_uaccess r0, 2, r1, r2, __get_user_bad8
+#ifdef CONFIG_CPU_USE_DOMAINS
+rb	.req	ip
+9:	ldrbt	r3, [r0], #1
+10:	ldrbt	rb, [r0], #0
+#else
+rb	.req	r0
+9:	ldrb	r3, [r0]
+10:	ldrb	rb, [r0, #1]
+#endif
+	orr	r3, rb, r3, lsl #8
+	mov	r0, #0
+	ret	lr
+ENDPROC(__get_user_64t_2)
+
+ENTRY(__get_user_64t_4)
+	check_uaccess r0, 4, r1, r2, __get_user_bad8
+11: TUSER(ldr)	r3, [r0]
+	mov	r0, #0
+	ret	lr
+ENDPROC(__get_user_64t_4)
 #endif

 __get_user_bad8:
@@ -111,5 +141,9 @@ ENDPROC(__get_user_bad8)
 	.long	6b, __get_user_bad8
 #ifdef __ARMEB__
 	.long	7b, __get_user_bad
+	.long	8b, __get_user_bad8
+	.long	9b, __get_user_bad8
+	.long	10b, __get_user_bad8
+	.long	11b, __get_user_bad8
 #endif
 .popsection
--
cgit v1.2.3
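The endianness trap here is easy to model in plain C. A minimal sketch
(not kernel code) of why the result register matters on big endian: a
64-bit __r2 lives in the pair (r2,r3), and narrowing it back to 32 bits
keeps only the least significant word, which on BE is r3, so a helper
that writes its result to r2 loses it:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t r2 = 0x12345678;  /* where __get_user_4 put the value */
        uint32_t r3 = 0xaaaaaaaa;  /* stale register contents */

        /* big-endian pairing: r2 is the msw, r3 the lsw */
        uint64_t r2_pair_be = ((uint64_t)r2 << 32) | r3;

        /* the macro narrows __r2 back to the pointee type: lsw only */
        uint32_t x = (uint32_t)r2_pair_be;

        printf("fetched=%08x but x=%08x (garbage from r3)\n", r2, x);
        return 0;
    }

The __get_user_64t_* helpers fix this by writing the fetched value into
r3, the word the narrowing actually reads on BE.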
From 7a0bd49713aca3040099e1413d1cc9f08802d97a Mon Sep 17 00:00:00 2001
From: Victor Kamensky
Date: Sat, 13 Sep 2014 23:29:17 +0100
Subject: ARM: 8151/1: add missing exports for asm functions required by get_user macro

The previous commits that dealt with get_user for 64 bit types failed
to export the proper functions, so if the get_user macro is used with
particular target/value types from a kernel module, modpost produces an
'undefined!' error. The solution is to export all required functions.

Signed-off-by: Victor Kamensky
Signed-off-by: Russell King
---
 arch/arm/kernel/armksyms.c | 8 ++++++++
 1 file changed, 8 insertions(+)

diff --git a/arch/arm/kernel/armksyms.c b/arch/arm/kernel/armksyms.c
index f7b450f97e68..a88671cfe1ff 100644
--- a/arch/arm/kernel/armksyms.c
+++ b/arch/arm/kernel/armksyms.c
@@ -98,6 +98,14 @@ EXPORT_SYMBOL(__clear_user);
 EXPORT_SYMBOL(__get_user_1);
 EXPORT_SYMBOL(__get_user_2);
 EXPORT_SYMBOL(__get_user_4);
+EXPORT_SYMBOL(__get_user_8);
+
+#ifdef __ARMEB__
+EXPORT_SYMBOL(__get_user_64t_1);
+EXPORT_SYMBOL(__get_user_64t_2);
+EXPORT_SYMBOL(__get_user_64t_4);
+EXPORT_SYMBOL(__get_user_32t_8);
+#endif

 EXPORT_SYMBOL(__put_user_1);
 EXPORT_SYMBOL(__put_user_2);
--
cgit v1.2.3
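The failure mode is easiest to see from a module's point of view. A
hypothetical out-of-tree module like the sketch below references
__get_user_8 (and, in a BE build, the __get_user_64t_* helpers), so
without the exports above modpost fails at build time with an
'undefined!' error for the missing symbol:

    #include <linux/module.h>
    #include <linux/uaccess.h>

    static long demo_read_u64(u64 __user *uptr)
    {
        u64 val;

        /* expands to __get_user_8 (and __get_user_64t_* on BE) */
        if (get_user(val, uptr))
            return -EFAULT;
        return (long)val;
    }

    static int __init demo_init(void)
    {
        /* a NULL pointer fails access_ok, so this safely returns -EFAULT */
        pr_info("demo: %ld\n", demo_read_u64((u64 __user *)0));
        return 0;
    }

    static void __exit demo_exit(void)
    {
    }

    module_init(demo_init);
    module_exit(demo_exit);
    MODULE_LICENSE("GPL");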
From fbfb872f5f417cea48760c535e0ff027c88b507a Mon Sep 17 00:00:00 2001
From: Nathan Lynch
Date: Thu, 11 Sep 2014 02:49:08 +0100
Subject: ARM: 8148/1: flush TLS and thumbee register state during exec

The TPIDRURO and TPIDRURW registers need to be flushed during exec;
otherwise TLS information is potentially leaked. TPIDRURO in particular
needs careful treatment. Since flush_thread basically needs the same
code used to set the TLS in arm_syscall, pull that into a common
set_tls helper in tls.h and use it in both places.

Similarly, TEEHBR needs to be cleared during exec as well. Clearing its
save slot in thread_info isn't right as there is no guarantee that a
thread switch will occur before the new program runs. Just setting the
register directly is sufficient.

Signed-off-by: Nathan Lynch
Acked-by: Will Deacon
Cc:
Signed-off-by: Russell King
---
 arch/arm/include/asm/tls.h | 62 ++++++++++++++++++++++++++++++++++++++++++++++
 arch/arm/kernel/process.c  |  2 ++
 arch/arm/kernel/thumbee.c  |  2 +-
 arch/arm/kernel/traps.c    | 17 +------------
 4 files changed, 66 insertions(+), 17 deletions(-)

diff --git a/arch/arm/include/asm/tls.h b/arch/arm/include/asm/tls.h
index 83259b873333..36172adda9d0 100644
--- a/arch/arm/include/asm/tls.h
+++ b/arch/arm/include/asm/tls.h
@@ -1,6 +1,9 @@
 #ifndef __ASMARM_TLS_H
 #define __ASMARM_TLS_H

+#include
+#include
+
 #ifdef __ASSEMBLY__
 #include
 .macro switch_tls_none, base, tp, tpuser, tmp1, tmp2
@@ -50,6 +53,47 @@
 #endif

 #ifndef __ASSEMBLY__
+
+static inline void set_tls(unsigned long val)
+{
+	struct thread_info *thread;
+
+	thread = current_thread_info();
+
+	thread->tp_value[0] = val;
+
+	/*
+	 * This code runs with preemption enabled and therefore must
+	 * be reentrant with respect to switch_tls.
+	 *
+	 * We need to ensure ordering between the shadow state and the
+	 * hardware state, so that we don't corrupt the hardware state
+	 * with a stale shadow state during context switch.
+	 *
+	 * If we're preempted here, switch_tls will load TPIDRURO from
+	 * thread_info upon resuming execution and the following mcr
+	 * is merely redundant.
+	 */
+	barrier();
+
+	if (!tls_emu) {
+		if (has_tls_reg) {
+			asm("mcr p15, 0, %0, c13, c0, 3"
+			    : : "r" (val));
+		} else {
+			/*
+			 * User space must never try to access this
+			 * directly. Expect your app to break
+			 * eventually if you do so. The user helper
+			 * at 0xffff0fe0 must be used instead. (see
+			 * entry-armv.S for details)
+			 */
+			*((unsigned int *)0xffff0ff0) = val;
+		}
+
+	}
+}
+
 static inline unsigned long get_tpuser(void)
 {
 	unsigned long reg = 0;
@@ -59,5 +103,23 @@ static inline unsigned long get_tpuser(void)

 	return reg;
 }
+
+static inline void set_tpuser(unsigned long val)
+{
+	/* Since TPIDRURW is fully context-switched (unlike TPIDRURO),
+	 * we need not update thread_info.
+	 */
+	if (has_tls_reg && !tls_emu) {
+		asm("mcr p15, 0, %0, c13, c0, 2"
+		    : : "r" (val));
+	}
+}
+
+static inline void flush_tls(void)
+{
+	set_tls(0);
+	set_tpuser(0);
+}
+
 #endif
 #endif	/* __ASMARM_TLS_H */
diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
index 81ef686a91ca..a35f6ebbd2c2 100644
--- a/arch/arm/kernel/process.c
+++ b/arch/arm/kernel/process.c
@@ -334,6 +334,8 @@ void flush_thread(void)
 	memset(&tsk->thread.debug, 0, sizeof(struct debug_info));
 	memset(&thread->fpstate, 0, sizeof(union fp_state));

+	flush_tls();
+
 	thread_notify(THREAD_NOTIFY_FLUSH, thread);
 }
diff --git a/arch/arm/kernel/thumbee.c b/arch/arm/kernel/thumbee.c
index 7b8403b76666..80f0d69205e7 100644
--- a/arch/arm/kernel/thumbee.c
+++ b/arch/arm/kernel/thumbee.c
@@ -45,7 +45,7 @@ static int thumbee_notifier(struct notifier_block *self, unsigned long cmd, void

 	switch (cmd) {
 	case THREAD_NOTIFY_FLUSH:
-		thread->thumbee_state = 0;
+		teehbr_write(0);
 		break;
 	case THREAD_NOTIFY_SWITCH:
 		current_thread_info()->thumbee_state = teehbr_read();
diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
index c8e4bb714944..a964c9f40f87 100644
--- a/arch/arm/kernel/traps.c
+++ b/arch/arm/kernel/traps.c
@@ -581,7 +581,6 @@ do_cache_op(unsigned long start, unsigned long end, int flags)
 #define NR(x) ((__ARM_NR_##x) - __ARM_NR_BASE)
 asmlinkage int arm_syscall(int no, struct pt_regs *regs)
 {
-	struct thread_info *thread = current_thread_info();
 	siginfo_t info;

 	if ((no >> 16) != (__ARM_NR_BASE>> 16))
@@ -632,21 +631,7 @@ asmlinkage int arm_syscall(int no, struct pt_regs *regs)
 		return regs->ARM_r0;

 	case NR(set_tls):
-		thread->tp_value[0] = regs->ARM_r0;
-		if (tls_emu)
-			return 0;
-		if (has_tls_reg) {
-			asm ("mcr p15, 0, %0, c13, c0, 3"
-				: : "r" (regs->ARM_r0));
-		} else {
-			/*
-			 * User space must never try to access this directly.
-			 * Expect your app to break eventually if you do so.
-			 * The user helper at 0xffff0fe0 must be used instead.
-			 * (see entry-armv.S for details)
-			 */
-			*((unsigned int *)0xffff0ff0) = regs->ARM_r0;
-		}
+		set_tls(regs->ARM_r0);
 		return 0;

 #ifdef CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG
--
cgit v1.2.3
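What a leak would look like from user space: TPIDRURO is readable
(though not writable) in user mode on ARMv7, so a freshly exec'd program
can observe whatever was left in the register. An illustrative sketch
only; in practice a normal libc sets the TLS pointer before main() runs,
so this is easiest to observe from a minimal static binary:

    #include <stdio.h>

    static unsigned long read_tpidruro(void)
    {
        unsigned long tp;

        /* user-mode read of the TLS register the patch clears on exec */
        asm("mrc p15, 0, %0, c13, c0, 3" : "=r" (tp));
        return tp;
    }

    int main(void)
    {
        printf("TPIDRURO at startup: %#lx\n", read_tpidruro());
        return 0;
    }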
From 505013bc9065391f09a51d51cd3bf0b06dfb570a Mon Sep 17 00:00:00 2001
From: Stephen Boyd
Date: Thu, 11 Sep 2014 23:25:30 +0100
Subject: ARM: 8149/1: perf: Don't sleep while atomic when enabling per-cpu interrupts

Rob Clark reports a sleeping while atomic bug when using perf.

BUG: sleeping function called from invalid context at ../kernel/locking/mutex.c:583
in_atomic(): 1, irqs_disabled(): 128, pid: 0, name: swapper/0
------------[ cut here ]------------
WARNING: CPU: 2 PID: 4828 at ../kernel/locking/mutex.c:479 mutex_lock_nested+0x3a0/0x3e8()
DEBUG_LOCKS_WARN_ON(in_interrupt())
Modules linked in:
CPU: 2 PID: 4828 Comm: Xorg.bin Tainted: G        W 3.17.0-rc3-00234-gd535c45-dirty #819
[] (unwind_backtrace) from [] (show_stack+0x10/0x14)
[] (show_stack) from [] (dump_stack+0x98/0xb8)
[] (dump_stack) from [] (warn_slowpath_common+0x70/0x8c)
[] (warn_slowpath_common) from [] (warn_slowpath_fmt+0x30/0x40)
[] (warn_slowpath_fmt) from [] (mutex_lock_nested+0x3a0/0x3e8)
[] (mutex_lock_nested) from [] (irq_find_host+0x20/0x9c)
[] (irq_find_host) from [] (of_irq_get+0x28/0x48)
[] (of_irq_get) from [] (platform_get_irq+0x1c/0x8c)
[] (platform_get_irq) from [] (cpu_pmu_enable_percpu_irq+0x14/0x38)
[] (cpu_pmu_enable_percpu_irq) from [] (flush_smp_call_function_queue+0x88/0x178)
[] (flush_smp_call_function_queue) from [] (handle_IPI+0x88/0x160)
[] (handle_IPI) from [] (gic_handle_irq+0x64/0x68)
[] (gic_handle_irq) from [] (__irq_svc+0x44/0x5c)
Exception stack(0xe63ddea0 to 0xe63ddee8)
dea0: 00000001 00000001 00000000 c2f3b200 c16db380 c032d4a0 e63ddf40 60010013
dec0: 00000000 001fbfd4 00000100 00000000 00000001 e63ddee8 c0284770 c02a2e30
dee0: 20010013 ffffffff
[] (__irq_svc) from [] (ktime_get_ts64+0x1c8/0x200)
[] (ktime_get_ts64) from [] (poll_select_set_timeout+0x60/0xa8)
[] (poll_select_set_timeout) from [] (SyS_select+0xa8/0x118)
[] (SyS_select) from [] (ret_fast_syscall+0x0/0x48)
---[ end trace 0bb583b46342da6f ]---
INFO: lockdep is turned off.

We don't really need to get the platform irq again when we're enabling
or disabling the per-cpu irq. Furthermore, we don't really need to set
and clear bits in the active_irqs bitmask because that's only used in
the non-percpu irq case to figure out when the last CPU PMU has been
disabled. Just pass the irq directly to the enable/disable functions to
clean all this up. This should be slightly more efficient and also fix
the scheduling while atomic bug.
Fixes: bbd64559376f ("ARM: perf: support percpu irqs for the CPU PMU")
Reported-by: Rob Clark
Acked-by: Will Deacon
Cc: stable@vger.kernel.org
Signed-off-by: Stephen Boyd
Signed-off-by: Russell King
---
 arch/arm/kernel/perf_event_cpu.c | 14 ++++----------
 1 file changed, 4 insertions(+), 10 deletions(-)

diff --git a/arch/arm/kernel/perf_event_cpu.c b/arch/arm/kernel/perf_event_cpu.c
index e6a6edbec613..4bf4cce759fe 100644
--- a/arch/arm/kernel/perf_event_cpu.c
+++ b/arch/arm/kernel/perf_event_cpu.c
@@ -76,21 +76,15 @@ static struct pmu_hw_events *cpu_pmu_get_cpu_events(void)

 static void cpu_pmu_enable_percpu_irq(void *data)
 {
-	struct arm_pmu *cpu_pmu = data;
-	struct platform_device *pmu_device = cpu_pmu->plat_device;
-	int irq = platform_get_irq(pmu_device, 0);
+	int irq = *(int *)data;

 	enable_percpu_irq(irq, IRQ_TYPE_NONE);
-	cpumask_set_cpu(smp_processor_id(), &cpu_pmu->active_irqs);
 }

 static void cpu_pmu_disable_percpu_irq(void *data)
 {
-	struct arm_pmu *cpu_pmu = data;
-	struct platform_device *pmu_device = cpu_pmu->plat_device;
-	int irq = platform_get_irq(pmu_device, 0);
+	int irq = *(int *)data;

-	cpumask_clear_cpu(smp_processor_id(), &cpu_pmu->active_irqs);
 	disable_percpu_irq(irq);
 }

@@ -103,7 +97,7 @@ static void cpu_pmu_free_irq(struct arm_pmu *cpu_pmu)

 	irq = platform_get_irq(pmu_device, 0);
 	if (irq >= 0 && irq_is_percpu(irq)) {
-		on_each_cpu(cpu_pmu_disable_percpu_irq, cpu_pmu, 1);
+		on_each_cpu(cpu_pmu_disable_percpu_irq, &irq, 1);
 		free_percpu_irq(irq, &percpu_pmu);
 	} else {
 		for (i = 0; i < irqs; ++i) {
@@ -138,7 +132,7 @@ static int cpu_pmu_request_irq(struct arm_pmu *cpu_pmu, irq_handler_t handler)
 				irq);
 			return err;
 		}
-		on_each_cpu(cpu_pmu_enable_percpu_irq, cpu_pmu, 1);
+		on_each_cpu(cpu_pmu_enable_percpu_irq, &irq, 1);
 	} else {
 		for (i = 0; i < irqs; ++i) {
 			err = 0;
--
cgit v1.2.3
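The shape of the fix generalizes: anything an IPI callback needs that
would take a sleeping lock to look up (here, the irq number via
platform_get_irq -> of_irq_get) must be computed in process context and
passed in. A sketch of the pattern with illustrative names, not the
patched file itself:

    #include <linux/interrupt.h>
    #include <linux/irq.h>
    #include <linux/smp.h>

    static void example_enable_on_cpu(void *data)
    {
        int irq = *(int *)data;  /* no sleeping lookups in IPI context */

        enable_percpu_irq(irq, IRQ_TYPE_NONE);
    }

    static void example_enable_everywhere(int irq)
    {
        /* &irq stays valid: on_each_cpu(..., 1) waits for all callbacks */
        on_each_cpu(example_enable_on_cpu, &irq, 1);
    }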