Diffstat:
 include/linux/interrupt.h | 65 +++++++++++++++++++++++++++++++++++++++++++++----------------------
 1 file changed, 43 insertions(+), 22 deletions(-)
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index 2aea5d22db07..773dbc9e1c5f 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -59,6 +59,7 @@
* IRQF_NO_THREAD - Interrupt cannot be threaded
* IRQF_EARLY_RESUME - Resume IRQ early during syscore instead of at device
* resume time.
+ * IRQF_NO_SOFTIRQ_CALL - Do not process softirqs in the irq thread context (RT)
*/
#define IRQF_DISABLED 0x00000020
#define IRQF_SAMPLE_RANDOM 0x00000040
@@ -73,6 +74,7 @@
#define IRQF_FORCE_RESUME 0x00008000
#define IRQF_NO_THREAD 0x00010000
#define IRQF_EARLY_RESUME 0x00020000
+#define IRQF_NO_SOFTIRQ_CALL 0x00040000
#define IRQF_TIMER (__IRQF_TIMER | IRQF_NO_SUSPEND | IRQF_NO_THREAD)
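
Drivers opt in to the new behavior at registration time, like any other IRQF_* bit. A minimal, hypothetical sketch (the irq number, handlers, name and cookie are all made up, not from this patch) of a threaded request whose irq thread should not process softirqs on RT:

    /* hypothetical driver snippet */
    ret = request_threaded_irq(irq, my_primary_handler, my_thread_fn,
                               IRQF_ONESHOT | IRQF_NO_SOFTIRQ_CALL,
                               "my-rt-dev", dev);
    if (ret)
        pr_err("my-rt-dev: failed to request irq %d\n", irq);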
@@ -217,7 +219,7 @@ extern void devm_free_irq(struct device *dev, unsigned int irq, void *dev_id);
#ifdef CONFIG_LOCKDEP
# define local_irq_enable_in_hardirq() do { } while (0)
#else
-# define local_irq_enable_in_hardirq() local_irq_enable()
+# define local_irq_enable_in_hardirq() local_irq_enable_nort()
#endif
extern void disable_irq_nosync(unsigned int irq);
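
local_irq_enable_nort() follows the RT tree's "_nort" (no-RT) convention: the operation compiles away on PREEMPT_RT and falls back to the plain primitive everywhere else. A simplified sketch of that convention, assuming the shape the RT patches give these macros in irqflags.h:

    /* sketch of the _nort convention (simplified) */
    #ifdef CONFIG_PREEMPT_RT_FULL
    # define local_irq_enable_nort()    do { } while (0)
    #else
    # define local_irq_enable_nort()    local_irq_enable()
    #endif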
@@ -394,9 +396,13 @@ static inline int disable_irq_wake(unsigned int irq)
#ifdef CONFIG_IRQ_FORCED_THREADING
-extern bool force_irqthreads;
+# ifndef CONFIG_PREEMPT_RT_BASE
+ extern bool force_irqthreads;
+# else
+# define force_irqthreads (true)
+# endif
#else
-#define force_irqthreads (0)
+#define force_irqthreads (false)
#endif
#ifndef __ARCH_SET_SOFTIRQ_PENDING
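
Making force_irqthreads a compile-time constant on PREEMPT_RT_BASE (and a proper bool elsewhere) lets the compiler discard the non-threaded paths entirely. A simplified sketch of the kind of check the irq core makes, loosely modeled on irq_setup_forced_threading() in kernel/irq/manage.c (not a verbatim copy):

    static void irq_setup_forced_threading(struct irqaction *new)
    {
        if (!force_irqthreads)
            return;    /* constant-folded away when forced threading is compiled out */
        /* ... wrap the primary handler so it runs from a thread ... */
    }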
@@ -450,8 +456,14 @@ struct softirq_action
void (*action)(struct softirq_action *);
};
+#ifndef CONFIG_PREEMPT_RT_FULL
asmlinkage void do_softirq(void);
asmlinkage void __do_softirq(void);
+static inline void thread_do_softirq(void) { do_softirq(); }
+#else
+extern void thread_do_softirq(void);
+#endif
+
extern void open_softirq(int nr, void (*action)(struct softirq_action *));
extern void softirq_init(void);
extern void __raise_softirq_irqoff(unsigned int nr);
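
On PREEMPT_RT_FULL, do_softirq()/__do_softirq() drop out of the generic API and thread-context callers go through thread_do_softirq() instead, so the same call works on both configurations. A hedged sketch of how this might pair with IRQF_NO_SOFTIRQ_CALL in an irq thread (simplified; the real logic lives in the RT tree's kernel/irq/manage.c):

    /* after the threaded handler has run (sketch, not verbatim) */
    if (!(action->flags & IRQF_NO_SOFTIRQ_CALL))
        thread_do_softirq();    /* flush softirqs the handler raised */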
@@ -459,6 +471,8 @@ extern void __raise_softirq_irqoff(unsigned int nr);
extern void raise_softirq_irqoff(unsigned int nr);
extern void raise_softirq(unsigned int nr);
+extern void softirq_check_pending_idle(void);
+
/* This is the worklist that queues up per-cpu softirq work.
*
* send_remote_softirq() adds work to these lists, and
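
The new softirq_check_pending_idle() hook above gives the idle code a single entry point for the "softirq pending while going idle" diagnostic. A sketch of the call site the RT tree uses in kernel/time/tick-sched.c (an assumption drawn from the wider RT patch set, not shown in this hunk):

    /* before stopping the tick (sketch) */
    if (unlikely(local_softirq_pending() && cpu_online(cpu)))
        softirq_check_pending_idle();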
@@ -499,8 +513,9 @@ extern void __send_remote_softirq(struct call_single_data *cp, int cpu,
to be executed on some cpu at least once after this.
* If the tasklet is already scheduled, but its execution is still not
started, it will be executed only once.
- * If this tasklet is already running on another CPU (or schedule is called
- from tasklet itself), it is rescheduled for later.
+ * If this tasklet is already running on another CPU, it is rescheduled
+ for later.
+ * tasklet_schedule() must not be called from the tasklet itself (a lockup occurs).
* Tasklet is strictly serialized wrt itself, but not
wrt other tasklets. If a client needs some inter-tasklet synchronization,
it must provide that with spinlocks.
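
In practice this means a tasklet that needs to run again is rescheduled from outside its own body, typically from the hard interrupt handler. A hypothetical example (my_dev, process_completions and the handler are made up):

    static void my_tasklet_fn(unsigned long data)
    {
        struct my_dev *dev = (struct my_dev *)data;

        process_completions(dev);    /* the deferred work */
        /* do NOT tasklet_schedule(&dev->tasklet) from here */
    }

    static irqreturn_t my_irq_handler(int irq, void *cookie)
    {
        struct my_dev *dev = cookie;

        tasklet_schedule(&dev->tasklet);    /* reschedule from irq context */
        return IRQ_HANDLED;
    }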
@@ -525,27 +540,36 @@ struct tasklet_struct name = { NULL, 0, ATOMIC_INIT(1), func, data }
enum
{
TASKLET_STATE_SCHED, /* Tasklet is scheduled for execution */
- TASKLET_STATE_RUN /* Tasklet is running (SMP only) */
+ TASKLET_STATE_RUN, /* Tasklet is running (SMP only) */
+ TASKLET_STATE_PENDING /* Tasklet is pending */
};
-#ifdef CONFIG_SMP
+#define TASKLET_STATEF_SCHED (1 << TASKLET_STATE_SCHED)
+#define TASKLET_STATEF_RUN (1 << TASKLET_STATE_RUN)
+#define TASKLET_STATEF_PENDING (1 << TASKLET_STATE_PENDING)
+
+#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT_FULL)
static inline int tasklet_trylock(struct tasklet_struct *t)
{
return !test_and_set_bit(TASKLET_STATE_RUN, &(t)->state);
}
+static inline int tasklet_tryunlock(struct tasklet_struct *t)
+{
+ return cmpxchg(&t->state, TASKLET_STATEF_RUN, 0) == TASKLET_STATEF_RUN;
+}
+
static inline void tasklet_unlock(struct tasklet_struct *t)
{
smp_mb__before_clear_bit();
clear_bit(TASKLET_STATE_RUN, &(t)->state);
}
-static inline void tasklet_unlock_wait(struct tasklet_struct *t)
-{
- while (test_bit(TASKLET_STATE_RUN, &(t)->state)) { barrier(); }
-}
+extern void tasklet_unlock_wait(struct tasklet_struct *t);
+
#else
#define tasklet_trylock(t) 1
+#define tasklet_tryunlock(t) 1
#define tasklet_unlock_wait(t) do { } while (0)
#define tasklet_unlock(t) do { } while (0)
#endif
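
tasklet_tryunlock() exists so the (now possibly preemptible) tasklet runner can drop TASKLET_STATE_RUN only if no new state bit appeared while the function ran. A simplified sketch of the resulting run loop, assuming nothing about the real softirq.c implementation beyond these helpers:

    if (tasklet_trylock(t)) {
        t->func(t->data);
        if (!tasklet_tryunlock(t)) {
            /*
             * TASKLET_STATE_SCHED (or PENDING) was set while the
             * function ran: RUN stays set and the tasklet must be
             * requeued so it executes again.
             */
        }
    }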
@@ -594,17 +618,8 @@ static inline void tasklet_disable(struct tasklet_struct *t)
smp_mb();
}
-static inline void tasklet_enable(struct tasklet_struct *t)
-{
- smp_mb__before_atomic_dec();
- atomic_dec(&t->count);
-}
-
-static inline void tasklet_hi_enable(struct tasklet_struct *t)
-{
- smp_mb__before_atomic_dec();
- atomic_dec(&t->count);
-}
+extern void tasklet_enable(struct tasklet_struct *t);
+extern void tasklet_hi_enable(struct tasklet_struct *t);
extern void tasklet_kill(struct tasklet_struct *t);
extern void tasklet_kill_immediate(struct tasklet_struct *t, unsigned int cpu);
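
Moving tasklet_enable()/tasklet_hi_enable() out of line changes nothing for callers; the usual disable/enable bracket still applies. A hypothetical usage sketch (dev, its tasklet member and reprogram_hardware are made up):

    tasklet_disable(&dev->tasklet);    /* waits until the tasklet is not running */
    reprogram_hardware(dev);           /* hypothetical critical section */
    tasklet_enable(&dev->tasklet);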
@@ -636,6 +651,12 @@ void tasklet_hrtimer_cancel(struct tasklet_hrtimer *ttimer)
tasklet_kill(&ttimer->tasklet);
}
+#ifdef CONFIG_PREEMPT_RT_FULL
+extern void softirq_early_init(void);
+#else
+static inline void softirq_early_init(void) { }
+#endif
+
/*
* Autoprobing for irqs:
*