Diffstat (limited to 'kernel/irq/manage.c')
-rw-r--r--  kernel/irq/manage.c | 17 +++++++++++++++--
1 file changed, 15 insertions(+), 2 deletions(-)
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 1da999f5e746..fd301aa5665d 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -18,6 +18,7 @@
 #include "internals.h"
 
 #ifdef CONFIG_IRQ_FORCED_THREADING
+# ifndef CONFIG_PREEMPT_RT_BASE
 __read_mostly bool force_irqthreads;
 
 static int __init setup_forced_irqthreads(char *arg)
@@ -26,6 +27,7 @@ static int __init setup_forced_irqthreads(char *arg)
 	return 0;
 }
 early_param("threadirqs", setup_forced_irqthreads);
+# endif
 #endif
 
 /**
@@ -740,7 +742,15 @@ irq_forced_thread_fn(struct irq_desc *desc, struct irqaction *action)
 	local_bh_disable();
 	ret = action->thread_fn(action->irq, action->dev_id);
 	irq_finalize_oneshot(desc, action, false);
-	local_bh_enable();
+	/*
+	 * Interrupts which have real time requirements can be set up
+	 * to avoid softirq processing in the thread handler. This is
+	 * safe as these interrupts do not raise soft interrupts.
+	 */
+	if (irq_settings_no_softirq_call(desc))
+		_local_bh_enable();
+	else
+		local_bh_enable();
 	return ret;
 }
 
@@ -1027,7 +1037,7 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
 		desc->istate |= IRQS_ONESHOT;
 
 		if (irq_settings_can_autoenable(desc))
-			irq_startup(desc);
+			irq_startup(desc, true);
 		else
 			/* Undo nested disables: */
 			desc->depth = 1;
@@ -1038,6 +1048,9 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
 		irqd_set(&desc->irq_data, IRQD_NO_BALANCING);
 	}
 
+	if (new->flags & IRQF_NO_SOFTIRQ_CALL)
+		irq_settings_set_no_softirq_call(desc);
+
 	/* Set default affinity mask once everything is setup */
 	setup_affinity(irq, desc, mask);
 
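
The split between the two enable paths is the core of the change: local_bh_enable() processes any softirqs that became pending while bottom halves were disabled, whereas _local_bh_enable() only drops the disable count without running them. Marking a descriptor via IRQF_NO_SOFTIRQ_CALL therefore keeps that handler's irq thread from being diverted into softirq processing, which, as the added comment notes, is only safe for handlers that do not raise softirqs themselves.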
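
The irq_settings_no_softirq_call() and irq_settings_set_no_softirq_call() accessors used by this diff are defined outside kernel/irq/manage.c and are not shown here (the diffstat is limited to this file). As a rough sketch, they would presumably follow the pattern of the existing irq_settings_* helpers in kernel/irq/settings.h; the _IRQ_NO_SOFTIRQ_CALL bit name below is an assumption, not taken from this diff:

/*
 * Sketch only: assumed companion change to kernel/irq/settings.h,
 * modelled on the existing irq_settings_* accessors. The
 * _IRQ_NO_SOFTIRQ_CALL bit and both helpers are inferred from how the
 * diff above uses them.
 */
static inline bool irq_settings_no_softirq_call(struct irq_desc *desc)
{
	return desc->status_use_accessors & _IRQ_NO_SOFTIRQ_CALL;
}

static inline void irq_settings_set_no_softirq_call(struct irq_desc *desc)
{
	desc->status_use_accessors |= _IRQ_NO_SOFTIRQ_CALL;
}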
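
For illustration, a driver whose handler has real-time requirements and raises no softirqs would request its interrupt with the new IRQF_NO_SOFTIRQ_CALL flag. The snippet below is a minimal usage sketch; the foo_* names and the device structure are invented for the example and are not part of this patch.

#include <linux/interrupt.h>

/* Hypothetical device context, for illustration only. */
struct foo_dev {
	int irq;
};

/*
 * Handler for a latency-sensitive device. On a kernel that
 * force-threads interrupt handlers (threadirqs= or PREEMPT_RT), it is
 * invoked through irq_forced_thread_fn() above; with
 * IRQF_NO_SOFTIRQ_CALL set, bottom halves are re-enabled afterwards via
 * _local_bh_enable(), so pending softirqs are not run from this thread.
 */
static irqreturn_t foo_handler(int irq, void *dev_id)
{
	return IRQ_HANDLED;
}

static int foo_request_irq(struct foo_dev *foo)
{
	return request_irq(foo->irq, foo_handler, IRQF_NO_SOFTIRQ_CALL,
			   "foo", foo);
}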