author    Sebastian Andrzej Siewior <bigeasy@linutronix.de>    2013-08-21 17:48:46 +0200
committer Steven Rostedt (VMware) <rostedt@goodmis.org>        2019-07-23 14:57:12 -0400
commit    3708c991edfa020b9696e48113403ac3ef9fdbb7 (patch)
tree      991eae7bf2c4e60bdd4293e748ae3d14cc27691b
parent    e17f52a49cbc8fab6c4c2802e77ee4ef39a59d78 (diff)
genirq: Do not invoke the affinity callback via a workqueue on RT
[ Upstream commit 2122adbe011cdc0eb62ad62494e181005b23c76a ]

Joe Korty reported that __irq_set_affinity_locked() schedules a workqueue
while holding a raw lock, which results in a might_sleep() warning.

This patch uses swork_queue() instead.

Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
-rw-r--r--  include/linux/interrupt.h |  5 ++---
-rw-r--r--  kernel/irq/manage.c       | 19 ++++---------------
2 files changed, 6 insertions(+), 18 deletions(-)
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index 72333899f043..a9321f6429f2 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -13,7 +13,7 @@
#include <linux/hrtimer.h>
#include <linux/kref.h>
#include <linux/workqueue.h>
-#include <linux/swork.h>
+#include <linux/kthread.h>
#include <linux/atomic.h>
#include <asm/ptrace.h>
@@ -228,7 +228,6 @@ extern void resume_device_irqs(void);
* struct irq_affinity_notify - context for notification of IRQ affinity changes
* @irq: Interrupt to which notification applies
* @kref: Reference count, for internal use
- * @swork: Swork item, for internal use
* @work: Work item, for internal use
* @notify: Function to be called on change. This will be
* called in process context.
@@ -241,7 +240,7 @@ struct irq_affinity_notify {
unsigned int irq;
struct kref kref;
#ifdef CONFIG_PREEMPT_RT_BASE
- struct swork_event swork;
+ struct kthread_work work;
#else
struct work_struct work;
#endif
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 7f4041357d2f..381305c48a0a 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -261,7 +261,7 @@ int irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask,
kref_get(&desc->affinity_notify->kref);
#ifdef CONFIG_PREEMPT_RT_BASE
- swork_queue(&desc->affinity_notify->swork);
+ kthread_schedule_work(&desc->affinity_notify->work);
#else
schedule_work(&desc->affinity_notify->work);
#endif
@@ -326,21 +326,11 @@ out:
}
#ifdef CONFIG_PREEMPT_RT_BASE
-static void init_helper_thread(void)
-{
- static int init_sworker_once;
-
- if (init_sworker_once)
- return;
- if (WARN_ON(swork_get()))
- return;
- init_sworker_once = 1;
-}
-static void irq_affinity_notify(struct swork_event *swork)
+static void irq_affinity_notify(struct kthread_work *work)
{
struct irq_affinity_notify *notify =
- container_of(swork, struct irq_affinity_notify, swork);
+ container_of(work, struct irq_affinity_notify, work);
_irq_affinity_notify(notify);
}
@@ -383,8 +373,7 @@ irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify)
notify->irq = irq;
kref_init(&notify->kref);
#ifdef CONFIG_PREEMPT_RT_BASE
- INIT_SWORK(&notify->swork, irq_affinity_notify);
- init_helper_thread();
+ kthread_init_work(&notify->work, irq_affinity_notify);
#else
INIT_WORK(&notify->work, irq_affinity_notify);
#endif
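
For reference, below is a minimal, self-contained sketch of the kthread_work pattern the RT side switches to. It is not part of the patch: the kthread_schedule_work() call in the diff appears to be an RT-tree helper that queues onto a shared worker thread, so the sketch uses the mainline kthread_worker API (kthread_create_worker(), kthread_init_work(), kthread_queue_work()) instead, and all demo_* identifiers are made up for illustration.

/*
 * Sketch only -- assumes a kernel providing the kthread_worker API in
 * <linux/kthread.h>; all demo_* names are hypothetical.
 */
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/spinlock.h>
#include <linux/err.h>

static struct kthread_worker *demo_worker;	/* dedicated worker thread */
static struct kthread_work demo_work;		/* item queued onto it */
static DEFINE_RAW_SPINLOCK(demo_lock);

/* Runs in the worker's own process context, where sleeping is allowed. */
static void demo_work_fn(struct kthread_work *work)
{
	pr_info("demo: notification handled\n");
}

static int __init demo_init(void)
{
	unsigned long flags;

	demo_worker = kthread_create_worker(0, "demo_worker");
	if (IS_ERR(demo_worker))
		return PTR_ERR(demo_worker);

	kthread_init_work(&demo_work, demo_work_fn);

	/*
	 * Queue the work while holding a raw spinlock -- the situation
	 * irq_set_affinity_locked() is in.  On the RT kernels this patch
	 * targets, this enqueue path avoids the sleeping workqueue locks
	 * that caused the might_sleep() warning described above.
	 */
	raw_spin_lock_irqsave(&demo_lock, flags);
	kthread_queue_work(demo_worker, &demo_work);
	raw_spin_unlock_irqrestore(&demo_lock, flags);

	return 0;
}

static void __exit demo_exit(void)
{
	kthread_flush_work(&demo_work);
	kthread_destroy_worker(demo_worker);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");

In the patched kernel/irq/manage.c above, the same pattern is driven through kthread_init_work() at notifier registration time and the RT queueing helper at affinity-change time.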