Diffstat (limited to 'kernel/workqueue.c')
-rw-r--r--  kernel/workqueue.c  243
1 file changed, 148 insertions(+), 95 deletions(-)
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index cd8b61bded78..12137825bf5a 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -49,6 +49,8 @@
#include <linux/uaccess.h>
#include <linux/sched/isolation.h>
#include <linux/nmi.h>
+#include <linux/locallock.h>
+#include <linux/delay.h>
#include "workqueue_internal.h"
@@ -123,11 +125,16 @@ enum {
* cpu or grabbing pool->lock is enough for read access. If
* POOL_DISASSOCIATED is set, it's identical to L.
*
+ * On RT we need the extra protection via rt_lock_idle_list() to
+ * serialize the list manipulations against the read access from
+ * wq_worker_sleeping(). All other places are nicely serialized via
+ * pool->lock.
+ *
* A: wq_pool_attach_mutex protected.
*
* PL: wq_pool_mutex protected.
*
- * PR: wq_pool_mutex protected for writes. Sched-RCU protected for reads.
+ * PR: wq_pool_mutex protected for writes. RCU protected for reads.
*
* PW: wq_pool_mutex and wq->mutex protected for writes. Either for reads.
*
@@ -136,7 +143,7 @@ enum {
*
* WQ: wq->mutex protected.
*
- * WR: wq->mutex protected for writes. Sched-RCU protected for reads.
+ * WR: wq->mutex protected for writes. RCU protected for reads.
*
* MD: wq_mayday_lock protected.
*/
@@ -183,7 +190,7 @@ struct worker_pool {
atomic_t nr_running ____cacheline_aligned_in_smp;
/*
- * Destruction of pool is sched-RCU protected to allow dereferences
+ * Destruction of pool is RCU protected to allow dereferences
* from get_work_pool().
*/
struct rcu_head rcu;
@@ -212,7 +219,7 @@ struct pool_workqueue {
/*
* Release of unbound pwq is punted to system_wq. See put_pwq()
* and pwq_unbound_release_workfn() for details. pool_workqueue
- * itself is also sched-RCU protected so that the first pwq can be
+ * itself is also RCU protected so that the first pwq can be
* determined without grabbing wq->mutex.
*/
struct work_struct unbound_release_work;
@@ -350,6 +357,8 @@ EXPORT_SYMBOL_GPL(system_power_efficient_wq);
struct workqueue_struct *system_freezable_power_efficient_wq __read_mostly;
EXPORT_SYMBOL_GPL(system_freezable_power_efficient_wq);
+static DEFINE_LOCAL_IRQ_LOCK(pendingb_lock);
+
static int worker_thread(void *__worker);
static void workqueue_sysfs_unregister(struct workqueue_struct *wq);
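pendingb_lock above is an RT "local lock" (the reason for the new <linux/locallock.h> include): on !PREEMPT_RT_BASE the locallock.h macros collapse to plain local_irq_save()/local_irq_restore(), while on RT they take a per-CPU lock instead of hard-disabling interrupts. A minimal usage sketch of the pattern, not part of the patch (example_lock, example_count and example_bump are made-up names):

static DEFINE_LOCAL_IRQ_LOCK(example_lock);	/* hypothetical per-CPU lock */
static DEFINE_PER_CPU(int, example_count);	/* hypothetical per-CPU data */

static void example_bump(void)
{
	unsigned long flags;

	/* !RT: local_irq_save(flags); RT: per-CPU lock, task stays schedulable */
	local_lock_irqsave(example_lock, flags);
	this_cpu_inc(example_count);
	local_unlock_irqrestore(example_lock, flags);
}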
@@ -357,20 +366,20 @@ static void workqueue_sysfs_unregister(struct workqueue_struct *wq);
#include <trace/events/workqueue.h>
#define assert_rcu_or_pool_mutex() \
- RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held() && \
+ RCU_LOCKDEP_WARN(!rcu_read_lock_held() && \
!lockdep_is_held(&wq_pool_mutex), \
- "sched RCU or wq_pool_mutex should be held")
+ "RCU or wq_pool_mutex should be held")
#define assert_rcu_or_wq_mutex(wq) \
- RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held() && \
+ RCU_LOCKDEP_WARN(!rcu_read_lock_held() && \
!lockdep_is_held(&wq->mutex), \
- "sched RCU or wq->mutex should be held")
+ "RCU or wq->mutex should be held")
#define assert_rcu_or_wq_mutex_or_pool_mutex(wq) \
- RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held() && \
+ RCU_LOCKDEP_WARN(!rcu_read_lock_held() && \
!lockdep_is_held(&wq->mutex) && \
!lockdep_is_held(&wq_pool_mutex), \
- "sched RCU, wq->mutex or wq_pool_mutex should be held")
+ "RCU, wq->mutex or wq_pool_mutex should be held")
#define for_each_cpu_worker_pool(pool, cpu) \
for ((pool) = &per_cpu(cpu_worker_pools, cpu)[0]; \
@@ -382,7 +391,7 @@ static void workqueue_sysfs_unregister(struct workqueue_struct *wq);
* @pool: iteration cursor
* @pi: integer used for iteration
*
- * This must be called either with wq_pool_mutex held or sched RCU read
+ * This must be called either with wq_pool_mutex held or RCU read
* locked. If the pool needs to be used beyond the locking in effect, the
* caller is responsible for guaranteeing that the pool stays online.
*
@@ -414,7 +423,7 @@ static void workqueue_sysfs_unregister(struct workqueue_struct *wq);
* @pwq: iteration cursor
* @wq: the target workqueue
*
- * This must be called either with wq->mutex held or sched RCU read locked.
+ * This must be called either with wq->mutex held or RCU read locked.
* If the pwq needs to be used beyond the locking in effect, the caller is
* responsible for guaranteeing that the pwq stays online.
*
@@ -426,6 +435,31 @@ static void workqueue_sysfs_unregister(struct workqueue_struct *wq);
if (({ assert_rcu_or_wq_mutex(wq); false; })) { } \
else
+#ifdef CONFIG_PREEMPT_RT_BASE
+static inline void rt_lock_idle_list(struct worker_pool *pool)
+{
+ preempt_disable();
+}
+static inline void rt_unlock_idle_list(struct worker_pool *pool)
+{
+ preempt_enable();
+}
+static inline void sched_lock_idle_list(struct worker_pool *pool) { }
+static inline void sched_unlock_idle_list(struct worker_pool *pool) { }
+#else
+static inline void rt_lock_idle_list(struct worker_pool *pool) { }
+static inline void rt_unlock_idle_list(struct worker_pool *pool) { }
+static inline void sched_lock_idle_list(struct worker_pool *pool)
+{
+ spin_lock_irq(&pool->lock);
+}
+static inline void sched_unlock_idle_list(struct worker_pool *pool)
+{
+ spin_unlock_irq(&pool->lock);
+}
+#endif
+
+
#ifdef CONFIG_DEBUG_OBJECTS_WORK
static struct debug_obj_descr work_debug_descr;
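The helpers added above implement the idle_list scheme from the locking comment earlier: writers keep holding pool->lock and additionally bracket the actual list manipulation with rt_lock_idle_list(), while the lockless reader path coming from wq_worker_sleeping() brackets its peek with sched_lock_idle_list(). A condensed sketch of how the later hunks pair them (pattern only, the example_* functions are not part of the patch):

static void example_enter_idle(struct worker_pool *pool, struct worker *worker)
{
	/* writer side: the caller already holds pool->lock */
	rt_lock_idle_list(pool);		/* RT: preempt_disable() */
	list_add(&worker->entry, &pool->idle_list);
	rt_unlock_idle_list(pool);
}

static void example_sleeping_wakeup(struct worker_pool *pool)
{
	/* reader side, from wq_worker_sleeping(): pool->lock is not held */
	sched_lock_idle_list(pool);		/* !RT: spin_lock_irq(&pool->lock) */
	wake_up_worker(pool);			/* peeks idle_list under rt_lock_idle_list() */
	sched_unlock_idle_list(pool);
}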
@@ -550,7 +584,7 @@ static int worker_pool_assign_id(struct worker_pool *pool)
* @wq: the target workqueue
* @node: the node ID
*
- * This must be called with any of wq_pool_mutex, wq->mutex or sched RCU
+ * This must be called with any of wq_pool_mutex, wq->mutex or RCU
* read locked.
* If the pwq needs to be used beyond the locking in effect, the caller is
* responsible for guaranteeing that the pwq stays online.
@@ -694,8 +728,8 @@ static struct pool_workqueue *get_work_pwq(struct work_struct *work)
* @work: the work item of interest
*
* Pools are created and destroyed under wq_pool_mutex, and allows read
- * access under sched-RCU read lock. As such, this function should be
- * called under wq_pool_mutex or with preemption disabled.
+ * access under RCU read lock. As such, this function should be
+ * called under wq_pool_mutex or inside of a rcu_read_lock() region.
*
* All fields of the returned pool are accessible as long as the above
* mentioned locking is in effect. If the returned pool needs to be used
@@ -832,50 +866,45 @@ static struct worker *first_idle_worker(struct worker_pool *pool)
*/
static void wake_up_worker(struct worker_pool *pool)
{
- struct worker *worker = first_idle_worker(pool);
+ struct worker *worker;
+
+ rt_lock_idle_list(pool);
+
+ worker = first_idle_worker(pool);
if (likely(worker))
wake_up_process(worker->task);
+
+ rt_unlock_idle_list(pool);
}
/**
- * wq_worker_waking_up - a worker is waking up
+ * wq_worker_running - a worker is running again
* @task: task waking up
- * @cpu: CPU @task is waking up to
*
- * This function is called during try_to_wake_up() when a worker is
- * being awoken.
- *
- * CONTEXT:
- * spin_lock_irq(rq->lock)
+ * This function is called when a worker returns from schedule()
*/
-void wq_worker_waking_up(struct task_struct *task, int cpu)
+void wq_worker_running(struct task_struct *task)
{
struct worker *worker = kthread_data(task);
- if (!(worker->flags & WORKER_NOT_RUNNING)) {
- WARN_ON_ONCE(worker->pool->cpu != cpu);
+ if (!worker->sleeping)
+ return;
+ if (!(worker->flags & WORKER_NOT_RUNNING))
atomic_inc(&worker->pool->nr_running);
- }
+ worker->sleeping = 0;
}
/**
* wq_worker_sleeping - a worker is going to sleep
* @task: task going to sleep
*
- * This function is called during schedule() when a busy worker is
- * going to sleep. Worker on the same cpu can be woken up by
- * returning pointer to its task.
- *
- * CONTEXT:
- * spin_lock_irq(rq->lock)
- *
- * Return:
- * Worker task on @cpu to wake up, %NULL if none.
+ * This function is called from schedule() when a busy worker is
+ * going to sleep.
*/
-struct task_struct *wq_worker_sleeping(struct task_struct *task)
+void wq_worker_sleeping(struct task_struct *task)
{
- struct worker *worker = kthread_data(task), *to_wakeup = NULL;
+ struct worker *worker = kthread_data(task);
struct worker_pool *pool;
/*
@@ -884,29 +913,26 @@ struct task_struct *wq_worker_sleeping(struct task_struct *task)
* checking NOT_RUNNING.
*/
if (worker->flags & WORKER_NOT_RUNNING)
- return NULL;
+ return;
pool = worker->pool;
- /* this can only happen on the local cpu */
- if (WARN_ON_ONCE(pool->cpu != raw_smp_processor_id()))
- return NULL;
+ if (WARN_ON_ONCE(worker->sleeping))
+ return;
+
+ worker->sleeping = 1;
/*
* The counterpart of the following dec_and_test, implied mb,
* worklist not empty test sequence is in insert_work().
* Please read comment there.
- *
- * NOT_RUNNING is clear. This means that we're bound to and
- * running on the local cpu w/ rq lock held and preemption
- * disabled, which in turn means that none else could be
- * manipulating idle_list, so dereferencing idle_list without pool
- * lock is safe.
*/
if (atomic_dec_and_test(&pool->nr_running) &&
- !list_empty(&pool->worklist))
- to_wakeup = first_idle_worker(pool);
- return to_wakeup ? to_wakeup->task : NULL;
+ !list_empty(&pool->worklist)) {
+ sched_lock_idle_list(pool);
+ wake_up_worker(pool);
+ sched_unlock_idle_list(pool);
+ }
}
/**
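wq_worker_waking_up() is replaced by wq_worker_running(), and wq_worker_sleeping() no longer returns a task to wake: both hooks now run outside the rq->lock section, so wq_worker_sleeping() may take pool->lock itself via sched_lock_idle_list(). The matching sched/core.c change is not part of this diff; roughly, the pairing it is expected to provide looks like this (sched_submit_work()/sched_update_worker() names are assumed from the companion patch, shown only as a sketch):

static inline void sched_submit_work(struct task_struct *tsk)
{
	/* before __schedule(): preemption still enabled, no rq->lock held */
	if (tsk->flags & PF_WQ_WORKER)
		wq_worker_sleeping(tsk);
}

static inline void sched_update_worker(struct task_struct *tsk)
{
	/* after the worker task has been scheduled back in */
	if (tsk->flags & PF_WQ_WORKER)
		wq_worker_running(tsk);
}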
@@ -1100,12 +1126,14 @@ static void put_pwq_unlocked(struct pool_workqueue *pwq)
{
if (pwq) {
/*
- * As both pwqs and pools are sched-RCU protected, the
+ * As both pwqs and pools are RCU protected, the
* following lock operations are safe.
*/
- spin_lock_irq(&pwq->pool->lock);
+ rcu_read_lock();
+ local_spin_lock_irq(pendingb_lock, &pwq->pool->lock);
put_pwq(pwq);
- spin_unlock_irq(&pwq->pool->lock);
+ local_spin_unlock_irq(pendingb_lock, &pwq->pool->lock);
+ rcu_read_unlock();
}
}
@@ -1209,7 +1237,7 @@ static int try_to_grab_pending(struct work_struct *work, bool is_dwork,
struct worker_pool *pool;
struct pool_workqueue *pwq;
- local_irq_save(*flags);
+ local_lock_irqsave(pendingb_lock, *flags);
/* try to steal the timer if it exists */
if (is_dwork) {
@@ -1228,6 +1256,7 @@ static int try_to_grab_pending(struct work_struct *work, bool is_dwork,
if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work)))
return 0;
+ rcu_read_lock();
/*
* The queueing is in progress, or it is already queued. Try to
* steal it from ->worklist without clearing WORK_STRUCT_PENDING.
@@ -1266,14 +1295,16 @@ static int try_to_grab_pending(struct work_struct *work, bool is_dwork,
set_work_pool_and_keep_pending(work, pool->id);
spin_unlock(&pool->lock);
+ rcu_read_unlock();
return 1;
}
spin_unlock(&pool->lock);
fail:
- local_irq_restore(*flags);
+ rcu_read_unlock();
+ local_unlock_irqrestore(pendingb_lock, *flags);
if (work_is_canceling(work))
return -ENOENT;
- cpu_relax();
+ cpu_chill();
return -EAGAIN;
}
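cpu_chill() (hence the new <linux/delay.h> include) replaces the cpu_relax() busy-wait on the contended-PENDING path: on PREEMPT_RT_FULL it sleeps briefly so a preempted PENDING owner can run, on !RT it falls back to cpu_relax(). Callers keep the usual retry loop; a loose sketch of that pattern, modelled on __cancel_work_timer() with the -ENOENT wait elided (example_cancel() is not part of the patch):

static bool example_cancel(struct work_struct *work, bool is_dwork)
{
	unsigned long flags;
	int ret;

	do {
		ret = try_to_grab_pending(work, is_dwork, &flags);
		/*
		 * -EAGAIN: someone else owns PENDING; pendingb_lock was
		 * already dropped and cpu_chill()/cpu_relax() already ran.
		 */
	} while (unlikely(ret == -EAGAIN));

	if (ret < 0)	/* -ENOENT: being canceled elsewhere */
		return false;

	/* success: pendingb_lock is held, interrupts are off */
	mark_work_canceling(work);
	local_unlock_irqrestore(pendingb_lock, flags);
	return ret;
}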
@@ -1375,7 +1406,13 @@ static void __queue_work(int cpu, struct workqueue_struct *wq,
* queued or lose PENDING. Grabbing PENDING and queueing should
* happen with IRQ disabled.
*/
+#ifndef CONFIG_PREEMPT_RT_FULL
+ /*
+ * nort: On RT the "interrupts-disabled" rule has been replaced with
+ * pendingb_lock.
+ */
lockdep_assert_irqs_disabled();
+#endif
debug_work_activate(work);
@@ -1383,6 +1420,7 @@ static void __queue_work(int cpu, struct workqueue_struct *wq,
if (unlikely(wq->flags & __WQ_DRAINING) &&
WARN_ON_ONCE(!is_chained_work(wq)))
return;
+ rcu_read_lock();
retry:
if (req_cpu == WORK_CPU_UNBOUND)
cpu = wq_select_unbound_cpu(raw_smp_processor_id());
@@ -1439,10 +1477,8 @@ retry:
/* pwq determined, queue */
trace_workqueue_queue_work(req_cpu, pwq, work);
- if (WARN_ON(!list_empty(&work->entry))) {
- spin_unlock(&pwq->pool->lock);
- return;
- }
+ if (WARN_ON(!list_empty(&work->entry)))
+ goto out;
pwq->nr_in_flight[pwq->work_color]++;
work_flags = work_color_to_flags(pwq->work_color);
@@ -1460,7 +1496,9 @@ retry:
insert_work(pwq, work, worklist, work_flags);
+out:
spin_unlock(&pwq->pool->lock);
+ rcu_read_unlock();
}
/**
@@ -1480,14 +1518,14 @@ bool queue_work_on(int cpu, struct workqueue_struct *wq,
bool ret = false;
unsigned long flags;
- local_irq_save(flags);
+ local_lock_irqsave(pendingb_lock, flags);
if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
__queue_work(cpu, wq, work);
ret = true;
}
- local_irq_restore(flags);
+ local_unlock_irqrestore(pendingb_lock, flags);
return ret;
}
EXPORT_SYMBOL(queue_work_on);
@@ -1496,8 +1534,11 @@ void delayed_work_timer_fn(struct timer_list *t)
{
struct delayed_work *dwork = from_timer(dwork, t, timer);
+ /* XXX */
+ /* local_lock(pendingb_lock); */
/* should have been called from irqsafe timer with irq already off */
__queue_work(dwork->cpu, dwork->wq, &dwork->work);
+ /* local_unlock(pendingb_lock); */
}
EXPORT_SYMBOL(delayed_work_timer_fn);
@@ -1552,14 +1593,14 @@ bool queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
unsigned long flags;
/* read the comment in __queue_work() */
- local_irq_save(flags);
+ local_lock_irqsave(pendingb_lock, flags);
if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
__queue_delayed_work(cpu, wq, dwork, delay);
ret = true;
}
- local_irq_restore(flags);
+ local_unlock_irqrestore(pendingb_lock, flags);
return ret;
}
EXPORT_SYMBOL(queue_delayed_work_on);
@@ -1594,7 +1635,7 @@ bool mod_delayed_work_on(int cpu, struct workqueue_struct *wq,
if (likely(ret >= 0)) {
__queue_delayed_work(cpu, wq, dwork, delay);
- local_irq_restore(flags);
+ local_unlock_irqrestore(pendingb_lock, flags);
}
/* -ENOENT from try_to_grab_pending() becomes %true */
@@ -1605,11 +1646,12 @@ EXPORT_SYMBOL_GPL(mod_delayed_work_on);
static void rcu_work_rcufn(struct rcu_head *rcu)
{
struct rcu_work *rwork = container_of(rcu, struct rcu_work, rcu);
+ unsigned long flags;
/* read the comment in __queue_work() */
- local_irq_disable();
+ local_lock_irqsave(pendingb_lock, flags);
__queue_work(WORK_CPU_UNBOUND, rwork->wq, &rwork->work);
- local_irq_enable();
+ local_unlock_irqrestore(pendingb_lock, flags);
}
/**
@@ -1661,7 +1703,9 @@ static void worker_enter_idle(struct worker *worker)
worker->last_active = jiffies;
/* idle_list is LIFO */
+ rt_lock_idle_list(pool);
list_add(&worker->entry, &pool->idle_list);
+ rt_unlock_idle_list(pool);
if (too_many_workers(pool) && !timer_pending(&pool->idle_timer))
mod_timer(&pool->idle_timer, jiffies + IDLE_WORKER_TIMEOUT);
@@ -1694,7 +1738,9 @@ static void worker_leave_idle(struct worker *worker)
return;
worker_clr_flags(worker, WORKER_IDLE);
pool->nr_idle--;
+ rt_lock_idle_list(pool);
list_del_init(&worker->entry);
+ rt_unlock_idle_list(pool);
}
static struct worker *alloc_worker(int node)
@@ -1862,7 +1908,9 @@ static void destroy_worker(struct worker *worker)
pool->nr_workers--;
pool->nr_idle--;
+ rt_lock_idle_list(pool);
list_del_init(&worker->entry);
+ rt_unlock_idle_list(pool);
worker->flags |= WORKER_DIE;
wake_up_process(worker->task);
}
@@ -2855,14 +2903,14 @@ static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr,
might_sleep();
- local_irq_disable();
+ rcu_read_lock();
pool = get_work_pool(work);
if (!pool) {
- local_irq_enable();
+ rcu_read_unlock();
return false;
}
- spin_lock(&pool->lock);
+ spin_lock_irq(&pool->lock);
/* see the comment in try_to_grab_pending() with the same code */
pwq = get_work_pwq(work);
if (pwq) {
@@ -2894,10 +2942,11 @@ static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr,
lock_map_acquire(&pwq->wq->lockdep_map);
lock_map_release(&pwq->wq->lockdep_map);
}
-
+ rcu_read_unlock();
return true;
already_gone:
spin_unlock_irq(&pool->lock);
+ rcu_read_unlock();
return false;
}
@@ -2997,7 +3046,7 @@ static bool __cancel_work_timer(struct work_struct *work, bool is_dwork)
/* tell other tasks trying to grab @work to back off */
mark_work_canceling(work);
- local_irq_restore(flags);
+ local_unlock_irqrestore(pendingb_lock, flags);
/*
* This allows canceling during early boot. We know that @work
@@ -3058,10 +3107,10 @@ EXPORT_SYMBOL_GPL(cancel_work_sync);
*/
bool flush_delayed_work(struct delayed_work *dwork)
{
- local_irq_disable();
+ local_lock_irq(pendingb_lock);
if (del_timer_sync(&dwork->timer))
__queue_work(dwork->cpu, dwork->wq, &dwork->work);
- local_irq_enable();
+ local_unlock_irq(pendingb_lock);
return flush_work(&dwork->work);
}
EXPORT_SYMBOL(flush_delayed_work);
@@ -3099,7 +3148,7 @@ static bool __cancel_work(struct work_struct *work, bool is_dwork)
return false;
set_work_pool_and_clear_pending(work, get_work_pool_id(work));
- local_irq_restore(flags);
+ local_unlock_irqrestore(pendingb_lock, flags);
return ret;
}
@@ -3344,7 +3393,7 @@ static void rcu_free_pool(struct rcu_head *rcu)
* put_unbound_pool - put a worker_pool
* @pool: worker_pool to put
*
- * Put @pool. If its refcnt reaches zero, it gets destroyed in sched-RCU
+ * Put @pool. If its refcnt reaches zero, it gets destroyed in RCU
* safe manner. get_unbound_pool() calls this function on its failure path
* and this function should be able to release pools which went through,
* successfully or not, init_worker_pool().
@@ -3398,8 +3447,8 @@ static void put_unbound_pool(struct worker_pool *pool)
del_timer_sync(&pool->idle_timer);
del_timer_sync(&pool->mayday_timer);
- /* sched-RCU protected to allow dereferences from get_work_pool() */
- call_rcu_sched(&pool->rcu, rcu_free_pool);
+ /* RCU protected to allow dereferences from get_work_pool() */
+ call_rcu(&pool->rcu, rcu_free_pool);
}
/**
@@ -3506,14 +3555,14 @@ static void pwq_unbound_release_workfn(struct work_struct *work)
put_unbound_pool(pool);
mutex_unlock(&wq_pool_mutex);
- call_rcu_sched(&pwq->rcu, rcu_free_pwq);
+ call_rcu(&pwq->rcu, rcu_free_pwq);
/*
* If we're the last pwq going away, @wq is already dead and no one
* is gonna access it anymore. Schedule RCU free.
*/
if (is_last)
- call_rcu_sched(&wq->rcu, rcu_free_wq);
+ call_rcu(&wq->rcu, rcu_free_wq);
}
/**
@@ -4198,7 +4247,7 @@ void destroy_workqueue(struct workqueue_struct *wq)
* The base ref is never dropped on per-cpu pwqs. Directly
* schedule RCU free.
*/
- call_rcu_sched(&wq->rcu, rcu_free_wq);
+ call_rcu(&wq->rcu, rcu_free_wq);
} else {
/*
* We're the sole accessor of @wq at this point. Directly
@@ -4308,7 +4357,8 @@ bool workqueue_congested(int cpu, struct workqueue_struct *wq)
struct pool_workqueue *pwq;
bool ret;
- rcu_read_lock_sched();
+ rcu_read_lock();
+ preempt_disable();
if (cpu == WORK_CPU_UNBOUND)
cpu = smp_processor_id();
@@ -4319,7 +4369,8 @@ bool workqueue_congested(int cpu, struct workqueue_struct *wq)
pwq = unbound_pwq_by_node(wq, cpu_to_node(cpu));
ret = !list_empty(&pwq->delayed_works);
- rcu_read_unlock_sched();
+ preempt_enable();
+ rcu_read_unlock();
return ret;
}
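The hunk above shows the conversion recipe used for the remaining sched-RCU readers in this file: rcu_read_lock_sched() is split into rcu_read_lock() plus an explicit preempt_disable() wherever a stable CPU is still required (smp_processor_id() here). Schematically, as an illustrative before/after pair rather than patch content:

static bool example_congested_old(struct workqueue_struct *wq)
{
	bool ret;

	rcu_read_lock_sched();		/* implied preempt_disable() pinned the CPU */
	ret = !list_empty(&unbound_pwq_by_node(wq,
			cpu_to_node(smp_processor_id()))->delayed_works);
	rcu_read_unlock_sched();
	return ret;
}

static bool example_congested_new(struct workqueue_struct *wq)
{
	bool ret;

	rcu_read_lock();		/* grace-period protection only */
	preempt_disable();		/* keep smp_processor_id() stable */
	ret = !list_empty(&unbound_pwq_by_node(wq,
			cpu_to_node(smp_processor_id()))->delayed_works);
	preempt_enable();
	rcu_read_unlock();
	return ret;
}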
@@ -4345,15 +4396,15 @@ unsigned int work_busy(struct work_struct *work)
if (work_pending(work))
ret |= WORK_BUSY_PENDING;
- local_irq_save(flags);
+ rcu_read_lock();
pool = get_work_pool(work);
if (pool) {
- spin_lock(&pool->lock);
+ spin_lock_irqsave(&pool->lock, flags);
if (find_worker_executing_work(pool, work))
ret |= WORK_BUSY_RUNNING;
- spin_unlock(&pool->lock);
+ spin_unlock_irqrestore(&pool->lock, flags);
}
- local_irq_restore(flags);
+ rcu_read_unlock();
return ret;
}
@@ -4537,7 +4588,7 @@ void show_workqueue_state(void)
unsigned long flags;
int pi;
- rcu_read_lock_sched();
+ rcu_read_lock();
pr_info("Showing busy workqueues and worker pools:\n");
@@ -4602,7 +4653,7 @@ void show_workqueue_state(void)
touch_nmi_watchdog();
}
- rcu_read_unlock_sched();
+ rcu_read_unlock();
}
/* used to show worker information through /proc/PID/{comm,stat,status} */
@@ -4989,16 +5040,16 @@ bool freeze_workqueues_busy(void)
* nr_active is monotonically decreasing. It's safe
* to peek without lock.
*/
- rcu_read_lock_sched();
+ rcu_read_lock();
for_each_pwq(pwq, wq) {
WARN_ON_ONCE(pwq->nr_active < 0);
if (pwq->nr_active) {
busy = true;
- rcu_read_unlock_sched();
+ rcu_read_unlock();
goto out_unlock;
}
}
- rcu_read_unlock_sched();
+ rcu_read_unlock();
}
out_unlock:
mutex_unlock(&wq_pool_mutex);
@@ -5193,7 +5244,8 @@ static ssize_t wq_pool_ids_show(struct device *dev,
const char *delim = "";
int node, written = 0;
- rcu_read_lock_sched();
+ get_online_cpus();
+ rcu_read_lock();
for_each_node(node) {
written += scnprintf(buf + written, PAGE_SIZE - written,
"%s%d:%d", delim, node,
@@ -5201,7 +5253,8 @@ static ssize_t wq_pool_ids_show(struct device *dev,
delim = " ";
}
written += scnprintf(buf + written, PAGE_SIZE - written, "\n");
- rcu_read_unlock_sched();
+ rcu_read_unlock();
+ put_online_cpus();
return written;
}