author		Peter Zijlstra <a.p.zijlstra@chello.nl>	2011-10-03 12:43:25 +0200
committer	Clark Williams <williams@redhat.com>	2012-04-13 11:01:46 -0500
commit		825ea26b7d05ef85c40ff1ff01eab84cb64a01c0 (patch)
tree		bf8d801de046a642f43d20781f728e3ca1752ff8 /kernel
parent		f5deed7063c8dff8082e56521541ae2e5883121f (diff)
workqueue: Fix PF_THREAD_BOUND abuse
PF_THREAD_BOUND is set by kthread_bind() and means the thread is bound to a
particular cpu for correctness. The workqueue code abuses this flag and
blindly sets it for all created threads, including those that are free to
migrate.

Restore the original semantics now that the worst abuses in the cpu-hotplug
path are gone. The only icky bit is the rescue thread for per-cpu
workqueues; it cannot use kthread_bind() but will use set_cpus_allowed_ptr()
to migrate itself to the desired cpu. Set and clear PF_THREAD_BOUND manually
here.

XXX: I think worker_maybe_bind_and_lock()/worker_unbind_and_unlock() should
also do a get_online_cpus(), this would likely allow us to remove the while
loop.

XXX: should probably repurpose GCWQ_DISASSOCIATED to warn on adding works
after CPU_DOWN_PREPARE -- its dual use to mark unbound gcwqs is a tad
annoying though.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
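For readers unfamiliar with the flag: kthread_bind() can only be applied to a
thread that has not started running yet, which is why the rescuer has to
migrate itself. A minimal sketch of the self-binding pattern the changelog
describes follows; bind_self_and_work(), target_cpu and do_bound_work() are
illustrative placeholders, not part of the patch -- the real code is
worker_maybe_bind_and_lock() and the new worker_unbind_and_unlock() in the
diff below.

#include <linux/sched.h>
#include <linux/cpumask.h>

static void do_bound_work(void);	/* placeholder for the cpu-bound work */

/*
 * Sketch only: a running thread migrates itself with set_cpus_allowed_ptr()
 * and marks/clears PF_THREAD_BOUND by hand around the section that must stay
 * on the target cpu.
 */
static void bind_self_and_work(int target_cpu)
{
	set_cpus_allowed_ptr(current, get_cpu_mask(target_cpu));

	if (task_cpu(current) == target_cpu) {
		/* bound to target_cpu for correctness from here on */
		current->flags |= PF_THREAD_BOUND;

		do_bound_work();

		/* work done, the binding is no longer required */
		current->flags &= ~PF_THREAD_BOUND;
	}
}

Clearing the flag once the bound section is finished is exactly what the new
worker_unbind_and_unlock() helper does for the rescuer.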
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/workqueue.c	29
1 file changed, 20 insertions, 9 deletions
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index ba977c4d8d4f..dc50d5d42428 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -1283,8 +1283,14 @@ __acquires(&gcwq->lock)
 			return false;
 		if (task_cpu(task) == gcwq->cpu &&
 		    cpumask_equal(&current->cpus_allowed,
-				  get_cpu_mask(gcwq->cpu)))
+				  get_cpu_mask(gcwq->cpu))) {
+			/*
+			 * Since we're binding to a particular cpu and need to
+			 * stay there for correctness, mark us PF_THREAD_BOUND.
+			 */
+			task->flags |= PF_THREAD_BOUND;
 			return true;
+		}
 		spin_unlock_irq(&gcwq->lock);
 
 		/*
@@ -1298,6 +1304,18 @@ __acquires(&gcwq->lock)
 	}
 }
 
+static void worker_unbind_and_unlock(struct worker *worker)
+{
+	struct global_cwq *gcwq = worker->gcwq;
+	struct task_struct *task = worker->task;
+
+	/*
+	 * Its no longer required we're PF_THREAD_BOUND, the work is done.
+	 */
+	task->flags &= ~PF_THREAD_BOUND;
+	spin_unlock_irq(&gcwq->lock);
+}
+
 static struct worker *alloc_worker(void)
 {
 	struct worker *worker;
@@ -1360,15 +1378,9 @@ static struct worker *create_worker(struct global_cwq *gcwq, bool bind)
 	if (IS_ERR(worker->task))
 		goto fail;
 
-	/*
-	 * A rogue worker will become a regular one if CPU comes
-	 * online later on. Make sure every worker has
-	 * PF_THREAD_BOUND set.
-	 */
 	if (bind && !on_unbound_cpu)
 		kthread_bind(worker->task, gcwq->cpu);
 	else {
-		worker->task->flags |= PF_THREAD_BOUND;
 		if (on_unbound_cpu)
 			worker->flags |= WORKER_UNBOUND;
 	}
@@ -2045,7 +2057,7 @@ repeat:
 		if (keep_working(gcwq))
 			wake_up_worker(gcwq);
 
-		spin_unlock_irq(&gcwq->lock);
+		worker_unbind_and_unlock(rescuer);
 	}
 
 	schedule();
@@ -2995,7 +3007,6 @@ struct workqueue_struct *__alloc_workqueue_key(const char *fmt,
 		if (IS_ERR(rescuer->task))
 			goto err;
 
-		rescuer->task->flags |= PF_THREAD_BOUND;
 		wake_up_process(rescuer->task);
 	}
 