Diffstat (limited to 'kernel/workqueue.c')
-rw-r--r--  kernel/workqueue.c  23
1 file changed, 21 insertions, 2 deletions
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 054774605d2f..51d77e7c0989 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -307,6 +307,18 @@ static cpumask_var_t wq_unbound_cpumask;
 /* CPU where unbound work was last round robin scheduled from this CPU */
 static DEFINE_PER_CPU(int, wq_rr_cpu_last);
 
+/*
+ * Local execution of unbound work items is no longer guaranteed. The
+ * following always forces round-robin CPU selection on unbound work items
+ * to uncover usages which depend on it.
+ */
+#ifdef CONFIG_DEBUG_WQ_FORCE_RR_CPU
+static bool wq_debug_force_rr_cpu = true;
+#else
+static bool wq_debug_force_rr_cpu = false;
+#endif
+module_param_named(debug_force_rr_cpu, wq_debug_force_rr_cpu, bool, 0644);
+
 /* the per-cpu worker pools */
 static DEFINE_PER_CPU_SHARED_ALIGNED(struct worker_pool [NR_STD_WORKER_POOLS],
 				     cpu_worker_pools);
@@ -1309,10 +1321,17 @@ static bool is_chained_work(struct workqueue_struct *wq)
  */
 static int wq_select_unbound_cpu(int cpu)
 {
+	static bool printed_dbg_warning;
 	int new_cpu;
 
-	if (cpumask_test_cpu(cpu, wq_unbound_cpumask))
-		return cpu;
+	if (likely(!wq_debug_force_rr_cpu)) {
+		if (cpumask_test_cpu(cpu, wq_unbound_cpumask))
+			return cpu;
+	} else if (!printed_dbg_warning) {
+		pr_warn("workqueue: round-robin CPU selection forced, expect performance impact\n");
+		printed_dbg_warning = true;
+	}
+
 	if (cpumask_empty(wq_unbound_cpumask))
 		return cpu;
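
The knob added above is an ordinary module parameter; because workqueue.c is built into the kernel, it can be set at boot as workqueue.debug_force_rr_cpu=1 or flipped at runtime through /sys/module/workqueue/parameters/debug_force_rr_cpu (hence the 0644 mode). The sketch below is a minimal userspace model, not kernel code: it only approximates the round-robin policy that the patched wq_select_unbound_cpu() applies once the local-CPU fast path is skipped. The helpers next_cpu_in_mask() and select_unbound_cpu(), the fixed 4-CPU mask, and the plain arrays standing in for cpumasks and per-CPU data are invented for illustration; the real function continues past the hunk shown here and uses the kernel's cpumask API together with the per-CPU wq_rr_cpu_last counter.

/* Minimal userspace sketch (an assumption, not kernel code) of the forced
 * round-robin selection added by this patch. */
#include <stdbool.h>
#include <stdio.h>

#define NR_CPUS 4

/* stand-ins for wq_unbound_cpumask and the per-CPU wq_rr_cpu_last */
static bool unbound_cpumask[NR_CPUS] = { true, true, true, true };
static int rr_cpu_last[NR_CPUS];
static bool debug_force_rr_cpu = true;	/* as if CONFIG_DEBUG_WQ_FORCE_RR_CPU=y */

/* hypothetical helper: next set CPU after @cpu, wrapping around the mask */
static int next_cpu_in_mask(int cpu)
{
	for (int i = 1; i <= NR_CPUS; i++) {
		int c = (cpu + i) % NR_CPUS;

		if (unbound_cpumask[c])
			return c;
	}
	return -1;
}

/* models the selection policy of the patched wq_select_unbound_cpu() */
static int select_unbound_cpu(int cpu)
{
	int new_cpu;

	/* fast path: stay on the local CPU unless round-robin is forced */
	if (!debug_force_rr_cpu && unbound_cpumask[cpu])
		return cpu;

	new_cpu = next_cpu_in_mask(rr_cpu_last[cpu]);
	if (new_cpu < 0)
		return cpu;		/* empty mask: fall back to the local CPU */

	rr_cpu_last[cpu] = new_cpu;	/* remember where the rotation left off */
	return new_cpu;
}

int main(void)
{
	/* with round-robin forced, queueing from CPU 0 cycles 1, 2, 3, 0, ... */
	for (int i = 0; i < 6; i++)
		printf("work %d -> CPU %d\n", i, select_unbound_cpu(0));
	return 0;
}

As in the diff, falling back to the caller's CPU only happens when the unbound cpumask turns out to be empty; the sketch mirrors the cpumask_empty() check at the end of the hunk by returning the local CPU when no candidate is found.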