Diffstat (limited to 'kernel')
-rw-r--r--  kernel/Kconfig.locks          |    2
-rw-r--r--  kernel/Kconfig.preempt        |   26
-rw-r--r--  kernel/Makefile               |    9
-rw-r--r--  kernel/cpu.c                  |  154
-rw-r--r--  kernel/cred.c                 |    2
-rw-r--r--  kernel/debug/kdb/kdb_io.c     |    6
-rw-r--r--  kernel/exit.c                 |    2
-rw-r--r--  kernel/fork.c                 |   33
-rw-r--r--  kernel/futex.c                |   80
-rw-r--r--  kernel/hrtimer.c              |  247
-rw-r--r--  kernel/irq/handle.c           |    3
-rw-r--r--  kernel/irq/manage.c           |   15
-rw-r--r--  kernel/irq/settings.h         |   12
-rw-r--r--  kernel/irq/spurious.c         |   10
-rw-r--r--  kernel/irq_work.c             |    2
-rw-r--r--  kernel/itimer.c               |    1
-rw-r--r--  kernel/ksysfs.c               |   12
-rw-r--r--  kernel/lockdep.c              |    2
-rw-r--r--  kernel/panic.c                |    2
-rw-r--r--  kernel/posix-cpu-timers.c     |  197
-rw-r--r--  kernel/posix-timers.c         |   37
-rw-r--r--  kernel/power/hibernate.c      |    7
-rw-r--r--  kernel/power/suspend.c        |    4
-rw-r--r--  kernel/printk.c               |  101
-rw-r--r--  kernel/rcupdate.c             |    2
-rw-r--r--  kernel/rcutiny.c              |    2
-rw-r--r--  kernel/rcutree.c              |   17
-rw-r--r--  kernel/rcutree_plugin.h       |    9
-rw-r--r--  kernel/relay.c                |   14
-rw-r--r--  kernel/res_counter.c          |    8
-rw-r--r--  kernel/rt.c                   |  442
-rw-r--r--  kernel/rtmutex.c              |  473
-rw-r--r--  kernel/rtmutex_common.h       |   12
-rw-r--r--  kernel/sched/core.c           |  298
-rw-r--r--  kernel/sched/debug.c          |    7
-rw-r--r--  kernel/sched/features.h       |    4
-rw-r--r--  kernel/sched/rt.c             |    1
-rw-r--r--  kernel/signal.c               |  137
-rw-r--r--  kernel/softirq.c              |  570
-rw-r--r--  kernel/spinlock.c             |    7
-rw-r--r--  kernel/stop_machine.c         |   79
-rw-r--r--  kernel/time/jiffies.c         |    4
-rw-r--r--  kernel/time/ntp.c             |   26
-rw-r--r--  kernel/time/tick-common.c     |   10
-rw-r--r--  kernel/time/tick-internal.h   |    3
-rw-r--r--  kernel/time/tick-sched.c      |   25
-rw-r--r--  kernel/time/timekeeping.c     |  114
-rw-r--r--  kernel/timer.c                |  108
-rw-r--r--  kernel/trace/Kconfig          |  104
-rw-r--r--  kernel/trace/Makefile         |    4
-rw-r--r--  kernel/trace/latency_hist.c   | 1170
-rw-r--r--  kernel/trace/ring_buffer.c    |  151
-rw-r--r--  kernel/trace/trace.c          |   35
-rw-r--r--  kernel/trace/trace.h          |    1
-rw-r--r--  kernel/trace/trace_events.c   |    3
-rw-r--r--  kernel/trace/trace_irqsoff.c  |   11
-rw-r--r--  kernel/trace/trace_output.c   |    5
-rw-r--r--  kernel/user.c                 |    4
-rw-r--r--  kernel/watchdog.c             |   16
-rw-r--r--  kernel/workqueue.c            |  637
-rw-r--r--  kernel/workqueue_sched.h      |    5
61 files changed, 4526 insertions(+), 958 deletions(-)
diff --git a/kernel/Kconfig.locks b/kernel/Kconfig.locks
index 2251882daf53..033ebc06c262 100644
--- a/kernel/Kconfig.locks
+++ b/kernel/Kconfig.locks
@@ -199,4 +199,4 @@ config INLINE_WRITE_UNLOCK_IRQRESTORE
def_bool !DEBUG_SPINLOCK && ARCH_INLINE_WRITE_UNLOCK_IRQRESTORE
config MUTEX_SPIN_ON_OWNER
- def_bool SMP && !DEBUG_MUTEXES
+ def_bool SMP && !DEBUG_MUTEXES && !PREEMPT_RT_FULL
diff --git a/kernel/Kconfig.preempt b/kernel/Kconfig.preempt
index 3f9c97419f02..c9f006b2095c 100644
--- a/kernel/Kconfig.preempt
+++ b/kernel/Kconfig.preempt
@@ -1,3 +1,10 @@
+config PREEMPT
+ bool
+ select PREEMPT_COUNT
+
+config PREEMPT_RT_BASE
+ bool
+ select PREEMPT
choice
prompt "Preemption Model"
@@ -33,9 +40,9 @@ config PREEMPT_VOLUNTARY
Select this if you are building a kernel for a desktop system.
-config PREEMPT
+config PREEMPT__LL
bool "Preemptible Kernel (Low-Latency Desktop)"
- select PREEMPT_COUNT
+ select PREEMPT
select UNINLINE_SPIN_UNLOCK if !ARCH_INLINE_SPIN_UNLOCK
help
This option reduces the latency of the kernel by making
@@ -52,6 +59,21 @@ config PREEMPT
embedded system with latency requirements in the milliseconds
range.
+config PREEMPT_RTB
+ bool "Preemptible Kernel (Basic RT)"
+ select PREEMPT_RT_BASE
+ help
+ This option is basically the same as (Low-Latency Desktop) but
+ enables changes which are preliminary for the full preemptible
+ RT kernel.
+
+config PREEMPT_RT_FULL
+ bool "Fully Preemptible Kernel (RT)"
+ depends on IRQ_FORCED_THREADING
+ select PREEMPT_RT_BASE
+ help
+ All and everything
+
endchoice
config PREEMPT_COUNT
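The select chain above makes PREEMPT a hidden symbol that every RT level pulls in. A sketch of the resulting hierarchy as the preprocessor sees it (illustrative only, not part of the patch):

    #if defined(CONFIG_PREEMPT_RT_FULL)
    /* selects PREEMPT_RT_BASE -> PREEMPT -> PREEMPT_COUNT */
    #elif defined(CONFIG_PREEMPT_RT_BASE)
    /* "Basic RT": PREEMPT plus the preparatory RT changes */
    #elif defined(CONFIG_PREEMPT)
    /* plain low-latency desktop preemption */
    #endif

Code should test the weakest symbol that suffices, which is why most hunks below switch on PREEMPT_RT_BASE and only the lock-substitution ones on PREEMPT_RT_FULL.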
diff --git a/kernel/Makefile b/kernel/Makefile
index cb41b9547c9f..f4bf68a76876 100644
--- a/kernel/Makefile
+++ b/kernel/Makefile
@@ -7,8 +7,8 @@ obj-y = fork.o exec_domain.o panic.o printk.o \
sysctl.o sysctl_binary.o capability.o ptrace.o timer.o user.o \
signal.o sys.o kmod.o workqueue.o pid.o \
rcupdate.o extable.o params.o posix-timers.o \
- kthread.o wait.o kfifo.o sys_ni.o posix-cpu-timers.o mutex.o \
- hrtimer.o rwsem.o nsproxy.o srcu.o semaphore.o \
+ kthread.o wait.o kfifo.o sys_ni.o posix-cpu-timers.o \
+ hrtimer.o nsproxy.o srcu.o semaphore.o \
notifier.o ksysfs.o cred.o \
async.o range.o groups.o
@@ -29,7 +29,11 @@ obj-$(CONFIG_FREEZER) += freezer.o
obj-$(CONFIG_PROFILING) += profile.o
obj-$(CONFIG_STACKTRACE) += stacktrace.o
obj-y += time/
+ifneq ($(CONFIG_PREEMPT_RT_FULL),y)
+obj-y += mutex.o
obj-$(CONFIG_DEBUG_MUTEXES) += mutex-debug.o
+obj-y += rwsem.o
+endif
obj-$(CONFIG_LOCKDEP) += lockdep.o
ifeq ($(CONFIG_PROC_FS),y)
obj-$(CONFIG_LOCKDEP) += lockdep_proc.o
@@ -41,6 +45,7 @@ endif
obj-$(CONFIG_RT_MUTEXES) += rtmutex.o
obj-$(CONFIG_DEBUG_RT_MUTEXES) += rtmutex-debug.o
obj-$(CONFIG_RT_MUTEX_TESTER) += rtmutex-tester.o
+obj-$(CONFIG_PREEMPT_RT_FULL) += rt.o
obj-$(CONFIG_GENERIC_ISA_DMA) += dma.o
obj-$(CONFIG_SMP) += smp.o
ifneq ($(CONFIG_SMP),y)
diff --git a/kernel/cpu.c b/kernel/cpu.c
index 2060c6e57027..d79d33ac011c 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -46,7 +46,12 @@ static int cpu_hotplug_disabled;
static struct {
struct task_struct *active_writer;
+#ifdef CONFIG_PREEMPT_RT_FULL
+ /* Makes the lock keep the task's state */
+ spinlock_t lock;
+#else
struct mutex lock; /* Synchronizes accesses to refcount, */
+#endif
/*
* Also blocks the new readers during
* an ongoing cpu hotplug operation.
@@ -54,18 +59,128 @@ static struct {
int refcount;
} cpu_hotplug = {
.active_writer = NULL,
+#ifdef CONFIG_PREEMPT_RT_FULL
+ .lock = __SPIN_LOCK_UNLOCKED(cpu_hotplug.lock),
+#else
.lock = __MUTEX_INITIALIZER(cpu_hotplug.lock),
+#endif
.refcount = 0,
};
+#ifdef CONFIG_PREEMPT_RT_FULL
+# define hotplug_lock() rt_spin_lock(&cpu_hotplug.lock)
+# define hotplug_unlock() rt_spin_unlock(&cpu_hotplug.lock)
+#else
+# define hotplug_lock() mutex_lock(&cpu_hotplug.lock)
+# define hotplug_unlock() mutex_unlock(&cpu_hotplug.lock)
+#endif
+
+struct hotplug_pcp {
+ struct task_struct *unplug;
+ int refcount;
+ struct completion synced;
+};
+
+static DEFINE_PER_CPU(struct hotplug_pcp, hotplug_pcp);
+
+/**
+ * pin_current_cpu - Prevent the current cpu from being unplugged
+ *
+ * Lightweight version of get_online_cpus() to prevent cpu from being
+ * unplugged when code runs in a migration disabled region.
+ *
+ * Must be called with preemption disabled (preempt_count = 1)!
+ */
+void pin_current_cpu(void)
+{
+ struct hotplug_pcp *hp;
+
+retry:
+ hp = &__get_cpu_var(hotplug_pcp);
+
+ if (!hp->unplug || hp->refcount || preempt_count() > 1 ||
+ hp->unplug == current || (current->flags & PF_STOMPER)) {
+ hp->refcount++;
+ return;
+ }
+ preempt_enable();
+ hotplug_lock();
+ hotplug_unlock();
+ preempt_disable();
+ goto retry;
+}
+
+/**
+ * unpin_current_cpu - Allow unplug of current cpu
+ *
+ * Must be called with preemption or interrupts disabled!
+ */
+void unpin_current_cpu(void)
+{
+ struct hotplug_pcp *hp = &__get_cpu_var(hotplug_pcp);
+
+ WARN_ON(hp->refcount <= 0);
+
+ /* This is safe. sync_unplug_thread is pinned to this cpu */
+ if (!--hp->refcount && hp->unplug && hp->unplug != current &&
+ !(current->flags & PF_STOMPER))
+ wake_up_process(hp->unplug);
+}
+
+/*
+ * FIXME: Is this really correct under all circumstances?
+ */
+static int sync_unplug_thread(void *data)
+{
+ struct hotplug_pcp *hp = data;
+
+ preempt_disable();
+ hp->unplug = current;
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ while (hp->refcount) {
+ schedule_preempt_disabled();
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ }
+ set_current_state(TASK_RUNNING);
+ preempt_enable();
+ complete(&hp->synced);
+ return 0;
+}
+
+/*
+ * Start the sync_unplug_thread on the target cpu and wait for it to
+ * complete.
+ */
+static int cpu_unplug_begin(unsigned int cpu)
+{
+ struct hotplug_pcp *hp = &per_cpu(hotplug_pcp, cpu);
+ struct task_struct *tsk;
+
+ init_completion(&hp->synced);
+ tsk = kthread_create(sync_unplug_thread, hp, "sync_unplug/%d", cpu);
+ if (IS_ERR(tsk))
+ return (PTR_ERR(tsk));
+ kthread_bind(tsk, cpu);
+ wake_up_process(tsk);
+ wait_for_completion(&hp->synced);
+ return 0;
+}
+
+static void cpu_unplug_done(unsigned int cpu)
+{
+ struct hotplug_pcp *hp = &per_cpu(hotplug_pcp, cpu);
+
+ hp->unplug = NULL;
+}
+
void get_online_cpus(void)
{
might_sleep();
if (cpu_hotplug.active_writer == current)
return;
- mutex_lock(&cpu_hotplug.lock);
+ hotplug_lock();
cpu_hotplug.refcount++;
- mutex_unlock(&cpu_hotplug.lock);
+ hotplug_unlock();
}
EXPORT_SYMBOL_GPL(get_online_cpus);
@@ -74,10 +189,10 @@ void put_online_cpus(void)
{
if (cpu_hotplug.active_writer == current)
return;
- mutex_lock(&cpu_hotplug.lock);
+ hotplug_lock();
if (!--cpu_hotplug.refcount && unlikely(cpu_hotplug.active_writer))
wake_up_process(cpu_hotplug.active_writer);
- mutex_unlock(&cpu_hotplug.lock);
+ hotplug_unlock();
}
EXPORT_SYMBOL_GPL(put_online_cpus);
@@ -109,11 +224,11 @@ static void cpu_hotplug_begin(void)
cpu_hotplug.active_writer = current;
for (;;) {
- mutex_lock(&cpu_hotplug.lock);
+ hotplug_lock();
if (likely(!cpu_hotplug.refcount))
break;
__set_current_state(TASK_UNINTERRUPTIBLE);
- mutex_unlock(&cpu_hotplug.lock);
+ hotplug_unlock();
schedule();
}
}
@@ -121,7 +236,7 @@ static void cpu_hotplug_begin(void)
static void cpu_hotplug_done(void)
{
cpu_hotplug.active_writer = NULL;
- mutex_unlock(&cpu_hotplug.lock);
+ hotplug_unlock();
}
#else /* #if CONFIG_HOTPLUG_CPU */
@@ -210,13 +325,14 @@ static int __ref take_cpu_down(void *_param)
/* Requires cpu_add_remove_lock to be held */
static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
{
- int err, nr_calls = 0;
+ int mycpu, err, nr_calls = 0;
void *hcpu = (void *)(long)cpu;
unsigned long mod = tasks_frozen ? CPU_TASKS_FROZEN : 0;
struct take_cpu_down_param tcd_param = {
.mod = mod,
.hcpu = hcpu,
};
+ cpumask_var_t cpumask;
if (num_online_cpus() == 1)
return -EBUSY;
@@ -224,7 +340,26 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
if (!cpu_online(cpu))
return -EINVAL;
+ /* Move the downtaker off the unplug cpu */
+ if (!alloc_cpumask_var(&cpumask, GFP_KERNEL))
+ return -ENOMEM;
+ cpumask_andnot(cpumask, cpu_online_mask, cpumask_of(cpu));
+ set_cpus_allowed_ptr(current, cpumask);
+ free_cpumask_var(cpumask);
+ migrate_disable();
+ mycpu = smp_processor_id();
+ if (mycpu == cpu) {
+ printk(KERN_ERR "Yuck! Still on unplug CPU!\n");
+ migrate_enable();
+ return -EBUSY;
+ }
+
cpu_hotplug_begin();
+ err = cpu_unplug_begin(cpu);
+ if (err) {
+ printk("cpu_unplug_begin(%d) failed\n", cpu);
+ goto out_cancel;
+ }
err = __cpu_notify(CPU_DOWN_PREPARE | mod, hcpu, -1, &nr_calls);
if (err) {
@@ -263,6 +398,9 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
check_for_tasks(cpu);
out_release:
+ cpu_unplug_done(cpu);
+out_cancel:
+ migrate_enable();
cpu_hotplug_done();
if (!err)
cpu_notify_nofail(CPU_POST_DEAD | mod, hcpu);
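A minimal sketch of the intended caller of the pin/unpin pair added above; the series wires this into migration-disabled regions (see the comment on pin_current_cpu()), so the explicit pairing shown here is only for illustration:

    static void example_percpu_section(void)
    {
            preempt_disable();
            pin_current_cpu();      /* holds off cpu_unplug_begin() via hp->refcount */

            /* access per-CPU state that must not see this CPU unplugged */

            unpin_current_cpu();    /* dropping the last pin wakes sync_unplug */
            preempt_enable();
    }

If an unplug is already in flight, pin_current_cpu() instead re-enables preemption, blocks on hotplug_lock() until the unplug completes, and retries.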
diff --git a/kernel/cred.c b/kernel/cred.c
index e70683d9ec32..fed7c3fd9477 100644
--- a/kernel/cred.c
+++ b/kernel/cred.c
@@ -36,7 +36,7 @@ static struct kmem_cache *cred_jar;
static struct thread_group_cred init_tgcred = {
.usage = ATOMIC_INIT(2),
.tgid = 0,
- .lock = __SPIN_LOCK_UNLOCKED(init_cred.tgcred.lock),
+ .lock = __SPIN_LOCK_UNLOCKED(init_tgcred.lock),
};
#endif
diff --git a/kernel/debug/kdb/kdb_io.c b/kernel/debug/kdb/kdb_io.c
index bb9520f0f6ff..eb68a9dd0f11 100644
--- a/kernel/debug/kdb/kdb_io.c
+++ b/kernel/debug/kdb/kdb_io.c
@@ -553,7 +553,6 @@ int vkdb_printf(const char *fmt, va_list ap)
int diag;
int linecount;
int logging, saved_loglevel = 0;
- int saved_trap_printk;
int got_printf_lock = 0;
int retlen = 0;
int fnd, len;
@@ -564,8 +563,6 @@ int vkdb_printf(const char *fmt, va_list ap)
unsigned long uninitialized_var(flags);
preempt_disable();
- saved_trap_printk = kdb_trap_printk;
- kdb_trap_printk = 0;
/* Serialize kdb_printf if multiple cpus try to write at once.
* But if any cpu goes recursive in kdb, just print the output,
@@ -821,7 +818,6 @@ kdb_print_out:
} else {
__release(kdb_printf_lock);
}
- kdb_trap_printk = saved_trap_printk;
preempt_enable();
return retlen;
}
@@ -831,9 +827,11 @@ int kdb_printf(const char *fmt, ...)
va_list ap;
int r;
+ kdb_trap_printk++;
va_start(ap, fmt);
r = vkdb_printf(fmt, ap);
va_end(ap);
+ kdb_trap_printk--;
return r;
}
diff --git a/kernel/exit.c b/kernel/exit.c
index d8bd3b425fa7..35ab292cd4d8 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -143,7 +143,7 @@ static void __exit_signal(struct task_struct *tsk)
* Do this under ->siglock, we can race with another thread
* doing sigqueue_free() if we have SIGQUEUE_PREALLOC signals.
*/
- flush_sigqueue(&tsk->pending);
+ flush_task_sigqueue(tsk);
tsk->sighand = NULL;
spin_unlock(&sighand->siglock);
diff --git a/kernel/fork.c b/kernel/fork.c
index b9372a0bff18..52f1846c4966 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -90,7 +90,7 @@ int max_threads; /* tunable limit on nr_threads */
DEFINE_PER_CPU(unsigned long, process_counts) = 0;
-__cacheline_aligned DEFINE_RWLOCK(tasklist_lock); /* outer */
+DEFINE_RWLOCK(tasklist_lock); /* outer */
#ifdef CONFIG_PROVE_RCU
int lockdep_tasklist_lock_is_held(void)
@@ -201,7 +201,18 @@ void __put_task_struct(struct task_struct *tsk)
if (!profile_handoff_task(tsk))
free_task(tsk);
}
+#ifndef CONFIG_PREEMPT_RT_BASE
EXPORT_SYMBOL_GPL(__put_task_struct);
+#else
+void __put_task_struct_cb(struct rcu_head *rhp)
+{
+ struct task_struct *tsk = container_of(rhp, struct task_struct, put_rcu);
+
+ __put_task_struct(tsk);
+
+}
+EXPORT_SYMBOL_GPL(__put_task_struct_cb);
+#endif
/*
* macro override instead of weak attribute alias, to workaround
@@ -561,6 +572,19 @@ void __mmdrop(struct mm_struct *mm)
}
EXPORT_SYMBOL_GPL(__mmdrop);
+#ifdef CONFIG_PREEMPT_RT_BASE
+/*
+ * RCU callback for delayed mm drop. Not strictly rcu, but we don't
+ * want another facility to make this work.
+ */
+void __mmdrop_delayed(struct rcu_head *rhp)
+{
+ struct mm_struct *mm = container_of(rhp, struct mm_struct, delayed_drop);
+
+ __mmdrop(mm);
+}
+#endif
+
/*
* Decrement the use count and release all resources for an mm.
*/
@@ -1096,6 +1120,9 @@ void mm_init_owner(struct mm_struct *mm, struct task_struct *p)
*/
static void posix_cpu_timers_init(struct task_struct *tsk)
{
+#ifdef CONFIG_PREEMPT_RT_BASE
+ tsk->posix_timer_list = NULL;
+#endif
tsk->cputime_expires.prof_exp = 0;
tsk->cputime_expires.virt_exp = 0;
tsk->cputime_expires.sched_exp = 0;
@@ -1204,6 +1231,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
spin_lock_init(&p->alloc_lock);
init_sigpending(&p->pending);
+ p->sigqueue_cache = NULL;
p->utime = p->stime = p->gtime = 0;
p->utimescaled = p->stimescaled = 0;
@@ -1262,6 +1290,9 @@ static struct task_struct *copy_process(unsigned long clone_flags,
p->hardirq_context = 0;
p->softirq_context = 0;
#endif
+#ifdef CONFIG_PREEMPT_RT_FULL
+ p->pagefault_disabled = 0;
+#endif
#ifdef CONFIG_LOCKDEP
p->lockdep_depth = 0; /* no locks held yet */
p->curr_chain_key = 0;
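The new __put_task_struct_cb() only pays off with a matching call_rcu() on the release side. A hedged sketch of how put_task_struct() would be wired up on RT (the put_rcu head is implied by the container_of() above; the actual call site lives in a header outside this diff):

    static inline void put_task_struct_sketch(struct task_struct *t)
    {
            if (atomic_dec_and_test(&t->usage)) {
    #ifdef CONFIG_PREEMPT_RT_BASE
                    /* the final free takes sleeping locks on RT: defer it */
                    call_rcu(&t->put_rcu, __put_task_struct_cb);
    #else
                    __put_task_struct(t);
    #endif
            }
    }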
diff --git a/kernel/futex.c b/kernel/futex.c
index e2b0fb9a0b3b..3ee96ed994e6 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -1423,6 +1423,16 @@ retry_private:
requeue_pi_wake_futex(this, &key2, hb2);
drop_count++;
continue;
+ } else if (ret == -EAGAIN) {
+ /*
+ * Waiter was woken by timeout or
+ * signal and has set pi_blocked_on to
+ * PI_WAKEUP_INPROGRESS before we
+ * tried to enqueue it on the rtmutex.
+ */
+ this->pi_state = NULL;
+ free_pi_state(pi_state);
+ continue;
} else if (ret) {
/* -EDEADLK */
this->pi_state = NULL;
@@ -2267,7 +2277,7 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
struct hrtimer_sleeper timeout, *to = NULL;
struct rt_mutex_waiter rt_waiter;
struct rt_mutex *pi_mutex = NULL;
- struct futex_hash_bucket *hb;
+ struct futex_hash_bucket *hb, *hb2;
union futex_key key2 = FUTEX_KEY_INIT;
struct futex_q q = futex_q_init;
int res, ret;
@@ -2289,8 +2299,7 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
* The waiter is allocated on our stack, manipulated by the requeue
* code while we sleep on uaddr.
*/
- debug_rt_mutex_init_waiter(&rt_waiter);
- rt_waiter.task = NULL;
+ rt_mutex_init_waiter(&rt_waiter, false);
ret = get_futex_key(uaddr2, flags & FLAGS_SHARED, &key2, VERIFY_WRITE);
if (unlikely(ret != 0))
@@ -2311,20 +2320,55 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
/* Queue the futex_q, drop the hb lock, wait for wakeup. */
futex_wait_queue_me(hb, &q, to);
- spin_lock(&hb->lock);
- ret = handle_early_requeue_pi_wakeup(hb, &q, &key2, to);
- spin_unlock(&hb->lock);
- if (ret)
- goto out_put_keys;
+ /*
+ * On RT we must avoid races with requeue and trying to block
+ * on two mutexes (hb->lock and uaddr2's rtmutex) by
+ * serializing access to pi_blocked_on with pi_lock.
+ */
+ raw_spin_lock_irq(&current->pi_lock);
+ if (current->pi_blocked_on) {
+ /*
+ * We have been requeued or are in the process of
+ * being requeued.
+ */
+ raw_spin_unlock_irq(&current->pi_lock);
+ } else {
+ /*
+ * Setting pi_blocked_on to PI_WAKEUP_INPROGRESS
+ * prevents a concurrent requeue from moving us to the
+ * uaddr2 rtmutex. After that we can safely acquire
+ * (and possibly block on) hb->lock.
+ */
+ current->pi_blocked_on = PI_WAKEUP_INPROGRESS;
+ raw_spin_unlock_irq(&current->pi_lock);
+
+ spin_lock(&hb->lock);
+
+ /*
+ * Clean up pi_blocked_on. We might leak it otherwise
+ * when we succeeded with the hb->lock in the fast
+ * path.
+ */
+ raw_spin_lock_irq(&current->pi_lock);
+ current->pi_blocked_on = NULL;
+ raw_spin_unlock_irq(&current->pi_lock);
+
+ ret = handle_early_requeue_pi_wakeup(hb, &q, &key2, to);
+ spin_unlock(&hb->lock);
+ if (ret)
+ goto out_put_keys;
+ }
/*
- * In order for us to be here, we know our q.key == key2, and since
- * we took the hb->lock above, we also know that futex_requeue() has
- * completed and we no longer have to concern ourselves with a wakeup
- * race with the atomic proxy lock acquisition by the requeue code. The
- * futex_requeue dropped our key1 reference and incremented our key2
- * reference count.
+ * In order to be here, we have either been requeued, are in
+ * the process of being requeued, or requeue successfully
+ * acquired uaddr2 on our behalf. If pi_blocked_on was
+ * non-null above, we may be racing with a requeue. Do not
+ * rely on q->lock_ptr to be hb2->lock until after blocking on
+ * hb->lock or hb2->lock. The futex_requeue dropped our key1
+ * reference and incremented our key2 reference count.
*/
+ hb2 = hash_futex(&key2);
/* Check if the requeue code acquired the second futex for us. */
if (!q.rt_waiter) {
@@ -2333,9 +2377,10 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
* did a lock-steal - fix up the PI-state in that case.
*/
if (q.pi_state && (q.pi_state->owner != current)) {
- spin_lock(q.lock_ptr);
+ spin_lock(&hb2->lock);
+ BUG_ON(&hb2->lock != q.lock_ptr);
ret = fixup_pi_state_owner(uaddr2, &q, current);
- spin_unlock(q.lock_ptr);
+ spin_unlock(&hb2->lock);
}
} else {
/*
@@ -2348,7 +2393,8 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
ret = rt_mutex_finish_proxy_lock(pi_mutex, to, &rt_waiter, 1);
debug_rt_mutex_free_waiter(&rt_waiter);
- spin_lock(q.lock_ptr);
+ spin_lock(&hb2->lock);
+ BUG_ON(&hb2->lock != q.lock_ptr);
/*
* Fixup the pi_state owner and possibly acquire the lock if we
* haven't already.
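Condensed timeline of the handshake added above (illustrative): by publishing PI_WAKEUP_INPROGRESS under pi_lock before touching hb->lock, the waiter guarantees it can never end up blocked on two locks at once.

    /*
     *  waiter (futex_wait_requeue_pi)          requeuer (futex_requeue)
     *  ------------------------------          ------------------------
     *  lock(pi_lock)
     *  pi_blocked_on = PI_WAKEUP_INPROGRESS
     *  unlock(pi_lock)                         proxy-lock attempt sees
     *  spin_lock(&hb->lock)                    PI_WAKEUP_INPROGRESS and
     *  pi_blocked_on = NULL                    fails with -EAGAIN; the
     *  handle_early_requeue_pi_wakeup()        waiter is skipped (hunk at
     *  spin_unlock(&hb->lock)                  the top of this file)
     */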
diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
index ae34bf51682b..3991464c3066 100644
--- a/kernel/hrtimer.c
+++ b/kernel/hrtimer.c
@@ -49,6 +49,7 @@
#include <asm/uaccess.h>
#include <trace/events/timer.h>
+#include <trace/events/hist.h>
/*
* The timer bases:
@@ -588,8 +589,7 @@ static int hrtimer_reprogram(struct hrtimer *timer,
* When the callback is running, we do not reprogram the clock event
* device. The timer callback is either running on a different CPU or
* the callback is executed in the hrtimer_interrupt context. The
- * reprogramming is handled either by the softirq, which called the
- * callback or at the end of the hrtimer_interrupt.
+ * reprogramming is handled at the end of the hrtimer_interrupt.
*/
if (hrtimer_callback_running(timer))
return 0;
@@ -624,6 +624,9 @@ static int hrtimer_reprogram(struct hrtimer *timer,
return res;
}
+static void __run_hrtimer(struct hrtimer *timer, ktime_t *now);
+static int hrtimer_rt_defer(struct hrtimer *timer);
+
/*
* Initialize the high resolution related parts of cpu_base
*/
@@ -644,14 +647,23 @@ static inline int hrtimer_enqueue_reprogram(struct hrtimer *timer,
int wakeup)
{
if (base->cpu_base->hres_active && hrtimer_reprogram(timer, base)) {
- if (wakeup) {
- raw_spin_unlock(&base->cpu_base->lock);
- raise_softirq_irqoff(HRTIMER_SOFTIRQ);
- raw_spin_lock(&base->cpu_base->lock);
- } else
- __raise_softirq_irqoff(HRTIMER_SOFTIRQ);
+ if (!wakeup)
+ return -ETIME;
- return 1;
+#ifdef CONFIG_PREEMPT_RT_BASE
+ /*
+ * Move softirq based timers away from the rbtree in
+ * case it expired already. Otherwise we would have a
+ * stale base->first entry until the softirq runs.
+ */
+ if (!hrtimer_rt_defer(timer))
+ return -ETIME;
+#endif
+ raw_spin_unlock(&base->cpu_base->lock);
+ raise_softirq_irqoff(HRTIMER_SOFTIRQ);
+ raw_spin_lock(&base->cpu_base->lock);
+
+ return 0;
}
return 0;
@@ -732,6 +744,11 @@ static inline int hrtimer_enqueue_reprogram(struct hrtimer *timer,
}
static inline void hrtimer_init_hres(struct hrtimer_cpu_base *base) { }
static inline void retrigger_next_event(void *arg) { }
+static inline int hrtimer_reprogram(struct hrtimer *timer,
+ struct hrtimer_clock_base *base)
+{
+ return 0;
+}
#endif /* CONFIG_HIGH_RES_TIMERS */
@@ -846,6 +863,32 @@ u64 hrtimer_forward(struct hrtimer *timer, ktime_t now, ktime_t interval)
}
EXPORT_SYMBOL_GPL(hrtimer_forward);
+#ifdef CONFIG_PREEMPT_RT_BASE
+# define wake_up_timer_waiters(b) wake_up(&(b)->wait)
+
+/**
+ * hrtimer_wait_for_timer - Wait for a running timer
+ *
+ * @timer: timer to wait for
+ *
+ * The function waits in case the timers callback function is
+ * currently executed on the waitqueue of the timer base. The
+ * waitqueue is woken up after the timer callback function has
+ * finished execution.
+ */
+void hrtimer_wait_for_timer(const struct hrtimer *timer)
+{
+ struct hrtimer_clock_base *base = timer->base;
+
+ if (base && base->cpu_base && !timer->irqsafe)
+ wait_event(base->cpu_base->wait,
+ !(timer->state & HRTIMER_STATE_CALLBACK));
+}
+
+#else
+# define wake_up_timer_waiters(b) do { } while (0)
+#endif
+
/*
* enqueue_hrtimer - internal function to (re)start a timer
*
@@ -889,6 +932,11 @@ static void __remove_hrtimer(struct hrtimer *timer,
if (!(timer->state & HRTIMER_STATE_ENQUEUED))
goto out;
+ if (unlikely(!list_empty(&timer->cb_entry))) {
+ list_del_init(&timer->cb_entry);
+ goto out;
+ }
+
next_timer = timerqueue_getnext(&base->active);
timerqueue_del(&base->active, &timer->node);
if (&timer->node == next_timer) {
@@ -985,8 +1033,20 @@ int __hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
*
* XXX send_remote_softirq() ?
*/
- if (leftmost && new_base->cpu_base == &__get_cpu_var(hrtimer_bases))
- hrtimer_enqueue_reprogram(timer, new_base, wakeup);
+ if (leftmost && new_base->cpu_base == &__get_cpu_var(hrtimer_bases)) {
+ ret = hrtimer_enqueue_reprogram(timer, new_base, wakeup);
+ if (ret) {
+ /*
+ * In case we failed to reprogram the timer (mostly
+ * because our current timer has already expired),
+ * remove it again and report a failure. This avoids
+ * stale base->first entries.
+ */
+ debug_deactivate(timer);
+ __remove_hrtimer(timer, new_base,
+ timer->state & HRTIMER_STATE_CALLBACK, 0);
+ }
+ }
unlock_hrtimer_base(timer, &flags);
@@ -1072,7 +1132,7 @@ int hrtimer_cancel(struct hrtimer *timer)
if (ret >= 0)
return ret;
- cpu_relax();
+ hrtimer_wait_for_timer(timer);
}
}
EXPORT_SYMBOL_GPL(hrtimer_cancel);
@@ -1151,6 +1211,7 @@ static void __hrtimer_init(struct hrtimer *timer, clockid_t clock_id,
base = hrtimer_clockid_to_base(clock_id);
timer->base = &cpu_base->clock_base[base];
+ INIT_LIST_HEAD(&timer->cb_entry);
timerqueue_init(&timer->node);
#ifdef CONFIG_TIMER_STATS
@@ -1234,6 +1295,122 @@ static void __run_hrtimer(struct hrtimer *timer, ktime_t *now)
timer->state &= ~HRTIMER_STATE_CALLBACK;
}
+static enum hrtimer_restart hrtimer_wakeup(struct hrtimer *timer);
+
+#ifdef CONFIG_PREEMPT_RT_BASE
+static void hrtimer_rt_reprogram(int restart, struct hrtimer *timer,
+ struct hrtimer_clock_base *base)
+{
+ /*
+ * Note, we clear the callback flag before we requeue the
+ * timer otherwise we trigger the callback_running() check
+ * in hrtimer_reprogram().
+ */
+ timer->state &= ~HRTIMER_STATE_CALLBACK;
+
+ if (restart != HRTIMER_NORESTART) {
+ BUG_ON(hrtimer_active(timer));
+ /*
+ * Enqueue the timer, if it's the leftmost timer then
+ * we need to reprogram it.
+ */
+ if (!enqueue_hrtimer(timer, base))
+ return;
+
+#ifndef CONFIG_HIGH_RES_TIMERS
+ }
+#else
+ if (base->cpu_base->hres_active &&
+ hrtimer_reprogram(timer, base))
+ goto requeue;
+
+ } else if (hrtimer_active(timer)) {
+ /*
+ * If the timer was rearmed on another CPU, reprogram
+ * the event device.
+ */
+ if (&timer->node == base->active.next &&
+ base->cpu_base->hres_active &&
+ hrtimer_reprogram(timer, base))
+ goto requeue;
+ }
+ return;
+
+requeue:
+ /*
+ * Timer is expired. Thus move it from tree to pending list
+ * again.
+ */
+ __remove_hrtimer(timer, base, timer->state, 0);
+ list_add_tail(&timer->cb_entry, &base->expired);
+#endif
+}
+
+/*
+ * The changes in mainline which removed the callback modes from
+ * hrtimer are not yet working with -rt. The non wakeup_process()
+ * based callbacks which involve sleeping locks need to be treated
+ * seperately.
+ */
+static void hrtimer_rt_run_pending(void)
+{
+ enum hrtimer_restart (*fn)(struct hrtimer *);
+ struct hrtimer_cpu_base *cpu_base;
+ struct hrtimer_clock_base *base;
+ struct hrtimer *timer;
+ int index, restart;
+
+ local_irq_disable();
+ cpu_base = &per_cpu(hrtimer_bases, smp_processor_id());
+
+ raw_spin_lock(&cpu_base->lock);
+
+ for (index = 0; index < HRTIMER_MAX_CLOCK_BASES; index++) {
+ base = &cpu_base->clock_base[index];
+
+ while (!list_empty(&base->expired)) {
+ timer = list_first_entry(&base->expired,
+ struct hrtimer, cb_entry);
+
+ /*
+ * Same as the above __run_hrtimer function
+ * just we run with interrupts enabled.
+ */
+ debug_hrtimer_deactivate(timer);
+ __remove_hrtimer(timer, base, HRTIMER_STATE_CALLBACK, 0);
+ timer_stats_account_hrtimer(timer);
+ fn = timer->function;
+
+ raw_spin_unlock_irq(&cpu_base->lock);
+ restart = fn(timer);
+ raw_spin_lock_irq(&cpu_base->lock);
+
+ hrtimer_rt_reprogram(restart, timer, base);
+ }
+ }
+
+ raw_spin_unlock_irq(&cpu_base->lock);
+
+ wake_up_timer_waiters(cpu_base);
+}
+
+static int hrtimer_rt_defer(struct hrtimer *timer)
+{
+ if (timer->irqsafe)
+ return 0;
+
+ __remove_hrtimer(timer, timer->base, timer->state, 0);
+ list_add_tail(&timer->cb_entry, &timer->base->expired);
+ return 1;
+}
+
+#else
+
+static inline void hrtimer_rt_run_pending(void) { }
+static inline int hrtimer_rt_defer(struct hrtimer *timer) { return 0; }
+
+#endif
+
#ifdef CONFIG_HIGH_RES_TIMERS
/*
@@ -1244,7 +1421,7 @@ void hrtimer_interrupt(struct clock_event_device *dev)
{
struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
ktime_t expires_next, now, entry_time, delta;
- int i, retries = 0;
+ int i, retries = 0, raise = 0;
BUG_ON(!cpu_base->hres_active);
cpu_base->nr_events++;
@@ -1280,6 +1457,14 @@ retry:
timer = container_of(node, struct hrtimer, node);
+ trace_hrtimer_interrupt(raw_smp_processor_id(),
+ ktime_to_ns(ktime_sub(
+ hrtimer_get_expires(timer), basenow)),
+ current,
+ timer->function == hrtimer_wakeup ?
+ container_of(timer, struct hrtimer_sleeper,
+ timer)->task : NULL);
+
/*
* The immediate goal for using the softexpires is
* minimizing wakeups, not running timers at the
@@ -1303,7 +1488,10 @@ retry:
break;
}
- __run_hrtimer(timer, &basenow);
+ if (!hrtimer_rt_defer(timer))
+ __run_hrtimer(timer, &basenow);
+ else
+ raise = 1;
}
}
@@ -1318,6 +1506,10 @@ retry:
if (expires_next.tv64 == KTIME_MAX ||
!tick_program_event(expires_next, 0)) {
cpu_base->hang_detected = 0;
+
+ if (raise)
+ raise_softirq_irqoff(HRTIMER_SOFTIRQ);
+
return;
}
@@ -1393,17 +1585,17 @@ void hrtimer_peek_ahead_timers(void)
local_irq_restore(flags);
}
-static void run_hrtimer_softirq(struct softirq_action *h)
-{
- hrtimer_peek_ahead_timers();
-}
-
#else /* CONFIG_HIGH_RES_TIMERS */
static inline void __hrtimer_peek_ahead_timers(void) { }
#endif /* !CONFIG_HIGH_RES_TIMERS */
+static void run_hrtimer_softirq(struct softirq_action *h)
+{
+ hrtimer_rt_run_pending();
+}
+
/*
* Called from timer softirq every jiffy, expire hrtimers:
*
@@ -1436,7 +1628,7 @@ void hrtimer_run_queues(void)
struct timerqueue_node *node;
struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
struct hrtimer_clock_base *base;
- int index, gettime = 1;
+ int index, gettime = 1, raise = 0;
if (hrtimer_hres_active())
return;
@@ -1461,10 +1653,16 @@ void hrtimer_run_queues(void)
hrtimer_get_expires_tv64(timer))
break;
- __run_hrtimer(timer, &base->softirq_time);
+ if (!hrtimer_rt_defer(timer))
+ __run_hrtimer(timer, &base->softirq_time);
+ else
+ raise = 1;
}
raw_spin_unlock(&cpu_base->lock);
}
+
+ if (raise)
+ raise_softirq_irqoff(HRTIMER_SOFTIRQ);
}
/*
@@ -1486,6 +1684,7 @@ static enum hrtimer_restart hrtimer_wakeup(struct hrtimer *timer)
void hrtimer_init_sleeper(struct hrtimer_sleeper *sl, struct task_struct *task)
{
sl->timer.function = hrtimer_wakeup;
+ sl->timer.irqsafe = 1;
sl->task = task;
}
EXPORT_SYMBOL_GPL(hrtimer_init_sleeper);
@@ -1624,9 +1823,13 @@ static void __cpuinit init_hrtimers_cpu(int cpu)
for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) {
cpu_base->clock_base[i].cpu_base = cpu_base;
timerqueue_init_head(&cpu_base->clock_base[i].active);
+ INIT_LIST_HEAD(&cpu_base->clock_base[i].expired);
}
hrtimer_init_hres(cpu_base);
+#ifdef CONFIG_PREEMPT_RT_BASE
+ init_waitqueue_head(&cpu_base->wait);
+#endif
}
#ifdef CONFIG_HOTPLUG_CPU
@@ -1739,9 +1942,7 @@ void __init hrtimers_init(void)
hrtimer_cpu_notify(&hrtimers_nb, (unsigned long)CPU_UP_PREPARE,
(void *)(long)smp_processor_id());
register_cpu_notifier(&hrtimers_nb);
-#ifdef CONFIG_HIGH_RES_TIMERS
open_softirq(HRTIMER_SOFTIRQ, run_hrtimer_softirq);
-#endif
}
/**
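On RT, hrtimer_rt_defer() moves every timer whose ->irqsafe flag is clear onto the per-base expired list, and hrtimer_rt_run_pending() runs it from the softirq with interrupts enabled. A sketch of a wakeup-only timer opting back into hard-irq expiry, mirroring what hrtimer_init_sleeper() does above (all names are made up):

    static struct task_struct *waiter;      /* hypothetical task to wake */

    static enum hrtimer_restart wake_fn(struct hrtimer *t)
    {
            wake_up_process(waiter);        /* a bare wakeup is hard-irq safe */
            return HRTIMER_NORESTART;
    }

    static void wake_timer_setup(struct hrtimer *t)
    {
            hrtimer_init(t, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
            t->function = wake_fn;
            t->irqsafe = 1;         /* run from hrtimer_interrupt(), not softirq */
    }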
diff --git a/kernel/irq/handle.c b/kernel/irq/handle.c
index bdb180325551..982793c367dc 100644
--- a/kernel/irq/handle.c
+++ b/kernel/irq/handle.c
@@ -172,8 +172,11 @@ handle_irq_event_percpu(struct irq_desc *desc, struct irqaction *action)
action = action->next;
} while (action);
+#ifndef CONFIG_PREEMPT_RT_FULL
+ /* FIXME: Can we unbreak that? */
if (random & IRQF_SAMPLE_RANDOM)
add_interrupt_randomness(irq);
+#endif
if (!noirqdebug)
note_interrupt(irq, desc, retval);
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 89a3ea82569b..cd4eb0a45ce1 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -18,6 +18,7 @@
#include "internals.h"
#ifdef CONFIG_IRQ_FORCED_THREADING
+# ifndef CONFIG_PREEMPT_RT_BASE
__read_mostly bool force_irqthreads;
static int __init setup_forced_irqthreads(char *arg)
@@ -26,6 +27,7 @@ static int __init setup_forced_irqthreads(char *arg)
return 0;
}
early_param("threadirqs", setup_forced_irqthreads);
+# endif
#endif
/**
@@ -747,7 +749,15 @@ irq_forced_thread_fn(struct irq_desc *desc, struct irqaction *action)
local_bh_disable();
ret = action->thread_fn(action->irq, action->dev_id);
irq_finalize_oneshot(desc, action);
- local_bh_enable();
+ /*
+ * Interrupts which have real time requirements can be set up
+ * to avoid softirq processing in the thread handler. This is
+ * safe as these interrupts do not raise soft interrupts.
+ */
+ if (irq_settings_no_softirq_call(desc))
+ _local_bh_enable();
+ else
+ local_bh_enable();
return ret;
}
@@ -1069,6 +1079,9 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
irqd_set(&desc->irq_data, IRQD_NO_BALANCING);
}
+ if (new->flags & IRQF_NO_SOFTIRQ_CALL)
+ irq_settings_set_no_softirq_call(desc);
+
/* Set default affinity mask once everything is setup */
setup_affinity(irq, desc, mask);
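Consumer-side sketch for IRQF_NO_SOFTIRQ_CALL: a threaded handler with hard RT requirements that is known never to raise softirqs sets the flag, so irq_forced_thread_fn() above finishes with _local_bh_enable() instead of running softirq processing in the handler's context (device and names are hypothetical):

    static irqreturn_t rtdev_thread_fn(int irq, void *dev_id)
    {
            /* must not raise softirqs anywhere in here */
            return IRQ_HANDLED;
    }

    static int rtdev_setup_irq(unsigned int irq, void *dev)
    {
            return request_threaded_irq(irq, NULL, rtdev_thread_fn,
                                        IRQF_ONESHOT | IRQF_NO_SOFTIRQ_CALL,
                                        "rtdev", dev);
    }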
diff --git a/kernel/irq/settings.h b/kernel/irq/settings.h
index 1162f1030f18..0d2c381c2bdb 100644
--- a/kernel/irq/settings.h
+++ b/kernel/irq/settings.h
@@ -14,6 +14,7 @@ enum {
_IRQ_NO_BALANCING = IRQ_NO_BALANCING,
_IRQ_NESTED_THREAD = IRQ_NESTED_THREAD,
_IRQ_PER_CPU_DEVID = IRQ_PER_CPU_DEVID,
+ _IRQ_NO_SOFTIRQ_CALL = IRQ_NO_SOFTIRQ_CALL,
_IRQF_MODIFY_MASK = IRQF_MODIFY_MASK,
};
@@ -26,6 +27,7 @@ enum {
#define IRQ_NOAUTOEN GOT_YOU_MORON
#define IRQ_NESTED_THREAD GOT_YOU_MORON
#define IRQ_PER_CPU_DEVID GOT_YOU_MORON
+#define IRQ_NO_SOFTIRQ_CALL GOT_YOU_MORON
#undef IRQF_MODIFY_MASK
#define IRQF_MODIFY_MASK GOT_YOU_MORON
@@ -36,6 +38,16 @@ irq_settings_clr_and_set(struct irq_desc *desc, u32 clr, u32 set)
desc->status_use_accessors |= (set & _IRQF_MODIFY_MASK);
}
+static inline bool irq_settings_no_softirq_call(struct irq_desc *desc)
+{
+ return desc->status_use_accessors & _IRQ_NO_SOFTIRQ_CALL;
+}
+
+static inline void irq_settings_set_no_softirq_call(struct irq_desc *desc)
+{
+ desc->status_use_accessors |= _IRQ_NO_SOFTIRQ_CALL;
+}
+
static inline bool irq_settings_is_per_cpu(struct irq_desc *desc)
{
return desc->status_use_accessors & _IRQ_PER_CPU;
diff --git a/kernel/irq/spurious.c b/kernel/irq/spurious.c
index 611cd6003c45..d1c80fa035b7 100644
--- a/kernel/irq/spurious.c
+++ b/kernel/irq/spurious.c
@@ -341,6 +341,11 @@ MODULE_PARM_DESC(noirqdebug, "Disable irq lockup detection when true");
static int __init irqfixup_setup(char *str)
{
+#ifdef CONFIG_PREEMPT_RT_BASE
+ printk(KERN_WARNING "irqfixup boot option not supported "
+ "w/ CONFIG_PREEMPT_RT\n");
+ return 1;
+#endif
irqfixup = 1;
printk(KERN_WARNING "Misrouted IRQ fixup support enabled.\n");
printk(KERN_WARNING "This may impact system performance.\n");
@@ -353,6 +358,11 @@ module_param(irqfixup, int, 0644);
static int __init irqpoll_setup(char *str)
{
+#ifdef CONFIG_PREEMPT_RT_BASE
+ printk(KERN_WARNING "irqpoll boot option not supported "
+ "w/ CONFIG_PREEMPT_RT\n");
+ return 1;
+#endif
irqfixup = 2;
printk(KERN_WARNING "Misrouted IRQ fixup and polling support "
"enabled\n");
diff --git a/kernel/irq_work.c b/kernel/irq_work.c
index 1588e3b2871b..170c2eabc00f 100644
--- a/kernel/irq_work.c
+++ b/kernel/irq_work.c
@@ -107,8 +107,10 @@ void irq_work_run(void)
if (llist_empty(this_list))
return;
+#ifndef CONFIG_PREEMPT_RT_FULL
BUG_ON(!in_irq());
BUG_ON(!irqs_disabled());
+#endif
llnode = llist_del_all(this_list);
while (llnode != NULL) {
diff --git a/kernel/itimer.c b/kernel/itimer.c
index 8d262b467573..d0513909d663 100644
--- a/kernel/itimer.c
+++ b/kernel/itimer.c
@@ -213,6 +213,7 @@ again:
/* We are sharing ->siglock with it_real_fn() */
if (hrtimer_try_to_cancel(timer) < 0) {
spin_unlock_irq(&tsk->sighand->siglock);
+ hrtimer_wait_for_timer(&tsk->signal->real_timer);
goto again;
}
expires = timeval_to_ktime(value->it_value);
diff --git a/kernel/ksysfs.c b/kernel/ksysfs.c
index 4e316e1acf58..a546d3303a2c 100644
--- a/kernel/ksysfs.c
+++ b/kernel/ksysfs.c
@@ -133,6 +133,15 @@ KERNEL_ATTR_RO(vmcoreinfo);
#endif /* CONFIG_KEXEC */
+#if defined(CONFIG_PREEMPT_RT_FULL)
+static ssize_t realtime_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ return sprintf(buf, "%d\n", 1);
+}
+KERNEL_ATTR_RO(realtime);
+#endif
+
/* whether file capabilities are enabled */
static ssize_t fscaps_show(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
@@ -182,6 +191,9 @@ static struct attribute * kernel_attrs[] = {
&kexec_crash_size_attr.attr,
&vmcoreinfo_attr.attr,
#endif
+#ifdef CONFIG_PREEMPT_RT_FULL
+ &realtime_attr.attr,
+#endif
NULL
};
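The attribute appears as /sys/kernel/realtime and always reads "1"; on a non-RT kernel the file does not exist at all, so userspace can probe for RT like this (illustrative):

    #include <stdio.h>

    int kernel_is_preempt_rt(void)
    {
            FILE *f = fopen("/sys/kernel/realtime", "r");
            int rt = 0;

            if (f) {
                    rt = (fgetc(f) == '1');
                    fclose(f);
            }
            return rt;
    }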
diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index ea9ee4518c35..6537c1ccbe28 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -3495,6 +3495,7 @@ static void check_flags(unsigned long flags)
}
}
+#ifndef CONFIG_PREEMPT_RT_FULL
/*
* We dont accurately track softirq state in e.g.
* hardirq contexts (such as on 4KSTACKS), so only
@@ -3509,6 +3510,7 @@ static void check_flags(unsigned long flags)
DEBUG_LOCKS_WARN_ON(!current->softirqs_enabled);
}
}
+#endif
if (!debug_locks)
print_irqtrace_events(current);
diff --git a/kernel/panic.c b/kernel/panic.c
index 8ed89a175d79..f1c847a7c21d 100644
--- a/kernel/panic.c
+++ b/kernel/panic.c
@@ -363,9 +363,11 @@ static u64 oops_id;
static int init_oops_id(void)
{
+#ifndef CONFIG_PREEMPT_RT_FULL
if (!oops_id)
get_random_bytes(&oops_id, sizeof(oops_id));
else
+#endif
oops_id++;
return 0;
diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c
index 125cb67daa21..2af6ea68dfe8 100644
--- a/kernel/posix-cpu-timers.c
+++ b/kernel/posix-cpu-timers.c
@@ -682,7 +682,7 @@ static int posix_cpu_timer_set(struct k_itimer *timer, int flags,
/*
* Disarm any old timer after extracting its expiry time.
*/
- BUG_ON(!irqs_disabled());
+ BUG_ON_NONRT(!irqs_disabled());
ret = 0;
old_incr = timer->it.cpu.incr;
@@ -1198,7 +1198,7 @@ void posix_cpu_timer_schedule(struct k_itimer *timer)
/*
* Now re-arm for the new expiry time.
*/
- BUG_ON(!irqs_disabled());
+ BUG_ON_NONRT(!irqs_disabled());
arm_timer(timer);
spin_unlock(&p->sighand->siglock);
@@ -1262,10 +1262,11 @@ static inline int fastpath_timer_check(struct task_struct *tsk)
sig = tsk->signal;
if (sig->cputimer.running) {
struct task_cputime group_sample;
+ unsigned long flags;
- raw_spin_lock(&sig->cputimer.lock);
+ raw_spin_lock_irqsave(&sig->cputimer.lock, flags);
group_sample = sig->cputimer.cputime;
- raw_spin_unlock(&sig->cputimer.lock);
+ raw_spin_unlock_irqrestore(&sig->cputimer.lock, flags);
if (task_cputime_expired(&group_sample, &sig->cputime_expires))
return 1;
@@ -1279,13 +1280,13 @@ static inline int fastpath_timer_check(struct task_struct *tsk)
* already updated our counts. We need to check if any timers fire now.
* Interrupts are disabled.
*/
-void run_posix_cpu_timers(struct task_struct *tsk)
+static void __run_posix_cpu_timers(struct task_struct *tsk)
{
LIST_HEAD(firing);
struct k_itimer *timer, *next;
unsigned long flags;
- BUG_ON(!irqs_disabled());
+ BUG_ON_NONRT(!irqs_disabled());
/*
* The fast path checks that there are no expired thread or thread
@@ -1343,6 +1344,190 @@ void run_posix_cpu_timers(struct task_struct *tsk)
}
}
+#ifdef CONFIG_PREEMPT_RT_BASE
+#include <linux/kthread.h>
+#include <linux/cpu.h>
+DEFINE_PER_CPU(struct task_struct *, posix_timer_task);
+DEFINE_PER_CPU(struct task_struct *, posix_timer_tasklist);
+
+static int posix_cpu_timers_thread(void *data)
+{
+ int cpu = (long)data;
+
+ BUG_ON(per_cpu(posix_timer_task,cpu) != current);
+
+ while (!kthread_should_stop()) {
+ struct task_struct *tsk = NULL;
+ struct task_struct *next = NULL;
+
+ if (cpu_is_offline(cpu))
+ goto wait_to_die;
+
+ /* grab task list */
+ raw_local_irq_disable();
+ tsk = per_cpu(posix_timer_tasklist, cpu);
+ per_cpu(posix_timer_tasklist, cpu) = NULL;
+ raw_local_irq_enable();
+
+ /* it's possible the list is empty; if so, sleep until woken */
+ if (!tsk) {
+ set_current_state(TASK_INTERRUPTIBLE);
+ schedule();
+ __set_current_state(TASK_RUNNING);
+ continue;
+ }
+
+ /* Process task list */
+ while (1) {
+ /* save next */
+ next = tsk->posix_timer_list;
+
+ /* run the task timers, clear its ptr and
+ * unreference it
+ */
+ __run_posix_cpu_timers(tsk);
+ tsk->posix_timer_list = NULL;
+ put_task_struct(tsk);
+
+ /* check if this is the last on the list */
+ if (next == tsk)
+ break;
+ tsk = next;
+ }
+ }
+ return 0;
+
+wait_to_die:
+ /* Wait for kthread_stop */
+ set_current_state(TASK_INTERRUPTIBLE);
+ while (!kthread_should_stop()) {
+ schedule();
+ set_current_state(TASK_INTERRUPTIBLE);
+ }
+ __set_current_state(TASK_RUNNING);
+ return 0;
+}
+
+static inline int __fastpath_timer_check(struct task_struct *tsk)
+{
+ /* tsk == current, ensure it is safe to use ->signal/sighand */
+ if (unlikely(tsk->exit_state))
+ return 0;
+
+ if (!task_cputime_zero(&tsk->cputime_expires))
+ return 1;
+
+ if (!task_cputime_zero(&tsk->signal->cputime_expires))
+ return 1;
+
+ return 0;
+}
+
+void run_posix_cpu_timers(struct task_struct *tsk)
+{
+ unsigned long cpu = smp_processor_id();
+ struct task_struct *tasklist;
+
+ BUG_ON(!irqs_disabled());
+ if(!per_cpu(posix_timer_task, cpu))
+ return;
+ /* get per-cpu references */
+ tasklist = per_cpu(posix_timer_tasklist, cpu);
+
+ /* check to see if we're already queued */
+ if (!tsk->posix_timer_list && __fastpath_timer_check(tsk)) {
+ get_task_struct(tsk);
+ if (tasklist) {
+ tsk->posix_timer_list = tasklist;
+ } else {
+ /*
+ * The list is terminated by a self-pointing
+ * task_struct
+ */
+ tsk->posix_timer_list = tsk;
+ }
+ per_cpu(posix_timer_tasklist, cpu) = tsk;
+
+ wake_up_process(per_cpu(posix_timer_task, cpu));
+ }
+}
+
+/*
+ * posix_cpu_thread_call - callback that gets triggered when a CPU is added.
+ * Here we can start up the necessary migration thread for the new CPU.
+ */
+static int posix_cpu_thread_call(struct notifier_block *nfb,
+ unsigned long action, void *hcpu)
+{
+ int cpu = (long)hcpu;
+ struct task_struct *p;
+ struct sched_param param;
+
+ switch (action) {
+ case CPU_UP_PREPARE:
+ p = kthread_create(posix_cpu_timers_thread, hcpu,
+ "posixcputmr/%d",cpu);
+ if (IS_ERR(p))
+ return NOTIFY_BAD;
+ p->flags |= PF_NOFREEZE;
+ kthread_bind(p, cpu);
+ /* Must be high prio to avoid getting starved */
+ param.sched_priority = MAX_RT_PRIO-1;
+ sched_setscheduler(p, SCHED_FIFO, &param);
+ per_cpu(posix_timer_task,cpu) = p;
+ break;
+ case CPU_ONLINE:
+ /* Strictly unnecessary, as the first user will wake it. */
+ wake_up_process(per_cpu(posix_timer_task,cpu));
+ break;
+#ifdef CONFIG_HOTPLUG_CPU
+ case CPU_UP_CANCELED:
+ /* Unbind it from offline cpu so it can run. Fall thru. */
+ kthread_bind(per_cpu(posix_timer_task, cpu),
+ cpumask_any(cpu_online_mask));
+ kthread_stop(per_cpu(posix_timer_task,cpu));
+ per_cpu(posix_timer_task,cpu) = NULL;
+ break;
+ case CPU_DEAD:
+ kthread_stop(per_cpu(posix_timer_task,cpu));
+ per_cpu(posix_timer_task,cpu) = NULL;
+ break;
+#endif
+ }
+ return NOTIFY_OK;
+}
+
+/* Register at highest priority so that task migration (migrate_all_tasks)
+ * happens before everything else.
+ */
+static struct notifier_block __devinitdata posix_cpu_thread_notifier = {
+ .notifier_call = posix_cpu_thread_call,
+ .priority = 10
+};
+
+static int __init posix_cpu_thread_init(void)
+{
+ void *hcpu = (void *)(long)smp_processor_id();
+ /* Start one for boot CPU. */
+ unsigned long cpu;
+
+ /* init the per-cpu posix_timer_tasklets */
+ for_each_possible_cpu(cpu)
+ per_cpu(posix_timer_tasklist, cpu) = NULL;
+
+ posix_cpu_thread_call(&posix_cpu_thread_notifier, CPU_UP_PREPARE, hcpu);
+ posix_cpu_thread_call(&posix_cpu_thread_notifier, CPU_ONLINE, hcpu);
+ register_cpu_notifier(&posix_cpu_thread_notifier);
+ return 0;
+}
+early_initcall(posix_cpu_thread_init);
+#else /* CONFIG_PREEMPT_RT_BASE */
+void run_posix_cpu_timers(struct task_struct *tsk)
+{
+ __run_posix_cpu_timers(tsk);
+}
+#endif /* CONFIG_PREEMPT_RT_BASE */
+
/*
* Set one of the process-wide special case CPU timers or RLIMIT_CPU.
* The tsk->sighand->siglock must be held by the caller.
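Note the list convention used by the thread above: tsk->posix_timer_list == NULL means "not queued", so the tail of the per-CPU list points to itself instead of to NULL. The enqueue in run_posix_cpu_timers() therefore reduces to:

    /* sketch of the enqueue step shown above */
    get_task_struct(tsk);
    tsk->posix_timer_list = tasklist ? tasklist : tsk;  /* self == tail */
    per_cpu(posix_timer_tasklist, cpu) = tsk;
    wake_up_process(per_cpu(posix_timer_task, cpu));

and the consumer detects the end of the list with next == tsk.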
diff --git a/kernel/posix-timers.c b/kernel/posix-timers.c
index 69185ae6b701..6a74800a6f66 100644
--- a/kernel/posix-timers.c
+++ b/kernel/posix-timers.c
@@ -439,6 +439,7 @@ static enum hrtimer_restart posix_timer_fn(struct hrtimer *timer)
static struct pid *good_sigevent(sigevent_t * event)
{
struct task_struct *rtn = current->group_leader;
+ int sig = event->sigev_signo;
if ((event->sigev_notify & SIGEV_THREAD_ID ) &&
(!(rtn = find_task_by_vpid(event->sigev_notify_thread_id)) ||
@@ -447,7 +448,8 @@ static struct pid *good_sigevent(sigevent_t * event)
return NULL;
if (((event->sigev_notify & ~SIGEV_THREAD_ID) != SIGEV_NONE) &&
- ((event->sigev_signo <= 0) || (event->sigev_signo > SIGRTMAX)))
+ (sig <= 0 || sig > SIGRTMAX || sig_kernel_only(sig) ||
+ sig_kernel_coredump(sig)))
return NULL;
return task_pid(rtn);
@@ -764,6 +766,20 @@ SYSCALL_DEFINE1(timer_getoverrun, timer_t, timer_id)
return overrun;
}
+/*
+ * Protected by RCU!
+ */
+static void timer_wait_for_callback(struct k_clock *kc, struct k_itimer *timr)
+{
+#ifdef CONFIG_PREEMPT_RT_FULL
+ if (kc->timer_set == common_timer_set)
+ hrtimer_wait_for_timer(&timr->it.real.timer);
+ else
+ /* FIXME: Whacky hack for posix-cpu-timers */
+ schedule_timeout(1);
+#endif
+}
+
/* Set a POSIX.1b interval timer. */
/* timr->it_lock is taken. */
static int
@@ -841,6 +857,7 @@ retry:
if (!timr)
return -EINVAL;
+ rcu_read_lock();
kc = clockid_to_kclock(timr->it_clock);
if (WARN_ON_ONCE(!kc || !kc->timer_set))
error = -EINVAL;
@@ -849,9 +866,12 @@ retry:
unlock_timer(timr, flag);
if (error == TIMER_RETRY) {
+ timer_wait_for_callback(kc, timr);
rtn = NULL; // We already got the old time...
+ rcu_read_unlock();
goto retry;
}
+ rcu_read_unlock();
if (old_setting && !error &&
copy_to_user(old_setting, &old_spec, sizeof (old_spec)))
@@ -889,10 +909,15 @@ retry_delete:
if (!timer)
return -EINVAL;
+ rcu_read_lock();
if (timer_delete_hook(timer) == TIMER_RETRY) {
unlock_timer(timer, flags);
+ timer_wait_for_callback(clockid_to_kclock(timer->it_clock),
+ timer);
+ rcu_read_unlock();
goto retry_delete;
}
+ rcu_read_unlock();
spin_lock(&current->sighand->siglock);
list_del(&timer->list);
@@ -918,8 +943,18 @@ static void itimer_delete(struct k_itimer *timer)
retry_delete:
spin_lock_irqsave(&timer->it_lock, flags);
+ /* On RT we can race with a deletion */
+ if (!timer->it_signal) {
+ unlock_timer(timer, flags);
+ return;
+ }
+
if (timer_delete_hook(timer) == TIMER_RETRY) {
+ rcu_read_lock();
unlock_timer(timer, flags);
+ timer_wait_for_callback(clockid_to_kclock(timer->it_clock),
+ timer);
+ rcu_read_unlock();
goto retry_delete;
}
list_del(&timer->list);
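The rcu_read_lock() brackets added above pin the k_itimer across the unlock: once unlock_timer() drops it_lock, a concurrent deletion may free the timer while timer_wait_for_callback() still dereferences it. Reduced to its core, the pattern is (sketch):

    rcu_read_lock();
    unlock_timer(timr, flags);              /* timer may now be deleted ... */
    timer_wait_for_callback(kc, timr);      /* ... but RCU keeps timr valid */
    rcu_read_unlock();

This presumes the timer's memory is RCU-protected, which the "Protected by RCU!" comment above indicates is the case here.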
diff --git a/kernel/power/hibernate.c b/kernel/power/hibernate.c
index e09dfbfeecee..1aa6e1f7fb65 100644
--- a/kernel/power/hibernate.c
+++ b/kernel/power/hibernate.c
@@ -270,6 +270,8 @@ static int create_image(int platform_mode)
local_irq_disable();
+ system_state = SYSTEM_SUSPEND;
+
error = syscore_suspend();
if (error) {
printk(KERN_ERR "PM: Some system devices failed to power down, "
@@ -297,6 +299,7 @@ static int create_image(int platform_mode)
syscore_resume();
Enable_irqs:
+ system_state = SYSTEM_RUNNING;
local_irq_enable();
Enable_cpus:
@@ -420,6 +423,7 @@ static int resume_target_kernel(bool platform_mode)
goto Enable_cpus;
local_irq_disable();
+ system_state = SYSTEM_SUSPEND;
error = syscore_suspend();
if (error)
@@ -453,6 +457,7 @@ static int resume_target_kernel(bool platform_mode)
syscore_resume();
Enable_irqs:
+ system_state = SYSTEM_RUNNING;
local_irq_enable();
Enable_cpus:
@@ -532,6 +537,7 @@ int hibernation_platform_enter(void)
goto Platform_finish;
local_irq_disable();
+ system_state = SYSTEM_SUSPEND;
syscore_suspend();
if (pm_wakeup_pending()) {
error = -EAGAIN;
@@ -544,6 +550,7 @@ int hibernation_platform_enter(void)
Power_up:
syscore_resume();
+ system_state = SYSTEM_RUNNING;
local_irq_enable();
enable_nonboot_cpus();
diff --git a/kernel/power/suspend.c b/kernel/power/suspend.c
index 396d262b8fd0..d4fb331d8d6e 100644
--- a/kernel/power/suspend.c
+++ b/kernel/power/suspend.c
@@ -164,6 +164,8 @@ static int suspend_enter(suspend_state_t state, bool *wakeup)
arch_suspend_disable_irqs();
BUG_ON(!irqs_disabled());
+ system_state = SYSTEM_SUSPEND;
+
error = syscore_suspend();
if (!error) {
*wakeup = pm_wakeup_pending();
@@ -174,6 +176,8 @@ static int suspend_enter(suspend_state_t state, bool *wakeup)
syscore_resume();
}
+ system_state = SYSTEM_RUNNING;
+
arch_suspend_enable_irqs();
BUG_ON(irqs_disabled());
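Both suspend paths now bracket the syscore phase with system_state = SYSTEM_SUSPEND, giving other code a cheap way to detect the irqs-off, single-CPU window. An illustrative consumer (not from this patch) that avoids taking a sleeping lock there:

    static DEFINE_MUTEX(notify_lock);       /* hypothetical */

    static void sketch_event_notify(void)
    {
            if (system_state == SYSTEM_SUSPEND)
                    return; /* irqs off, one CPU: sleeping locks forbidden */

            mutex_lock(&notify_lock);
            /* ... */
            mutex_unlock(&notify_lock);
    }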
diff --git a/kernel/printk.c b/kernel/printk.c
index b663c2c95d39..d3f22ec24196 100644
--- a/kernel/printk.c
+++ b/kernel/printk.c
@@ -21,6 +21,7 @@
#include <linux/tty.h>
#include <linux/tty_driver.h>
#include <linux/console.h>
+#include <linux/sysrq.h>
#include <linux/init.h>
#include <linux/jiffies.h>
#include <linux/nmi.h>
@@ -47,13 +48,6 @@
#define CREATE_TRACE_POINTS
#include <trace/events/printk.h>
-/*
- * Architectures can override it:
- */
-void asmlinkage __attribute__((weak)) early_printk(const char *fmt, ...)
-{
-}
-
#define __LOG_BUF_LEN (1 << CONFIG_LOG_BUF_SHIFT)
/* printk's without a loglevel use this.. */
@@ -514,6 +508,7 @@ static void __call_console_drivers(unsigned start, unsigned end)
{
struct console *con;
+ migrate_disable();
for_each_console(con) {
if (exclusive_console && con != exclusive_console)
continue;
@@ -522,8 +517,65 @@ static void __call_console_drivers(unsigned start, unsigned end)
(con->flags & CON_ANYTIME)))
con->write(con, &LOG_BUF(start), end - start);
}
+ migrate_enable();
+}
+
+#ifdef CONFIG_EARLY_PRINTK
+struct console *early_console;
+
+static void early_vprintk(const char *fmt, va_list ap)
+{
+ if (early_console) {
+ char buf[512];
+ int n = vscnprintf(buf, sizeof(buf), fmt, ap);
+
+ early_console->write(early_console, buf, n);
+ }
+}
+
+asmlinkage void early_printk(const char *fmt, ...)
+{
+ va_list ap;
+
+ va_start(ap, fmt);
+ early_vprintk(fmt, ap);
+ va_end(ap);
+}
+
+/*
+ * This is independent of any log levels - a global
+ * kill switch that turns off all of printk.
+ *
+ * Used by the NMI watchdog if early-printk is enabled.
+ */
+static bool __read_mostly printk_killswitch;
+
+static int __init force_early_printk_setup(char *str)
+{
+ printk_killswitch = true;
+ return 0;
+}
+early_param("force_early_printk", force_early_printk_setup);
+
+void printk_kill(void)
+{
+ printk_killswitch = true;
}
+static int forced_early_printk(const char *fmt, va_list ap)
+{
+ if (!printk_killswitch)
+ return 0;
+ early_vprintk(fmt, ap);
+ return 1;
+}
+#else
+static inline int forced_early_printk(const char *fmt, va_list ap)
+{
+ return 0;
+}
+#endif
+
static bool __read_mostly ignore_loglevel;
static int __init ignore_loglevel_setup(char *str)
@@ -790,12 +842,18 @@ static inline int can_use_console(unsigned int cpu)
* interrupts disabled. It should return with 'lockbuf_lock'
* released but interrupts still disabled.
*/
-static int console_trylock_for_printk(unsigned int cpu)
+static int console_trylock_for_printk(unsigned int cpu, unsigned long flags)
__releases(&logbuf_lock)
{
int retval = 0, wake = 0;
+#ifdef CONFIG_PREEMPT_RT_FULL
+ int lock = (!early_boot_irqs_disabled && !irqs_disabled_flags(flags) &&
+ !preempt_count()) || sysrq_in_progress;
+#else
+ int lock = 1;
+#endif
- if (console_trylock()) {
+ if (lock && console_trylock()) {
retval = 1;
/*
@@ -846,6 +904,13 @@ asmlinkage int vprintk(const char *fmt, va_list args)
size_t plen;
char special;
+ /*
+ * Fall back to early_printk if a debugging subsystem has
+ * killed printk output
+ */
+ if (unlikely(forced_early_printk(fmt, args)))
+ return 1;
+
boot_delay_msec();
printk_delay();
@@ -965,8 +1030,15 @@ asmlinkage int vprintk(const char *fmt, va_list args)
* will release 'logbuf_lock' regardless of whether it
* actually gets the semaphore or not.
*/
- if (console_trylock_for_printk(this_cpu))
+ if (console_trylock_for_printk(this_cpu, flags)) {
+#ifndef CONFIG_PREEMPT_RT_FULL
+ console_unlock();
+#else
+ raw_local_irq_restore(flags);
console_unlock();
+ raw_local_irq_save(flags);
+#endif
+ }
lockdep_on();
out_restore_irqs:
@@ -1242,8 +1314,8 @@ void printk_tick(void)
int printk_needs_cpu(int cpu)
{
- if (cpu_is_offline(cpu))
- printk_tick();
+ if (unlikely(cpu_is_offline(cpu)))
+ __this_cpu_write(printk_pending, 0);
return __this_cpu_read(printk_pending);
}
@@ -1289,11 +1361,16 @@ again:
_con_start = con_start;
_log_end = log_end;
con_start = log_end; /* Flush */
+#ifndef CONFIG_PREEMPT_RT_FULL
raw_spin_unlock(&logbuf_lock);
stop_critical_timings(); /* don't trace print latency */
call_console_drivers(_con_start, _log_end);
start_critical_timings();
local_irq_restore(flags);
+#else
+ raw_spin_unlock_irqrestore(&logbuf_lock, flags);
+ call_console_drivers(_con_start, _log_end);
+#endif
}
console_locked = 0;
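The killswitch comment above names its intended user. A hedged sketch of an NMI watchdog path that silences regular printk (whose logbuf_lock could deadlock from NMI context) and falls back to the polling early console:

    static void nmi_watchdog_report(void)
    {
            printk_kill();  /* all later printk()s reroute to early_vprintk() */
            early_printk("NMI watchdog: CPU%d appears stuck\n",
                         raw_smp_processor_id());
    }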
diff --git a/kernel/rcupdate.c b/kernel/rcupdate.c
index a86f1741cc27..63fc4b3e7af3 100644
--- a/kernel/rcupdate.c
+++ b/kernel/rcupdate.c
@@ -77,6 +77,7 @@ int debug_lockdep_rcu_enabled(void)
}
EXPORT_SYMBOL_GPL(debug_lockdep_rcu_enabled);
+#ifndef CONFIG_PREEMPT_RT_FULL
/**
* rcu_read_lock_bh_held() - might we be in RCU-bh read-side critical section?
*
@@ -103,6 +104,7 @@ int rcu_read_lock_bh_held(void)
return in_softirq() || irqs_disabled();
}
EXPORT_SYMBOL_GPL(rcu_read_lock_bh_held);
+#endif
#endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
diff --git a/kernel/rcutiny.c b/kernel/rcutiny.c
index 37a5444204d2..fc581fb75751 100644
--- a/kernel/rcutiny.c
+++ b/kernel/rcutiny.c
@@ -368,6 +368,7 @@ void call_rcu_sched(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
}
EXPORT_SYMBOL_GPL(call_rcu_sched);
+#ifndef CONFIG_PREEMPT_RT_FULL
/*
* Post an RCU bottom-half callback to be invoked after any subsequent
* quiescent state.
@@ -377,3 +378,4 @@ void call_rcu_bh(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
__call_rcu(head, func, &rcu_bh_ctrlblk);
}
EXPORT_SYMBOL_GPL(call_rcu_bh);
+#endif
diff --git a/kernel/rcutree.c b/kernel/rcutree.c
index 1050d6d3922c..8f4394141ff0 100644
--- a/kernel/rcutree.c
+++ b/kernel/rcutree.c
@@ -172,6 +172,14 @@ void rcu_sched_qs(int cpu)
rdp->passed_quiesce = 1;
}
+#ifdef CONFIG_PREEMPT_RT_FULL
+static void rcu_preempt_qs(int cpu);
+
+void rcu_bh_qs(int cpu)
+{
+ rcu_preempt_qs(cpu);
+}
+#else
void rcu_bh_qs(int cpu)
{
struct rcu_data *rdp = &per_cpu(rcu_bh_data, cpu);
@@ -182,6 +190,7 @@ void rcu_bh_qs(int cpu)
trace_rcu_grace_period("rcu_bh", rdp->gpnum, "cpuqs");
rdp->passed_quiesce = 1;
}
+#endif
/*
* Note a context switch. This is a quiescent state for RCU-sched,
@@ -228,6 +237,7 @@ long rcu_batches_completed_sched(void)
}
EXPORT_SYMBOL_GPL(rcu_batches_completed_sched);
+#ifndef CONFIG_PREEMPT_RT_FULL
/*
* Return the number of RCU BH batches processed thus far for debug & stats.
*/
@@ -245,6 +255,7 @@ void rcu_bh_force_quiescent_state(void)
force_quiescent_state(&rcu_bh_state, 0);
}
EXPORT_SYMBOL_GPL(rcu_bh_force_quiescent_state);
+#endif
/*
* Record the number of times rcutorture tests have been initiated and
@@ -1885,6 +1896,7 @@ void call_rcu_sched(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
}
EXPORT_SYMBOL_GPL(call_rcu_sched);
+#ifndef CONFIG_PREEMPT_RT_FULL
/*
* Queue an RCU callback for invocation after a quicker grace period.
*/
@@ -1893,6 +1905,7 @@ void call_rcu_bh(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
__call_rcu(head, func, &rcu_bh_state, 0);
}
EXPORT_SYMBOL_GPL(call_rcu_bh);
+#endif
/**
* synchronize_sched - wait until an rcu-sched grace period has elapsed.
@@ -1929,6 +1942,7 @@ void synchronize_sched(void)
}
EXPORT_SYMBOL_GPL(synchronize_sched);
+#ifndef CONFIG_PREEMPT_RT_FULL
/**
* synchronize_rcu_bh - wait until an rcu_bh grace period has elapsed.
*
@@ -1949,6 +1963,7 @@ void synchronize_rcu_bh(void)
wait_rcu_gp(call_rcu_bh);
}
EXPORT_SYMBOL_GPL(synchronize_rcu_bh);
+#endif
static atomic_t sync_sched_expedited_started = ATOMIC_INIT(0);
static atomic_t sync_sched_expedited_done = ATOMIC_INIT(0);
@@ -2224,6 +2239,7 @@ static void _rcu_barrier(struct rcu_state *rsp,
mutex_unlock(&rcu_barrier_mutex);
}
+#ifndef CONFIG_PREEMPT_RT_FULL
/**
* rcu_barrier_bh - Wait until all in-flight call_rcu_bh() callbacks complete.
*/
@@ -2232,6 +2248,7 @@ void rcu_barrier_bh(void)
_rcu_barrier(&rcu_bh_state, call_rcu_bh);
}
EXPORT_SYMBOL_GPL(rcu_barrier_bh);
+#endif
/**
* rcu_barrier_sched - Wait for in-flight call_rcu_sched() callbacks.
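With rcu_bh_qs() forwarded to rcu_preempt_qs() and the _bh entry points compiled out above, the bh flavor collapses into preemptible RCU on RT, where softirqs run in preemptible thread context and a separate flavor buys nothing. The matching header-side mapping would plausibly be (assumption; any such change is outside this kernel/ diff):

    #ifdef CONFIG_PREEMPT_RT_FULL
    # define call_rcu_bh            call_rcu
    # define rcu_barrier_bh         rcu_barrier
    # define synchronize_rcu_bh     synchronize_rcu
    #endif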
diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h
index c023464816be..2844d7d85e4e 100644
--- a/kernel/rcutree_plugin.h
+++ b/kernel/rcutree_plugin.h
@@ -339,7 +339,7 @@ static noinline void rcu_read_unlock_special(struct task_struct *t)
}
/* Hardware IRQ handlers cannot block. */
- if (in_irq() || in_serving_softirq()) {
+ if (preempt_count() & (HARDIRQ_MASK | SOFTIRQ_OFFSET)) {
local_irq_restore(flags);
return;
}
@@ -1899,7 +1899,7 @@ static void __cpuinit rcu_prepare_kthreads(int cpu)
#endif /* #else #ifdef CONFIG_RCU_BOOST */
-#if !defined(CONFIG_RCU_FAST_NO_HZ)
+#if !defined(CONFIG_RCU_FAST_NO_HZ) || defined(CONFIG_PREEMPT_RT_FULL)
/*
* Check to see if any future RCU-related work will need to be done
@@ -1914,6 +1914,9 @@ int rcu_needs_cpu(int cpu)
{
return rcu_cpu_has_callbacks(cpu);
}
+#endif /* !defined(CONFIG_RCU_FAST_NO_HZ) || defined(CONFIG_PREEMPT_RT_FULL) */
+
+#if !defined(CONFIG_RCU_FAST_NO_HZ)
/*
* Because we do not have RCU_FAST_NO_HZ, don't bother initializing for it.
@@ -1984,6 +1987,7 @@ static DEFINE_PER_CPU(struct hrtimer, rcu_idle_gp_timer);
static ktime_t rcu_idle_gp_wait; /* If some non-lazy callbacks. */
static ktime_t rcu_idle_lazy_gp_wait; /* If only lazy callbacks. */
+#ifndef CONFIG_PREEMPT_RT_FULL
/*
* Allow the CPU to enter dyntick-idle mode if either: (1) There are no
* callbacks on this CPU, (2) this CPU has not yet attempted to enter
@@ -2001,6 +2005,7 @@ int rcu_needs_cpu(int cpu)
/* Otherwise, RCU needs the CPU only if it recently tried and failed. */
return per_cpu(rcu_dyntick_holdoff, cpu) == jiffies;
}
+#endif /* #ifndef CONFIG_PREEMPT_RT_FULL */
/*
* Does the specified flavor of RCU have non-lazy callbacks pending on
diff --git a/kernel/relay.c b/kernel/relay.c
index ab56a1764d4d..0fb979922f30 100644
--- a/kernel/relay.c
+++ b/kernel/relay.c
@@ -340,6 +340,10 @@ static void wakeup_readers(unsigned long data)
{
struct rchan_buf *buf = (struct rchan_buf *)data;
wake_up_interruptible(&buf->read_wait);
+ /*
+ * Stupid polling for now:
+ */
+ mod_timer(&buf->timer, jiffies + 1);
}
/**
@@ -357,6 +361,7 @@ static void __relay_reset(struct rchan_buf *buf, unsigned int init)
init_waitqueue_head(&buf->read_wait);
kref_init(&buf->kref);
setup_timer(&buf->timer, wakeup_readers, (unsigned long)buf);
+ mod_timer(&buf->timer, jiffies + 1);
} else
del_timer_sync(&buf->timer);
@@ -739,15 +744,6 @@ size_t relay_switch_subbuf(struct rchan_buf *buf, size_t length)
else
buf->early_bytes += buf->chan->subbuf_size -
buf->padding[old_subbuf];
- smp_mb();
- if (waitqueue_active(&buf->read_wait))
- /*
- * Calling wake_up_interruptible() from here
- * will deadlock if we happen to be logging
- * from the scheduler (trying to re-grab
- * rq->lock), so defer it.
- */
- mod_timer(&buf->timer, jiffies + 1);
}
old = buf->data;
diff --git a/kernel/res_counter.c b/kernel/res_counter.c
index d508363858b3..402f91ac98bc 100644
--- a/kernel/res_counter.c
+++ b/kernel/res_counter.c
@@ -43,7 +43,7 @@ int res_counter_charge(struct res_counter *counter, unsigned long val,
struct res_counter *c, *u;
*limit_fail_at = NULL;
- local_irq_save(flags);
+ local_irq_save_nort(flags);
for (c = counter; c != NULL; c = c->parent) {
spin_lock(&c->lock);
ret = res_counter_charge_locked(c, val);
@@ -62,7 +62,7 @@ undo:
spin_unlock(&u->lock);
}
done:
- local_irq_restore(flags);
+ local_irq_restore_nort(flags);
return ret;
}
@@ -104,13 +104,13 @@ void res_counter_uncharge(struct res_counter *counter, unsigned long val)
unsigned long flags;
struct res_counter *c;
- local_irq_save(flags);
+ local_irq_save_nort(flags);
for (c = counter; c != NULL; c = c->parent) {
spin_lock(&c->lock);
res_counter_uncharge_locked(c, val);
spin_unlock(&c->lock);
}
- local_irq_restore(flags);
+ local_irq_restore_nort(flags);
}
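The _nort variants used above are the RT-aware forms of the IRQ-flags helpers:
on mainline they behave exactly like local_irq_save()/local_irq_restore(),
while on PREEMPT_RT_FULL they skip the actual IRQ disable, since the spin_lock
taken inside the region is a sleeping lock there anyway. A sketch of the
expected definitions (assumed to live in include/linux/interrupt.h in this
series):

    #ifdef CONFIG_PREEMPT_RT_FULL
    # define local_irq_save_nort(flags)    local_save_flags(flags)
    # define local_irq_restore_nort(flags) (void)(flags)
    #else
    # define local_irq_save_nort(flags)    local_irq_save(flags)
    # define local_irq_restore_nort(flags) local_irq_restore(flags)
    #endif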
diff --git a/kernel/rt.c b/kernel/rt.c
new file mode 100644
index 000000000000..092d6b356d59
--- /dev/null
+++ b/kernel/rt.c
@@ -0,0 +1,442 @@
+/*
+ * kernel/rt.c
+ *
+ * Real-Time Preemption Support
+ *
+ * started by Ingo Molnar:
+ *
+ * Copyright (C) 2004-2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
+ * Copyright (C) 2006, Timesys Corp., Thomas Gleixner <tglx@timesys.com>
+ *
+ * historic credit for proving that Linux spinlocks can be implemented via
+ * RT-aware mutexes goes to many people: The Pmutex project (Dirk Grambow
+ * and others) who prototyped it on 2.4 and did lots of comparative
+ * research and analysis; TimeSys, for proving that you can implement a
+ * fully preemptible kernel via the use of IRQ threading and mutexes;
+ * Bill Huey for persuasively arguing on lkml that the mutex model is the
+ * right one; and to MontaVista, who ported pmutexes to 2.6.
+ *
+ * This code is a from-scratch implementation and is not based on pmutexes,
+ * but the idea of converting spinlocks to mutexes is used here too.
+ *
+ * lock debugging, locking tree, deadlock detection:
+ *
+ * Copyright (C) 2004, LynuxWorks, Inc., Igor Manyilov, Bill Huey
+ * Released under the General Public License (GPL).
+ *
+ * Includes portions of the generic R/W semaphore implementation from:
+ *
+ * Copyright (c) 2001 David Howells (dhowells@redhat.com).
+ * - Derived partially from idea by Andrea Arcangeli <andrea@suse.de>
+ * - Derived also from comments by Linus
+ *
+ * Pending ownership of locks and ownership stealing:
+ *
+ * Copyright (C) 2005, Kihon Technologies Inc., Steven Rostedt
+ *
+ * (also by Steven Rostedt)
+ * - Converted single pi_lock to individual task locks.
+ *
+ * By Esben Nielsen:
+ * Doing priority inheritance with help of the scheduler.
+ *
+ * Copyright (C) 2006, Timesys Corp., Thomas Gleixner <tglx@timesys.com>
+ * - major rework based on Esben Nielsen's initial patch
+ * - replaced thread_info references by task_struct refs
+ * - removed task->pending_owner dependency
+ * - BKL drop/reacquire for semaphore style locks to avoid deadlocks
+ * in the scheduler return path as discussed with Steven Rostedt
+ *
+ * Copyright (C) 2006, Kihon Technologies Inc.
+ * Steven Rostedt <rostedt@goodmis.org>
+ * - debugged and patched Thomas Gleixner's rework.
+ * - added back the cmpxchg to the rework.
+ * - turned atomic require back on for SMP.
+ */
+
+#include <linux/spinlock.h>
+#include <linux/rtmutex.h>
+#include <linux/sched.h>
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/kallsyms.h>
+#include <linux/syscalls.h>
+#include <linux/interrupt.h>
+#include <linux/plist.h>
+#include <linux/fs.h>
+#include <linux/futex.h>
+#include <linux/hrtimer.h>
+
+#include "rtmutex_common.h"
+
+/*
+ * struct mutex functions
+ */
+void __mutex_do_init(struct mutex *mutex, const char *name,
+ struct lock_class_key *key)
+{
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+ /*
+ * Make sure we are not reinitializing a held lock:
+ */
+ debug_check_no_locks_freed((void *)mutex, sizeof(*mutex));
+ lockdep_init_map(&mutex->dep_map, name, key, 0);
+#endif
+ mutex->lock.save_state = 0;
+}
+EXPORT_SYMBOL(__mutex_do_init);
+
+void __lockfunc _mutex_lock(struct mutex *lock)
+{
+ mutex_acquire(&lock->dep_map, 0, 0, _RET_IP_);
+ rt_mutex_lock(&lock->lock);
+}
+EXPORT_SYMBOL(_mutex_lock);
+
+int __lockfunc _mutex_lock_interruptible(struct mutex *lock)
+{
+ int ret;
+
+ mutex_acquire(&lock->dep_map, 0, 0, _RET_IP_);
+ ret = rt_mutex_lock_interruptible(&lock->lock, 0);
+ if (ret)
+ mutex_release(&lock->dep_map, 1, _RET_IP_);
+ return ret;
+}
+EXPORT_SYMBOL(_mutex_lock_interruptible);
+
+int __lockfunc _mutex_lock_killable(struct mutex *lock)
+{
+ int ret;
+
+ mutex_acquire(&lock->dep_map, 0, 0, _RET_IP_);
+ ret = rt_mutex_lock_killable(&lock->lock, 0);
+ if (ret)
+ mutex_release(&lock->dep_map, 1, _RET_IP_);
+ return ret;
+}
+EXPORT_SYMBOL(_mutex_lock_killable);
+
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+void __lockfunc _mutex_lock_nested(struct mutex *lock, int subclass)
+{
+ mutex_acquire_nest(&lock->dep_map, subclass, 0, NULL, _RET_IP_);
+ rt_mutex_lock(&lock->lock);
+}
+EXPORT_SYMBOL(_mutex_lock_nested);
+
+void __lockfunc _mutex_lock_nest_lock(struct mutex *lock, struct lockdep_map *nest)
+{
+ mutex_acquire_nest(&lock->dep_map, 0, 0, nest, _RET_IP_);
+ rt_mutex_lock(&lock->lock);
+}
+EXPORT_SYMBOL(_mutex_lock_nest_lock);
+
+int __lockfunc _mutex_lock_interruptible_nested(struct mutex *lock, int subclass)
+{
+ int ret;
+
+ mutex_acquire_nest(&lock->dep_map, subclass, 0, NULL, _RET_IP_);
+ ret = rt_mutex_lock_interruptible(&lock->lock, 0);
+ if (ret)
+ mutex_release(&lock->dep_map, 1, _RET_IP_);
+ return ret;
+}
+EXPORT_SYMBOL(_mutex_lock_interruptible_nested);
+
+int __lockfunc _mutex_lock_killable_nested(struct mutex *lock, int subclass)
+{
+ int ret;
+
+ mutex_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
+ ret = rt_mutex_lock_killable(&lock->lock, 0);
+ if (ret)
+ mutex_release(&lock->dep_map, 1, _RET_IP_);
+ return ret;
+}
+EXPORT_SYMBOL(_mutex_lock_killable_nested);
+#endif
+
+int __lockfunc _mutex_trylock(struct mutex *lock)
+{
+ int ret = rt_mutex_trylock(&lock->lock);
+
+ if (ret)
+ mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_);
+
+ return ret;
+}
+EXPORT_SYMBOL(_mutex_trylock);
+
+void __lockfunc _mutex_unlock(struct mutex *lock)
+{
+ mutex_release(&lock->dep_map, 1, _RET_IP_);
+ rt_mutex_unlock(&lock->lock);
+}
+EXPORT_SYMBOL(_mutex_unlock);
+
+/*
+ * rwlock_t functions
+ */
+int __lockfunc rt_write_trylock(rwlock_t *rwlock)
+{
+ int ret = rt_mutex_trylock(&rwlock->lock);
+
+ migrate_disable();
+ if (ret)
+ rwlock_acquire(&rwlock->dep_map, 0, 1, _RET_IP_);
+ else
+ migrate_enable();
+
+ return ret;
+}
+EXPORT_SYMBOL(rt_write_trylock);
+
+int __lockfunc rt_write_trylock_irqsave(rwlock_t *rwlock, unsigned long *flags)
+{
+ int ret;
+
+ *flags = 0;
+ migrate_disable();
+ ret = rt_write_trylock(rwlock);
+ if (!ret)
+ migrate_enable();
+ return ret;
+}
+EXPORT_SYMBOL(rt_write_trylock_irqsave);
+
+int __lockfunc rt_read_trylock(rwlock_t *rwlock)
+{
+ struct rt_mutex *lock = &rwlock->lock;
+ int ret = 1;
+
+ /*
+ * recursive read locks succeed when current owns the lock,
+ * but not when read_depth == 0 which means that the lock is
+ * write locked.
+ */
+ migrate_disable();
+ if (rt_mutex_owner(lock) != current)
+ ret = rt_mutex_trylock(lock);
+ else if (!rwlock->read_depth)
+ ret = 0;
+
+ if (ret) {
+ rwlock->read_depth++;
+ rwlock_acquire_read(&rwlock->dep_map, 0, 1, _RET_IP_);
+ } else
+ migrate_enable();
+
+ return ret;
+}
+EXPORT_SYMBOL(rt_read_trylock);
+
+void __lockfunc rt_write_lock(rwlock_t *rwlock)
+{
+ rwlock_acquire(&rwlock->dep_map, 0, 0, _RET_IP_);
+ __rt_spin_lock(&rwlock->lock);
+}
+EXPORT_SYMBOL(rt_write_lock);
+
+void __lockfunc rt_read_lock(rwlock_t *rwlock)
+{
+ struct rt_mutex *lock = &rwlock->lock;
+
+ rwlock_acquire_read(&rwlock->dep_map, 0, 0, _RET_IP_);
+
+ /*
+ * recursive read locks succeed when current owns the lock
+ */
+ if (rt_mutex_owner(lock) != current)
+ __rt_spin_lock(lock);
+ rwlock->read_depth++;
+}
+
+EXPORT_SYMBOL(rt_read_lock);
+
+void __lockfunc rt_write_unlock(rwlock_t *rwlock)
+{
+ /* NOTE: we always pass in '1' for nested, for simplicity */
+ rwlock_release(&rwlock->dep_map, 1, _RET_IP_);
+ __rt_spin_unlock(&rwlock->lock);
+}
+EXPORT_SYMBOL(rt_write_unlock);
+
+void __lockfunc rt_read_unlock(rwlock_t *rwlock)
+{
+ rwlock_release(&rwlock->dep_map, 1, _RET_IP_);
+
+ /* Release the lock only when read_depth is down to 0 */
+ if (--rwlock->read_depth == 0)
+ __rt_spin_unlock(&rwlock->lock);
+}
+EXPORT_SYMBOL(rt_read_unlock);
+
+unsigned long __lockfunc rt_write_lock_irqsave(rwlock_t *rwlock)
+{
+ rt_write_lock(rwlock);
+
+ return 0;
+}
+EXPORT_SYMBOL(rt_write_lock_irqsave);
+
+unsigned long __lockfunc rt_read_lock_irqsave(rwlock_t *rwlock)
+{
+ rt_read_lock(rwlock);
+
+ return 0;
+}
+EXPORT_SYMBOL(rt_read_lock_irqsave);
+
+void __rt_rwlock_init(rwlock_t *rwlock, char *name, struct lock_class_key *key)
+{
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+ /*
+ * Make sure we are not reinitializing a held lock:
+ */
+ debug_check_no_locks_freed((void *)rwlock, sizeof(*rwlock));
+ lockdep_init_map(&rwlock->dep_map, name, key, 0);
+#endif
+ rwlock->lock.save_state = 1;
+ rwlock->read_depth = 0;
+}
+EXPORT_SYMBOL(__rt_rwlock_init);
+
+/*
+ * rw_semaphores
+ */
+
+void rt_up_write(struct rw_semaphore *rwsem)
+{
+ rwsem_release(&rwsem->dep_map, 1, _RET_IP_);
+ rt_mutex_unlock(&rwsem->lock);
+}
+EXPORT_SYMBOL(rt_up_write);
+
+void rt_up_read(struct rw_semaphore *rwsem)
+{
+ rwsem_release(&rwsem->dep_map, 1, _RET_IP_);
+ if (--rwsem->read_depth == 0)
+ rt_mutex_unlock(&rwsem->lock);
+}
+EXPORT_SYMBOL(rt_up_read);
+
+/*
+ * downgrade a write lock into a read lock
+ * - just wake up any readers at the front of the queue
+ */
+void rt_downgrade_write(struct rw_semaphore *rwsem)
+{
+ BUG_ON(rt_mutex_owner(&rwsem->lock) != current);
+ rwsem->read_depth = 1;
+}
+EXPORT_SYMBOL(rt_downgrade_write);
+
+int rt_down_write_trylock(struct rw_semaphore *rwsem)
+{
+ int ret = rt_mutex_trylock(&rwsem->lock);
+
+ if (ret)
+ rwsem_acquire(&rwsem->dep_map, 0, 1, _RET_IP_);
+ return ret;
+}
+EXPORT_SYMBOL(rt_down_write_trylock);
+
+void rt_down_write(struct rw_semaphore *rwsem)
+{
+ rwsem_acquire(&rwsem->dep_map, 0, 0, _RET_IP_);
+ rt_mutex_lock(&rwsem->lock);
+}
+EXPORT_SYMBOL(rt_down_write);
+
+void rt_down_write_nested(struct rw_semaphore *rwsem, int subclass)
+{
+ rwsem_acquire(&rwsem->dep_map, subclass, 0, _RET_IP_);
+ rt_mutex_lock(&rwsem->lock);
+}
+EXPORT_SYMBOL(rt_down_write_nested);
+
+int rt_down_read_trylock(struct rw_semaphore *rwsem)
+{
+ struct rt_mutex *lock = &rwsem->lock;
+ int ret = 1;
+
+ /*
+ * recursive read locks succeed when current owns the rwsem,
+ * but not when read_depth == 0 which means that the rwsem is
+ * write locked.
+ */
+ if (rt_mutex_owner(lock) != current)
+ ret = rt_mutex_trylock(&rwsem->lock);
+ else if (!rwsem->read_depth)
+ ret = 0;
+
+ if (ret) {
+ rwsem->read_depth++;
+ rwsem_acquire(&rwsem->dep_map, 0, 1, _RET_IP_);
+ }
+ return ret;
+}
+EXPORT_SYMBOL(rt_down_read_trylock);
+
+static void __rt_down_read(struct rw_semaphore *rwsem, int subclass)
+{
+ struct rt_mutex *lock = &rwsem->lock;
+
+ rwsem_acquire_read(&rwsem->dep_map, subclass, 0, _RET_IP_);
+
+ if (rt_mutex_owner(lock) != current)
+ rt_mutex_lock(&rwsem->lock);
+ rwsem->read_depth++;
+}
+
+void rt_down_read(struct rw_semaphore *rwsem)
+{
+ __rt_down_read(rwsem, 0);
+}
+EXPORT_SYMBOL(rt_down_read);
+
+void rt_down_read_nested(struct rw_semaphore *rwsem, int subclass)
+{
+ __rt_down_read(rwsem, subclass);
+}
+EXPORT_SYMBOL(rt_down_read_nested);
+
+void __rt_rwsem_init(struct rw_semaphore *rwsem, char *name,
+ struct lock_class_key *key)
+{
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+ /*
+ * Make sure we are not reinitializing a held lock:
+ */
+ debug_check_no_locks_freed((void *)rwsem, sizeof(*rwsem));
+ lockdep_init_map(&rwsem->dep_map, name, key, 0);
+#endif
+ rwsem->read_depth = 0;
+ rwsem->lock.save_state = 0;
+}
+EXPORT_SYMBOL(__rt_rwsem_init);
+
+/**
+ * atomic_dec_and_mutex_lock - return holding mutex if we dec to 0
+ * @cnt: the atomic which we are to dec
+ * @lock: the mutex to return holding if we dec to 0
+ *
+ * return true and hold lock if we dec to 0, return false otherwise
+ */
+int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock)
+{
+ /* dec if we can't possibly hit 0 */
+ if (atomic_add_unless(cnt, -1, 1))
+ return 0;
+ /* we might hit 0, so take the lock */
+ mutex_lock(lock);
+ if (!atomic_dec_and_test(cnt)) {
+ /* when we actually did the dec, we didn't hit 0 */
+ mutex_unlock(lock);
+ return 0;
+ }
+ /* we hit 0, and we hold the lock */
+ return 1;
+}
+EXPORT_SYMBOL(atomic_dec_and_mutex_lock);
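atomic_dec_and_mutex_lock() keeps the fast path lock-free: while the counter
cannot possibly reach zero the mutex is never touched, and only the final
decrement pays for it. A hypothetical usage sketch (struct my_obj, its fields
and remove_from_list() are made up for illustration):

    static void put_object(struct my_obj *obj)
    {
            /* Returns 1 only when we decremented to 0 and hold the mutex. */
            if (!atomic_dec_and_mutex_lock(&obj->refcnt, &obj->lock))
                    return;

            remove_from_list(obj);          /* count is 0, obj->lock is held */
            mutex_unlock(&obj->lock);
            kfree(obj);
    }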
diff --git a/kernel/rtmutex.c b/kernel/rtmutex.c
index a242e691c993..3bff72606b07 100644
--- a/kernel/rtmutex.c
+++ b/kernel/rtmutex.c
@@ -8,6 +8,12 @@
* Copyright (C) 2005 Kihon Technologies Inc., Steven Rostedt
* Copyright (C) 2006 Esben Nielsen
*
+ * Adaptive Spinlocks:
+ * Copyright (C) 2008 Novell, Inc., Gregory Haskins, Sven Dietrich,
+ * and Peter Morreale,
+ * Adaptive Spinlocks simplification:
+ * Copyright (C) 2008 Red Hat, Inc., Steven Rostedt <srostedt@redhat.com>
+ *
* See Documentation/rt-mutex-design.txt for details.
*/
#include <linux/spinlock.h>
@@ -67,6 +73,12 @@ static void fixup_rt_mutex_waiters(struct rt_mutex *lock)
clear_rt_mutex_waiters(lock);
}
+static int rt_mutex_real_waiter(struct rt_mutex_waiter *waiter)
+{
+ return waiter && waiter != PI_WAKEUP_INPROGRESS &&
+ waiter != PI_REQUEUE_INPROGRESS;
+}
+
/*
* We can speed up the acquire/release, if the architecture
* supports cmpxchg and if there's no debugging state to be set up
@@ -90,6 +102,12 @@ static inline void mark_rt_mutex_waiters(struct rt_mutex *lock)
}
#endif
+static inline void init_lists(struct rt_mutex *lock)
+{
+ if (unlikely(!lock->wait_list.node_list.prev))
+ plist_head_init(&lock->wait_list);
+}
+
/*
* Calculate task priority from the waiter list priority
*
@@ -136,6 +154,14 @@ static void rt_mutex_adjust_prio(struct task_struct *task)
raw_spin_unlock_irqrestore(&task->pi_lock, flags);
}
+static void rt_mutex_wake_waiter(struct rt_mutex_waiter *waiter)
+{
+ if (waiter->savestate)
+ wake_up_lock_sleeper(waiter->task);
+ else
+ wake_up_process(waiter->task);
+}
+
/*
* Max number of times we'll walk the boosting chain:
*/
@@ -196,7 +222,7 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
* reached or the state of the chain has changed while we
* dropped the locks.
*/
- if (!waiter)
+ if (!rt_mutex_real_waiter(waiter))
goto out_unlock_pi;
/*
@@ -247,13 +273,15 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
/* Release the task */
raw_spin_unlock_irqrestore(&task->pi_lock, flags);
if (!rt_mutex_owner(lock)) {
+ struct rt_mutex_waiter *lock_top_waiter;
+
/*
* If the requeue above changed the top waiter, then we need
* to wake the new top waiter up to try to get the lock.
*/
-
- if (top_waiter != rt_mutex_top_waiter(lock))
- wake_up_process(rt_mutex_top_waiter(lock)->task);
+ lock_top_waiter = rt_mutex_top_waiter(lock);
+ if (top_waiter != lock_top_waiter)
+ rt_mutex_wake_waiter(lock_top_waiter);
raw_spin_unlock(&lock->wait_lock);
goto out_put_task;
}
@@ -298,6 +326,25 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
return ret;
}
+
+#define STEAL_NORMAL 0
+#define STEAL_LATERAL 1
+
+/*
+ * Note that RT tasks are excluded from lateral-steals to prevent the
+ * introduction of an unbounded latency.
+ */
+static inline int lock_is_stealable(struct task_struct *task,
+ struct task_struct *pendowner, int mode)
+{
+ if (mode == STEAL_NORMAL || rt_task(task)) {
+ if (task->prio >= pendowner->prio)
+ return 0;
+ } else if (task->prio > pendowner->prio)
+ return 0;
+ return 1;
+}
+
/*
* Try to take an rt-mutex
*
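A short worked example of the stealing rules above (illustrative only; a lower
prio number means higher priority):

    /*
     * pendowner = top waiter that is about to get the lock.
     *
     *   SCHED_OTHER task, task->prio == pendowner->prio, STEAL_LATERAL
     *       -> stealable: equal priority may steal laterally
     *   RT task, task->prio == pendowner->prio, either mode
     *       -> not stealable: RT tasks need strictly higher priority,
     *          which keeps their latencies bounded
     */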
@@ -307,8 +354,9 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
* @task: the task which wants to acquire the lock
* @waiter: the waiter that is queued to the lock's wait list. (could be NULL)
*/
-static int try_to_take_rt_mutex(struct rt_mutex *lock, struct task_struct *task,
- struct rt_mutex_waiter *waiter)
+static int
+__try_to_take_rt_mutex(struct rt_mutex *lock, struct task_struct *task,
+ struct rt_mutex_waiter *waiter, int mode)
{
/*
* We have to be careful here if the atomic speedups are
@@ -341,12 +389,14 @@ static int try_to_take_rt_mutex(struct rt_mutex *lock, struct task_struct *task,
* 3) it is top waiter
*/
if (rt_mutex_has_waiters(lock)) {
- if (task->prio >= rt_mutex_top_waiter(lock)->list_entry.prio) {
- if (!waiter || waiter != rt_mutex_top_waiter(lock))
- return 0;
- }
+ struct task_struct *pown = rt_mutex_top_waiter(lock)->task;
+
+ if (task != pown && !lock_is_stealable(task, pown, mode))
+ return 0;
}
+ /* We got the lock. */
+
if (waiter || rt_mutex_has_waiters(lock)) {
unsigned long flags;
struct rt_mutex_waiter *top;
@@ -371,7 +421,6 @@ static int try_to_take_rt_mutex(struct rt_mutex *lock, struct task_struct *task,
raw_spin_unlock_irqrestore(&task->pi_lock, flags);
}
- /* We got the lock. */
debug_rt_mutex_lock(lock);
rt_mutex_set_owner(lock, task);
@@ -381,6 +430,13 @@ static int try_to_take_rt_mutex(struct rt_mutex *lock, struct task_struct *task,
return 1;
}
+static inline int
+try_to_take_rt_mutex(struct rt_mutex *lock, struct task_struct *task,
+ struct rt_mutex_waiter *waiter)
+{
+ return __try_to_take_rt_mutex(lock, task, waiter, STEAL_NORMAL);
+}
+
/*
* Task blocks on lock.
*
@@ -399,6 +455,23 @@ static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
int chain_walk = 0, res;
raw_spin_lock_irqsave(&task->pi_lock, flags);
+
+ /*
+ * In the case of futex requeue PI, this will be a proxy
+ * lock. The task will wake unaware that it is enqueued on
+ * this lock. Avoid blocking on two locks and corrupting
+ * pi_blocked_on via the PI_WAKEUP_INPROGRESS
+ * flag. futex_wait_requeue_pi() sets this when it wakes up
+ * before requeue (due to a signal or timeout). Do not enqueue
+ * the task if PI_WAKEUP_INPROGRESS is set.
+ */
+ if (task != current && task->pi_blocked_on == PI_WAKEUP_INPROGRESS) {
+ raw_spin_unlock_irqrestore(&task->pi_lock, flags);
+ return -EAGAIN;
+ }
+
+ BUG_ON(rt_mutex_real_waiter(task->pi_blocked_on));
+
__rt_mutex_adjust_prio(task);
waiter->task = task;
waiter->lock = lock;
@@ -423,7 +496,7 @@ static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
plist_add(&waiter->pi_list_entry, &owner->pi_waiters);
__rt_mutex_adjust_prio(owner);
- if (owner->pi_blocked_on)
+ if (rt_mutex_real_waiter(owner->pi_blocked_on))
chain_walk = 1;
raw_spin_unlock_irqrestore(&owner->pi_lock, flags);
}
@@ -478,7 +551,7 @@ static void wakeup_next_waiter(struct rt_mutex *lock)
raw_spin_unlock_irqrestore(&current->pi_lock, flags);
- wake_up_process(waiter->task);
+ rt_mutex_wake_waiter(waiter);
}
/*
@@ -517,7 +590,7 @@ static void remove_waiter(struct rt_mutex *lock,
}
__rt_mutex_adjust_prio(owner);
- if (owner->pi_blocked_on)
+ if (rt_mutex_real_waiter(owner->pi_blocked_on))
chain_walk = 1;
raw_spin_unlock_irqrestore(&owner->pi_lock, flags);
@@ -551,23 +624,316 @@ void rt_mutex_adjust_pi(struct task_struct *task)
raw_spin_lock_irqsave(&task->pi_lock, flags);
waiter = task->pi_blocked_on;
- if (!waiter || waiter->list_entry.prio == task->prio) {
+ if (!rt_mutex_real_waiter(waiter) ||
+ waiter->list_entry.prio == task->prio) {
raw_spin_unlock_irqrestore(&task->pi_lock, flags);
return;
}
- raw_spin_unlock_irqrestore(&task->pi_lock, flags);
-
/* gets dropped in rt_mutex_adjust_prio_chain()! */
get_task_struct(task);
+ raw_spin_unlock_irqrestore(&task->pi_lock, flags);
rt_mutex_adjust_prio_chain(task, 0, NULL, NULL, task);
}
+#ifdef CONFIG_PREEMPT_RT_FULL
+/*
+ * preemptible spin_lock functions:
+ */
+static inline void rt_spin_lock_fastlock(struct rt_mutex *lock,
+ void (*slowfn)(struct rt_mutex *lock))
+{
+ might_sleep();
+
+ if (likely(rt_mutex_cmpxchg(lock, NULL, current)))
+ rt_mutex_deadlock_account_lock(lock, current);
+ else
+ slowfn(lock);
+}
+
+static inline void rt_spin_lock_fastunlock(struct rt_mutex *lock,
+ void (*slowfn)(struct rt_mutex *lock))
+{
+ if (likely(rt_mutex_cmpxchg(lock, current, NULL)))
+ rt_mutex_deadlock_account_unlock(current);
+ else
+ slowfn(lock);
+}
+
+#ifdef CONFIG_SMP
+/*
+ * Note that owner is a speculative pointer and dereferencing relies
+ * on rcu_read_lock() and the check against the lock owner.
+ */
+static int adaptive_wait(struct rt_mutex *lock,
+ struct task_struct *owner)
+{
+ int res = 0;
+
+ rcu_read_lock();
+ for (;;) {
+ if (owner != rt_mutex_owner(lock))
+ break;
+ /*
+ * Ensure that owner->on_cpu is dereferenced _after_
+ * checking the above to be valid.
+ */
+ barrier();
+ if (!owner->on_cpu) {
+ res = 1;
+ break;
+ }
+ cpu_relax();
+ }
+ rcu_read_unlock();
+ return res;
+}
+#else
+static int adaptive_wait(struct rt_mutex *lock,
+ struct task_struct *orig_owner)
+{
+ return 1;
+}
+#endif
+
+# define pi_lock(lock) raw_spin_lock_irq(lock)
+# define pi_unlock(lock) raw_spin_unlock_irq(lock)
+
+/*
+ * Slow path lock function spin_lock style: this variant is very
+ * careful not to miss any non-lock wakeups.
+ *
+ * We store the current state under p->pi_lock in p->saved_state and
+ * the try_to_wake_up() code handles this accordingly.
+ */
+static void noinline __sched rt_spin_lock_slowlock(struct rt_mutex *lock)
+{
+ struct task_struct *lock_owner, *self = current;
+ struct rt_mutex_waiter waiter, *top_waiter;
+ int ret;
+
+ rt_mutex_init_waiter(&waiter, true);
+
+ raw_spin_lock(&lock->wait_lock);
+ init_lists(lock);
+
+ if (__try_to_take_rt_mutex(lock, self, NULL, STEAL_LATERAL)) {
+ raw_spin_unlock(&lock->wait_lock);
+ return;
+ }
+
+ BUG_ON(rt_mutex_owner(lock) == self);
+
+ /*
+ * We save whatever state the task is in and we'll restore it
+ * after acquiring the lock, taking real wakeups into account
+ * as well. We are serialized via pi_lock against wakeups. See
+ * try_to_wake_up().
+ */
+ pi_lock(&self->pi_lock);
+ self->saved_state = self->state;
+ __set_current_state(TASK_UNINTERRUPTIBLE);
+ pi_unlock(&self->pi_lock);
+
+ ret = task_blocks_on_rt_mutex(lock, &waiter, self, 0);
+ BUG_ON(ret);
+
+ for (;;) {
+ /* Try to acquire the lock again. */
+ if (__try_to_take_rt_mutex(lock, self, &waiter, STEAL_LATERAL))
+ break;
+
+ top_waiter = rt_mutex_top_waiter(lock);
+ lock_owner = rt_mutex_owner(lock);
+
+ raw_spin_unlock(&lock->wait_lock);
+
+ debug_rt_mutex_print_deadlock(&waiter);
+
+ if (top_waiter != &waiter || adaptive_wait(lock, lock_owner))
+ schedule_rt_mutex(lock);
+
+ raw_spin_lock(&lock->wait_lock);
+
+ pi_lock(&self->pi_lock);
+ __set_current_state(TASK_UNINTERRUPTIBLE);
+ pi_unlock(&self->pi_lock);
+ }
+
+ /*
+ * Restore the task state to current->saved_state. We set it
+ * to the original state above and the try_to_wake_up() code
+ * has possibly updated it when a real (non-rtmutex) wakeup
+ * happened while we were blocked. Clear saved_state so
+ * try_to_wake_up() does not get confused.
+ */
+ pi_lock(&self->pi_lock);
+ __set_current_state(self->saved_state);
+ self->saved_state = TASK_RUNNING;
+ pi_unlock(&self->pi_lock);
+
+ /*
+ * try_to_take_rt_mutex() sets the waiter bit
+ * unconditionally. We might have to fix that up:
+ */
+ fixup_rt_mutex_waiters(lock);
+
+ BUG_ON(rt_mutex_has_waiters(lock) && &waiter == rt_mutex_top_waiter(lock));
+ BUG_ON(!plist_node_empty(&waiter.list_entry));
+
+ raw_spin_unlock(&lock->wait_lock);
+
+ debug_rt_mutex_free_waiter(&waiter);
+}
+
+/*
+ * Slow path to release a rt_mutex spin_lock style
+ */
+static void noinline __sched rt_spin_lock_slowunlock(struct rt_mutex *lock)
+{
+ raw_spin_lock(&lock->wait_lock);
+
+ debug_rt_mutex_unlock(lock);
+
+ rt_mutex_deadlock_account_unlock(current);
+
+ if (!rt_mutex_has_waiters(lock)) {
+ lock->owner = NULL;
+ raw_spin_unlock(&lock->wait_lock);
+ return;
+ }
+
+ wakeup_next_waiter(lock);
+
+ raw_spin_unlock(&lock->wait_lock);
+
+ /* Undo PI boosting when necessary */
+ rt_mutex_adjust_prio(current);
+}
+
+void __lockfunc rt_spin_lock(spinlock_t *lock)
+{
+ rt_spin_lock_fastlock(&lock->lock, rt_spin_lock_slowlock);
+ spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
+}
+EXPORT_SYMBOL(rt_spin_lock);
+
+void __lockfunc __rt_spin_lock(struct rt_mutex *lock)
+{
+ rt_spin_lock_fastlock(lock, rt_spin_lock_slowlock);
+}
+EXPORT_SYMBOL(__rt_spin_lock);
+
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+void __lockfunc rt_spin_lock_nested(spinlock_t *lock, int subclass)
+{
+ rt_spin_lock_fastlock(&lock->lock, rt_spin_lock_slowlock);
+ spin_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
+}
+EXPORT_SYMBOL(rt_spin_lock_nested);
+#endif
+
+void __lockfunc rt_spin_unlock(spinlock_t *lock)
+{
+ /* NOTE: we always pass in '1' for nested, for simplicity */
+ spin_release(&lock->dep_map, 1, _RET_IP_);
+ rt_spin_lock_fastunlock(&lock->lock, rt_spin_lock_slowunlock);
+}
+EXPORT_SYMBOL(rt_spin_unlock);
+
+void __lockfunc __rt_spin_unlock(struct rt_mutex *lock)
+{
+ rt_spin_lock_fastunlock(lock, rt_spin_lock_slowunlock);
+}
+EXPORT_SYMBOL(__rt_spin_unlock);
+
+/*
+ * Wait for the lock to get unlocked: instead of polling for an unlock
+ * (like raw spinlocks do), we lock and unlock, to force the kernel to
+ * schedule if there's contention:
+ */
+void __lockfunc rt_spin_unlock_wait(spinlock_t *lock)
+{
+ spin_lock(lock);
+ spin_unlock(lock);
+}
+EXPORT_SYMBOL(rt_spin_unlock_wait);
+
+int __lockfunc rt_spin_trylock(spinlock_t *lock)
+{
+ int ret = rt_mutex_trylock(&lock->lock);
+
+ if (ret)
+ spin_acquire(&lock->dep_map, 0, 1, _RET_IP_);
+ return ret;
+}
+EXPORT_SYMBOL(rt_spin_trylock);
+
+int __lockfunc rt_spin_trylock_bh(spinlock_t *lock)
+{
+ int ret;
+
+ local_bh_disable();
+ ret = rt_mutex_trylock(&lock->lock);
+ if (ret) {
+ migrate_disable();
+ spin_acquire(&lock->dep_map, 0, 1, _RET_IP_);
+ } else
+ local_bh_enable();
+ return ret;
+}
+EXPORT_SYMBOL(rt_spin_trylock_bh);
+
+int __lockfunc rt_spin_trylock_irqsave(spinlock_t *lock, unsigned long *flags)
+{
+ int ret;
+
+ *flags = 0;
+ migrate_disable();
+ ret = rt_mutex_trylock(&lock->lock);
+ if (ret)
+ spin_acquire(&lock->dep_map, 0, 1, _RET_IP_);
+ else
+ migrate_enable();
+ return ret;
+}
+EXPORT_SYMBOL(rt_spin_trylock_irqsave);
+
+int atomic_dec_and_spin_lock(atomic_t *atomic, spinlock_t *lock)
+{
+ /* Subtract 1 from counter unless that drops it to 0 (ie. it was 1) */
+ if (atomic_add_unless(atomic, -1, 1))
+ return 0;
+ migrate_disable();
+ rt_spin_lock(lock);
+ if (atomic_dec_and_test(atomic))
+ return 1;
+ rt_spin_unlock(lock);
+ migrate_enable();
+ return 0;
+}
+EXPORT_SYMBOL(atomic_dec_and_spin_lock);
+
+void
+__rt_spin_lock_init(spinlock_t *lock, char *name, struct lock_class_key *key)
+{
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+ /*
+ * Make sure we are not reinitializing a held lock:
+ */
+ debug_check_no_locks_freed((void *)lock, sizeof(*lock));
+ lockdep_init_map(&lock->dep_map, name, key, 0);
+#endif
+}
+EXPORT_SYMBOL(__rt_spin_lock_init);
+
+#endif /* PREEMPT_RT_FULL */
+
/**
* __rt_mutex_slowlock() - Perform the wait-wake-try-to-take loop
* @lock: the rt_mutex to take
* @state: the state the task should block in (TASK_INTERRUPTIBLE
- * or TASK_UNINTERRUPTIBLE)
+ * or TASK_UNINTERRUPTIBLE)
* @timeout: the pre-initialized and started timer, or NULL for none
* @waiter: the pre-initialized rt_mutex_waiter
*
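The block above is what turns spinlock_t into a sleeping, priority-inheriting
lock on PREEMPT_RT_FULL. The practical consequence for callers: spin_lock()
may now block and must not be used from hard interrupt context; code that
genuinely needs an IRQ-disabling, spinning lock uses raw_spinlock_t instead.
A minimal sketch of the resulting split (names are illustrative):

    static DEFINE_RAW_SPINLOCK(hw_lock);  /* real spinlock: never sleeps */
    static DEFINE_SPINLOCK(data_lock);    /* rtmutex-backed on RT: may sleep */

    static void update_data(void)
    {
            spin_lock(&data_lock);  /* on RT: preemptible, PI-boosts the owner */
            /* ... critical section, now preemptible on RT ... */
            spin_unlock(&data_lock);
    }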
@@ -623,9 +989,10 @@ rt_mutex_slowlock(struct rt_mutex *lock, int state,
struct rt_mutex_waiter waiter;
int ret = 0;
- debug_rt_mutex_init_waiter(&waiter);
+ rt_mutex_init_waiter(&waiter, false);
raw_spin_lock(&lock->wait_lock);
+ init_lists(lock);
/* Try to acquire the lock again: */
if (try_to_take_rt_mutex(lock, current, NULL)) {
@@ -678,6 +1045,7 @@ rt_mutex_slowtrylock(struct rt_mutex *lock)
int ret = 0;
raw_spin_lock(&lock->wait_lock);
+ init_lists(lock);
if (likely(rt_mutex_owner(lock) != current)) {
@@ -791,12 +1159,12 @@ EXPORT_SYMBOL_GPL(rt_mutex_lock);
/**
* rt_mutex_lock_interruptible - lock a rt_mutex interruptible
*
- * @lock: the rt_mutex to be locked
+ * @lock: the rt_mutex to be locked
* @detect_deadlock: deadlock detection on/off
*
* Returns:
- * 0 on success
- * -EINTR when interrupted by a signal
+ * 0 on success
+ * -EINTR when interrupted by a signal
* -EDEADLK when the lock would deadlock (when deadlock detection is on)
*/
int __sched rt_mutex_lock_interruptible(struct rt_mutex *lock,
@@ -810,17 +1178,38 @@ int __sched rt_mutex_lock_interruptible(struct rt_mutex *lock,
EXPORT_SYMBOL_GPL(rt_mutex_lock_interruptible);
/**
+ * rt_mutex_lock_killable - lock a rt_mutex killable
+ *
+ * @lock: the rt_mutex to be locked
+ * @detect_deadlock: deadlock detection on/off
+ *
+ * Returns:
+ * 0 on success
+ * -EINTR when interrupted by a signal
+ * -EDEADLK when the lock would deadlock (when deadlock detection is on)
+ */
+int __sched rt_mutex_lock_killable(struct rt_mutex *lock,
+ int detect_deadlock)
+{
+ might_sleep();
+
+ return rt_mutex_fastlock(lock, TASK_KILLABLE,
+ detect_deadlock, rt_mutex_slowlock);
+}
+EXPORT_SYMBOL_GPL(rt_mutex_lock_killable);
+
+/**
* rt_mutex_timed_lock - lock a rt_mutex interruptible
* the timeout structure is provided
* by the caller
*
- * @lock: the rt_mutex to be locked
+ * @lock: the rt_mutex to be locked
* @timeout: timeout structure or NULL (no timeout)
* @detect_deadlock: deadlock detection on/off
*
* Returns:
- * 0 on success
- * -EINTR when interrupted by a signal
+ * 0 on success
+ * -EINTR when interrupted by a signal
* -ETIMEDOUT when the timeout expired
* -EDEADLK when the lock would deadlock (when deadlock detection is on)
*/
@@ -889,12 +1278,11 @@ EXPORT_SYMBOL_GPL(rt_mutex_destroy);
void __rt_mutex_init(struct rt_mutex *lock, const char *name)
{
lock->owner = NULL;
- raw_spin_lock_init(&lock->wait_lock);
plist_head_init(&lock->wait_list);
debug_rt_mutex_init(lock, name);
}
-EXPORT_SYMBOL_GPL(__rt_mutex_init);
+EXPORT_SYMBOL(__rt_mutex_init);
/**
* rt_mutex_init_proxy_locked - initialize and lock a rt_mutex on behalf of a
@@ -909,7 +1297,7 @@ EXPORT_SYMBOL_GPL(__rt_mutex_init);
void rt_mutex_init_proxy_locked(struct rt_mutex *lock,
struct task_struct *proxy_owner)
{
- __rt_mutex_init(lock, NULL);
+ rt_mutex_init(lock);
debug_rt_mutex_proxy_lock(lock, proxy_owner);
rt_mutex_set_owner(lock, proxy_owner);
rt_mutex_deadlock_account_lock(lock, proxy_owner);
@@ -958,6 +1346,35 @@ int rt_mutex_start_proxy_lock(struct rt_mutex *lock,
return 1;
}
+#ifdef CONFIG_PREEMPT_RT_FULL
+ /*
+ * In PREEMPT_RT there's an added race.
+ * If the task that we are about to requeue times out,
+ * it can set PI_WAKEUP_INPROGRESS. This tells the requeue
+ * to skip this task. But right after the task sets
+ * its pi_blocked_on to PI_WAKEUP_INPROGRESS it can then
+ * block on the spin_lock(&hb->lock), which in RT is an rtmutex.
+ * This will replace the PI_WAKEUP_INPROGRESS with the actual
+ * lock that it blocks on. We *must not* place this task
+ * on this proxy lock in that case.
+ *
+ * To prevent this race, we first take the task's pi_lock
+ * and check if it has updated its pi_blocked_on. If it has,
+ * we assume that it woke up and we return -EAGAIN.
+ * Otherwise, we set the task's pi_blocked_on to
+ * PI_REQUEUE_INPROGRESS, so that if the task is waking up
+ * it will know that we are in the process of requeuing it.
+ */
+ raw_spin_lock_irq(&task->pi_lock);
+ if (task->pi_blocked_on) {
+ raw_spin_unlock_irq(&task->pi_lock);
+ raw_spin_unlock(&lock->wait_lock);
+ return -EAGAIN;
+ }
+ task->pi_blocked_on = PI_REQUEUE_INPROGRESS;
+ raw_spin_unlock_irq(&task->pi_lock);
+#endif
+
ret = task_blocks_on_rt_mutex(lock, waiter, task, detect_deadlock);
if (ret && !rt_mutex_owner(lock)) {
diff --git a/kernel/rtmutex_common.h b/kernel/rtmutex_common.h
index 53a66c85261b..6ec3dc1eab10 100644
--- a/kernel/rtmutex_common.h
+++ b/kernel/rtmutex_common.h
@@ -49,6 +49,7 @@ struct rt_mutex_waiter {
struct plist_node pi_list_entry;
struct task_struct *task;
struct rt_mutex *lock;
+ bool savestate;
#ifdef CONFIG_DEBUG_RT_MUTEXES
unsigned long ip;
struct pid *deadlock_task_pid;
@@ -103,6 +104,9 @@ static inline struct task_struct *rt_mutex_owner(struct rt_mutex *lock)
/*
* PI-futex support (proxy locking functions, etc.):
*/
+#define PI_WAKEUP_INPROGRESS ((struct rt_mutex_waiter *) 1)
+#define PI_REQUEUE_INPROGRESS ((struct rt_mutex_waiter *) 2)
+
extern struct task_struct *rt_mutex_next_owner(struct rt_mutex *lock);
extern void rt_mutex_init_proxy_locked(struct rt_mutex *lock,
struct task_struct *proxy_owner);
@@ -123,4 +127,12 @@ extern int rt_mutex_finish_proxy_lock(struct rt_mutex *lock,
# include "rtmutex.h"
#endif
+static inline void
+rt_mutex_init_waiter(struct rt_mutex_waiter *waiter, bool savestate)
+{
+ debug_rt_mutex_init_waiter(waiter);
+ waiter->task = NULL;
+ waiter->savestate = savestate;
+}
+
#endif
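PI_WAKEUP_INPROGRESS and PI_REQUEUE_INPROGRESS are sentinel pointer values;
1 and 2 can never be valid kernel addresses, so a plain comparison separates a
task that is really blocked from one whose wakeup or requeue is in flight.
rt_mutex_real_waiter() in rtmutex.c encodes exactly that test; an equivalent
caller-side check would look like this (hypothetical helper, for illustration):

    static bool task_really_blocked(struct task_struct *t)
    {
            struct rt_mutex_waiter *w = t->pi_blocked_on;

            /* the sentinels are not real waiters */
            return w && w != PI_WAKEUP_INPROGRESS && w != PI_REQUEUE_INPROGRESS;
    }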
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 4603b9d8f30a..5fa31919118f 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -263,7 +263,11 @@ late_initcall(sched_init_debug);
* Number of tasks to iterate in a single balance run.
* Limited because this is done with IRQs disabled.
*/
+#ifndef CONFIG_PREEMPT_RT_FULL
const_debug unsigned int sysctl_sched_nr_migrate = 32;
+#else
+const_debug unsigned int sysctl_sched_nr_migrate = 8;
+#endif
/*
* period over which we average the RT time consumption, measured
@@ -480,6 +484,7 @@ static void init_rq_hrtick(struct rq *rq)
hrtimer_init(&rq->hrtick_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
rq->hrtick_timer.function = hrtick;
+ rq->hrtick_timer.irqsafe = 1;
}
#else /* CONFIG_SCHED_HRTICK */
static inline void hrtick_clear(struct rq *rq)
@@ -1169,7 +1174,8 @@ unsigned long wait_task_inactive(struct task_struct *p, long match_state)
* is actually now running somewhere else!
*/
while (task_running(rq, p)) {
- if (match_state && unlikely(p->state != match_state))
+ if (match_state && unlikely(p->state != match_state)
+ && unlikely(p->saved_state != match_state))
return 0;
cpu_relax();
}
@@ -1184,7 +1190,8 @@ unsigned long wait_task_inactive(struct task_struct *p, long match_state)
running = task_running(rq, p);
on_rq = p->on_rq;
ncsw = 0;
- if (!match_state || p->state == match_state)
+ if (!match_state || p->state == match_state
+ || p->saved_state == match_state)
ncsw = p->nvcsw | LONG_MIN; /* sets MSB */
task_rq_unlock(rq, p, &flags);
@@ -1320,6 +1327,12 @@ out:
}
}
+ /*
+ * Clear PF_THREAD_BOUND, otherwise we wreck
+ * migrate_disable/enable. See the optimization for
+ * PF_THREAD_BOUND tasks there.
+ */
+ p->flags &= ~PF_THREAD_BOUND;
return dest_cpu;
}
@@ -1399,10 +1412,6 @@ static void ttwu_activate(struct rq *rq, struct task_struct *p, int en_flags)
{
activate_task(rq, p, en_flags);
p->on_rq = 1;
-
- /* if a worker is waking up, notify workqueue */
- if (p->flags & PF_WQ_WORKER)
- wq_worker_waking_up(p, cpu_of(rq));
}
/*
@@ -1585,8 +1594,27 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
smp_wmb();
raw_spin_lock_irqsave(&p->pi_lock, flags);
- if (!(p->state & state))
+ if (!(p->state & state)) {
+ /*
+ * The task might be running due to a spinlock sleeper
+ * wakeup. Check the saved state and set it to running
+ * if the wakeup condition is true.
+ */
+ if (!(wake_flags & WF_LOCK_SLEEPER)) {
+ if (p->saved_state & state) {
+ p->saved_state = TASK_RUNNING;
+ success = 1;
+ }
+ }
goto out;
+ }
+
+ /*
+ * If this is a regular wakeup, then we can unconditionally
+ * clear the saved state of a "lock sleeper".
+ */
+ if (!(wake_flags & WF_LOCK_SLEEPER))
+ p->saved_state = TASK_RUNNING;
success = 1; /* we're going to change ->state */
cpu = task_cpu(p);
@@ -1642,40 +1670,6 @@ out:
}
/**
- * try_to_wake_up_local - try to wake up a local task with rq lock held
- * @p: the thread to be awakened
- *
- * Put @p on the run-queue if it's not already there. The caller must
- * ensure that this_rq() is locked, @p is bound to this_rq() and not
- * the current task.
- */
-static void try_to_wake_up_local(struct task_struct *p)
-{
- struct rq *rq = task_rq(p);
-
- BUG_ON(rq != this_rq());
- BUG_ON(p == current);
- lockdep_assert_held(&rq->lock);
-
- if (!raw_spin_trylock(&p->pi_lock)) {
- raw_spin_unlock(&rq->lock);
- raw_spin_lock(&p->pi_lock);
- raw_spin_lock(&rq->lock);
- }
-
- if (!(p->state & TASK_NORMAL))
- goto out;
-
- if (!p->on_rq)
- ttwu_activate(rq, p, ENQUEUE_WAKEUP);
-
- ttwu_do_wakeup(rq, p, 0);
- ttwu_stat(p, smp_processor_id(), 0);
-out:
- raw_spin_unlock(&p->pi_lock);
-}
-
-/**
* wake_up_process - Wake up a specific process
* @p: The process to be woken up.
*
@@ -1692,6 +1686,18 @@ int wake_up_process(struct task_struct *p)
}
EXPORT_SYMBOL(wake_up_process);
+/**
+ * wake_up_lock_sleeper - Wake up a specific process blocked on a "sleeping lock"
+ * @p: The process to be woken up.
+ *
+ * Same as wake_up_process() above, but wake_flags=WF_LOCK_SLEEPER to indicate
+ * the nature of the wakeup.
+ */
+int wake_up_lock_sleeper(struct task_struct *p)
+{
+ return try_to_wake_up(p, TASK_ALL, WF_LOCK_SLEEPER);
+}
+
int wake_up_state(struct task_struct *p, unsigned int state)
{
return try_to_wake_up(p, state, 0);
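The saved_state handling added to try_to_wake_up() above closes a subtle hole:
a task waiting for an event in TASK_INTERRUPTIBLE may transparently block on an
rtmutex-backed spinlock in TASK_UNINTERRUPTIBLE, and a regular wakeup arriving
in that window must not be lost. An illustrative timeline, assuming the code
above:

    /*
     * 1. task: state = TASK_INTERRUPTIBLE, waiting for an event
     * 2. task blocks on an RT "spinlock":
     *        saved_state = TASK_INTERRUPTIBLE, state = TASK_UNINTERRUPTIBLE
     * 3. event wakeup: try_to_wake_up(p, TASK_INTERRUPTIBLE, 0)
     *        p->state does not match, but p->saved_state does
     *        -> saved_state = TASK_RUNNING: the wakeup is recorded
     * 4. lock released: wake_up_lock_sleeper(p), i.e. WF_LOCK_SLEEPER
     *        -> saved_state is deliberately left untouched
     * 5. rt_spin_lock_slowlock() restores state from saved_state,
     *    so the task runs on and sees the event
     */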
@@ -1967,8 +1973,12 @@ static void finish_task_switch(struct rq *rq, struct task_struct *prev)
finish_arch_post_lock_switch();
fire_sched_in_preempt_notifiers(current);
+ /*
+ * We use mmdrop_delayed() here so we don't have to do the
+ * full __mmdrop() when we are the last user.
+ */
if (mm)
- mmdrop(mm);
+ mmdrop_delayed(mm);
if (unlikely(prev_state == TASK_DEAD)) {
/*
* Remove function-return probe instances associated with this
@@ -3134,6 +3144,126 @@ static inline void schedule_debug(struct task_struct *prev)
schedstat_inc(this_rq(), sched_count);
}
+#if defined(CONFIG_PREEMPT_RT_FULL) && defined(CONFIG_SMP)
+#define MIGRATE_DISABLE_SET_AFFIN (1<<30) /* Can't make a negative */
+#define migrate_disabled_updated(p) ((p)->migrate_disable & MIGRATE_DISABLE_SET_AFFIN)
+#define migrate_disable_count(p) ((p)->migrate_disable & ~MIGRATE_DISABLE_SET_AFFIN)
+
+static inline void update_migrate_disable(struct task_struct *p)
+{
+ const struct cpumask *mask;
+
+ if (likely(!p->migrate_disable))
+ return;
+
+ /* Did we already update affinity? */
+ if (unlikely(migrate_disabled_updated(p)))
+ return;
+
+ /*
+ * Since this is always current we can get away with only locking
+ * rq->lock, the ->cpus_allowed value can normally only be changed
+ * while holding both p->pi_lock and rq->lock, but seeing that this
+ * is current, we cannot actually be waking up, so all code that
+ * relies on serialization against p->pi_lock is out of scope.
+ *
+ * Having rq->lock serializes us against things like
+ * set_cpus_allowed_ptr() that can still happen concurrently.
+ */
+ mask = tsk_cpus_allowed(p);
+
+ if (p->sched_class->set_cpus_allowed)
+ p->sched_class->set_cpus_allowed(p, mask);
+ p->rt.nr_cpus_allowed = cpumask_weight(mask);
+
+ /* Let migrate_enable know to fix things back up */
+ p->migrate_disable |= MIGRATE_DISABLE_SET_AFFIN;
+}
+
+void migrate_disable(void)
+{
+ struct task_struct *p = current;
+
+ if (in_atomic() || p->flags & PF_THREAD_BOUND) {
+#ifdef CONFIG_SCHED_DEBUG
+ p->migrate_disable_atomic++;
+#endif
+ return;
+ }
+
+#ifdef CONFIG_SCHED_DEBUG
+ WARN_ON_ONCE(p->migrate_disable_atomic);
+#endif
+
+ preempt_disable();
+ if (p->migrate_disable) {
+ p->migrate_disable++;
+ preempt_enable();
+ return;
+ }
+
+ pin_current_cpu();
+ p->migrate_disable = 1;
+ preempt_enable();
+}
+EXPORT_SYMBOL(migrate_disable);
+
+void migrate_enable(void)
+{
+ struct task_struct *p = current;
+ const struct cpumask *mask;
+ unsigned long flags;
+ struct rq *rq;
+
+ if (in_atomic() || p->flags & PF_THREAD_BOUND) {
+#ifdef CONFIG_SCHED_DEBUG
+ p->migrate_disable_atomic--;
+#endif
+ return;
+ }
+
+#ifdef CONFIG_SCHED_DEBUG
+ WARN_ON_ONCE(p->migrate_disable_atomic);
+#endif
+ WARN_ON_ONCE(p->migrate_disable <= 0);
+
+ preempt_disable();
+ if (migrate_disable_count(p) > 1) {
+ p->migrate_disable--;
+ preempt_enable();
+ return;
+ }
+
+ if (unlikely(migrate_disabled_updated(p))) {
+ /*
+ * Undo whatever update_migrate_disable() did, also see there
+ * about locking.
+ */
+ rq = this_rq();
+ raw_spin_lock_irqsave(&rq->lock, flags);
+
+ /*
+ * Clearing migrate_disable causes tsk_cpus_allowed to
+ * show the tasks original cpu affinity.
+ */
+ p->migrate_disable = 0;
+ mask = tsk_cpus_allowed(p);
+ if (p->sched_class->set_cpus_allowed)
+ p->sched_class->set_cpus_allowed(p, mask);
+ p->rt.nr_cpus_allowed = cpumask_weight(mask);
+ raw_spin_unlock_irqrestore(&rq->lock, flags);
+ } else
+ p->migrate_disable = 0;
+
+ unpin_current_cpu();
+ preempt_enable();
+}
+EXPORT_SYMBOL(migrate_enable);
+#else
+static inline void update_migrate_disable(struct task_struct *p) { }
+#define migrate_disabled_updated(p) 0
+#endif
+
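migrate_disable()/migrate_enable() give RT a preemptible replacement for the
usual get_cpu()/put_cpu() pinning: the task may still be preempted, but it
will not be moved to another CPU, so smp_processor_id() stays stable across
the section. A hypothetical usage sketch (my_counter is an assumed example
variable; concurrent access from other tasks on the same CPU would still need
its own serialization):

    static DEFINE_PER_CPU(int, my_counter);

    static void count_event(void)
    {
            migrate_disable();      /* pinned to this CPU, but preemptible */
            __get_cpu_var(my_counter)++;
            migrate_enable();
    }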
static void put_prev_task(struct rq *rq, struct task_struct *prev)
{
if (prev->on_rq || rq->skip_clock_update < 0)
@@ -3193,6 +3323,8 @@ need_resched:
raw_spin_lock_irq(&rq->lock);
+ update_migrate_disable(prev);
+
switch_count = &prev->nivcsw;
if (prev->state && !(preempt_count() & PREEMPT_ACTIVE)) {
if (unlikely(signal_pending_state(prev->state, prev))) {
@@ -3200,19 +3332,6 @@ need_resched:
} else {
deactivate_task(rq, prev, DEQUEUE_SLEEP);
prev->on_rq = 0;
-
- /*
- * If a worker went to sleep, notify and ask workqueue
- * whether it wants to wake up a task to maintain
- * concurrency.
- */
- if (prev->flags & PF_WQ_WORKER) {
- struct task_struct *to_wakeup;
-
- to_wakeup = wq_worker_sleeping(prev, cpu);
- if (to_wakeup)
- try_to_wake_up_local(to_wakeup);
- }
}
switch_count = &prev->nvcsw;
}
@@ -3255,6 +3374,14 @@ static inline void sched_submit_work(struct task_struct *tsk)
{
if (!tsk->state || tsk_is_pi_blocked(tsk))
return;
+
+ /*
+ * If a worker went to sleep, notify and ask workqueue whether
+ * it wants to wake up a task to maintain concurrency.
+ */
+ if (tsk->flags & PF_WQ_WORKER)
+ wq_worker_sleeping(tsk);
+
/*
* If we are going to sleep and we have plugged IO queued,
* make sure to submit it to avoid deadlocks.
@@ -3263,12 +3390,19 @@ static inline void sched_submit_work(struct task_struct *tsk)
blk_schedule_flush_plug(tsk);
}
+static inline void sched_update_worker(struct task_struct *tsk)
+{
+ if (tsk->flags & PF_WQ_WORKER)
+ wq_worker_running(tsk);
+}
+
asmlinkage void __sched schedule(void)
{
struct task_struct *tsk = current;
sched_submit_work(tsk);
__schedule();
+ sched_update_worker(tsk);
}
EXPORT_SYMBOL(schedule);
@@ -3348,7 +3482,16 @@ asmlinkage void __sched notrace preempt_schedule(void)
do {
add_preempt_count_notrace(PREEMPT_ACTIVE);
+ /*
+ * The add/subtract must not be traced by the function
+ * tracer. But we still want to account for the
+ * preempt off latency tracer. Since the _notrace versions
+ * of add/subtract skip the accounting for the latency tracer,
+ * we must force it manually.
+ */
+ start_critical_timings();
__schedule();
+ stop_critical_timings();
sub_preempt_count_notrace(PREEMPT_ACTIVE);
/*
@@ -4543,9 +4686,17 @@ static inline int should_resched(void)
static void __cond_resched(void)
{
- add_preempt_count(PREEMPT_ACTIVE);
- __schedule();
- sub_preempt_count(PREEMPT_ACTIVE);
+ do {
+ add_preempt_count(PREEMPT_ACTIVE);
+ __schedule();
+ sub_preempt_count(PREEMPT_ACTIVE);
+ /*
+ * Check again in case we missed a preemption
+ * opportunity between schedule and now.
+ */
+ barrier();
+
+ } while (need_resched());
}
int __sched _cond_resched(void)
@@ -4586,6 +4737,7 @@ int __cond_resched_lock(spinlock_t *lock)
}
EXPORT_SYMBOL(__cond_resched_lock);
+#ifndef CONFIG_PREEMPT_RT_FULL
int __sched __cond_resched_softirq(void)
{
BUG_ON(!in_softirq());
@@ -4599,6 +4751,7 @@ int __sched __cond_resched_softirq(void)
return 0;
}
EXPORT_SYMBOL(__cond_resched_softirq);
+#endif
/**
* yield - yield the current processor to other threads.
@@ -4953,11 +5106,12 @@ void __cpuinit init_idle(struct task_struct *idle, int cpu)
#ifdef CONFIG_SMP
void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
{
- if (p->sched_class && p->sched_class->set_cpus_allowed)
- p->sched_class->set_cpus_allowed(p, new_mask);
-
+ if (!migrate_disabled_updated(p)) {
+ if (p->sched_class && p->sched_class->set_cpus_allowed)
+ p->sched_class->set_cpus_allowed(p, new_mask);
+ p->rt.nr_cpus_allowed = cpumask_weight(new_mask);
+ }
cpumask_copy(&p->cpus_allowed, new_mask);
- p->rt.nr_cpus_allowed = cpumask_weight(new_mask);
}
/*
@@ -5008,7 +5162,7 @@ int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
do_set_cpus_allowed(p, new_mask);
/* Can the task run on the task's current CPU? If so, we're done */
- if (cpumask_test_cpu(task_cpu(p), new_mask))
+ if (cpumask_test_cpu(task_cpu(p), new_mask) || __migrate_disabled(p))
goto out;
dest_cpu = cpumask_any_and(cpu_active_mask, new_mask);
@@ -5097,6 +5251,8 @@ static int migration_cpu_stop(void *data)
#ifdef CONFIG_HOTPLUG_CPU
+static DEFINE_PER_CPU(struct mm_struct *, idle_last_mm);
+
/*
* Ensures that the idle task is using init_mm right before its cpu goes
* offline.
@@ -5109,7 +5265,12 @@ void idle_task_exit(void)
if (mm != &init_mm)
switch_mm(mm, &init_mm, current);
- mmdrop(mm);
+
+ /*
+ * Defer the cleanup to a live CPU. On RT we can neither
+ * call mmdrop() nor mmdrop_delayed() from here.
+ */
+ per_cpu(idle_last_mm, smp_processor_id()) = mm;
}
/*
@@ -5430,6 +5591,12 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
migrate_nr_uninterruptible(rq);
calc_global_load_remove(rq);
break;
+ case CPU_DEAD:
+ if (per_cpu(idle_last_mm, cpu)) {
+ mmdrop(per_cpu(idle_last_mm, cpu));
+ per_cpu(idle_last_mm, cpu) = NULL;
+ }
+ break;
#endif
}
@@ -7058,7 +7225,8 @@ void __init sched_init(void)
#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
static inline int preempt_count_equals(int preempt_offset)
{
- int nested = (preempt_count() & ~PREEMPT_ACTIVE) + rcu_preempt_depth();
+ int nested = (preempt_count() & ~PREEMPT_ACTIVE) +
+ sched_rcu_preempt_depth();
return (nested == preempt_offset);
}
diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c
index 09acaa15161d..451512ff5f30 100644
--- a/kernel/sched/debug.c
+++ b/kernel/sched/debug.c
@@ -237,6 +237,9 @@ void print_rt_rq(struct seq_file *m, int cpu, struct rt_rq *rt_rq)
P(rt_throttled);
PN(rt_time);
PN(rt_runtime);
+#ifdef CONFIG_SMP
+ P(rt_nr_migratory);
+#endif
#undef PN
#undef P
@@ -485,6 +488,10 @@ void proc_sched_show_task(struct task_struct *p, struct seq_file *m)
P(se.load.weight);
P(policy);
P(prio);
+#ifdef CONFIG_PREEMPT_RT_FULL
+ P(migrate_disable);
+#endif
+ P(rt.nr_cpus_allowed);
#undef PN
#undef __PN
#undef P
diff --git a/kernel/sched/features.h b/kernel/sched/features.h
index e61fd73913d0..8e047dd4e4e1 100644
--- a/kernel/sched/features.h
+++ b/kernel/sched/features.h
@@ -60,11 +60,15 @@ SCHED_FEAT(OWNER_SPIN, true)
*/
SCHED_FEAT(NONTASK_POWER, true)
+#ifndef CONFIG_PREEMPT_RT_FULL
/*
* Queue remote wakeups on the target CPU and process them
* using the scheduler IPI. Reduces rq->lock contention/bounces.
*/
SCHED_FEAT(TTWU_QUEUE, true)
+#else
+SCHED_FEAT(TTWU_QUEUE, false)
+#endif
SCHED_FEAT(FORCE_SD_OVERLAP, false)
SCHED_FEAT(RT_RUNTIME_SHARE, true)
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index 44af55e6d5d0..8bb9f00f8b49 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -41,6 +41,7 @@ void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime)
hrtimer_init(&rt_b->rt_period_timer,
CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ rt_b->rt_period_timer.irqsafe = 1;
rt_b->rt_period_timer.function = sched_rt_period_timer;
}
diff --git a/kernel/signal.c b/kernel/signal.c
index 17afcaf582d0..3d326518c1a7 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -345,13 +345,45 @@ static bool task_participate_group_stop(struct task_struct *task)
return false;
}
+#ifdef __HAVE_ARCH_CMPXCHG
+static inline struct sigqueue *get_task_cache(struct task_struct *t)
+{
+ struct sigqueue *q = t->sigqueue_cache;
+
+ if (cmpxchg(&t->sigqueue_cache, q, NULL) != q)
+ return NULL;
+ return q;
+}
+
+static inline int put_task_cache(struct task_struct *t, struct sigqueue *q)
+{
+ if (cmpxchg(&t->sigqueue_cache, NULL, q) == NULL)
+ return 0;
+ return 1;
+}
+
+#else
+
+static inline struct sigqueue *get_task_cache(struct task_struct *t)
+{
+ return NULL;
+}
+
+static inline int put_task_cache(struct task_struct *t, struct sigqueue *q)
+{
+ return 1;
+}
+
+#endif
+
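get_task_cache()/put_task_cache() form a lock-free, single-slot cache: taking
swaps the slot to NULL, and putting back succeeds only while the slot is
empty, so at most one sigqueue is ever cached per task. The same pattern in
generic form (hypothetical helpers, not part of the patch):

    /* take: returns the cached object or NULL; the slot ends up empty */
    static void *slot_take(void **slot)
    {
            void *p = *slot;

            if (!p || cmpxchg(slot, p, NULL) != p)
                    return NULL;
            return p;
    }

    /* put: returns 0 if 'p' was parked in the slot, 1 if it was full */
    static int slot_put(void **slot, void *p)
    {
            return cmpxchg(slot, NULL, p) != NULL;
    }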
/*
* allocate a new signal queue record
* - this may be called without locks if and only if t == current, otherwise an
* appropriate lock must be held to stop the target task from exiting
*/
static struct sigqueue *
-__sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimit)
+__sigqueue_do_alloc(int sig, struct task_struct *t, gfp_t flags,
+ int override_rlimit, int fromslab)
{
struct sigqueue *q = NULL;
struct user_struct *user;
@@ -368,7 +400,10 @@ __sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimi
if (override_rlimit ||
atomic_read(&user->sigpending) <=
task_rlimit(t, RLIMIT_SIGPENDING)) {
- q = kmem_cache_alloc(sigqueue_cachep, flags);
+ if (!fromslab)
+ q = get_task_cache(t);
+ if (!q)
+ q = kmem_cache_alloc(sigqueue_cachep, flags);
} else {
print_dropped_signal(sig);
}
@@ -385,6 +420,13 @@ __sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimi
return q;
}
+static struct sigqueue *
+__sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags,
+ int override_rlimit)
+{
+ return __sigqueue_do_alloc(sig, t, flags, override_rlimit, 0);
+}
+
static void __sigqueue_free(struct sigqueue *q)
{
if (q->flags & SIGQUEUE_PREALLOC)
@@ -394,6 +436,21 @@ static void __sigqueue_free(struct sigqueue *q)
kmem_cache_free(sigqueue_cachep, q);
}
+static void sigqueue_free_current(struct sigqueue *q)
+{
+ struct user_struct *up;
+
+ if (q->flags & SIGQUEUE_PREALLOC)
+ return;
+
+ up = q->user;
+ if (rt_prio(current->normal_prio) && !put_task_cache(current, q)) {
+ atomic_dec(&up->sigpending);
+ free_uid(up);
+ } else
+ __sigqueue_free(q);
+}
+
void flush_sigqueue(struct sigpending *queue)
{
struct sigqueue *q;
@@ -407,6 +464,21 @@ void flush_sigqueue(struct sigpending *queue)
}
/*
+ * Called from __exit_signal. Flush tsk->pending and
+ * tsk->sigqueue_cache
+ */
+void flush_task_sigqueue(struct task_struct *tsk)
+{
+ struct sigqueue *q;
+
+ flush_sigqueue(&tsk->pending);
+
+ q = get_task_cache(tsk);
+ if (q)
+ kmem_cache_free(sigqueue_cachep, q);
+}
+
+/*
* Flush all pending signals for a task.
*/
void __flush_signals(struct task_struct *t)
@@ -555,7 +627,7 @@ static void collect_signal(int sig, struct sigpending *list, siginfo_t *info)
still_pending:
list_del_init(&first->list);
copy_siginfo(info, &first->info);
- __sigqueue_free(first);
+ sigqueue_free_current(first);
} else {
/*
* Ok, it wasn't in the queue. This must be
@@ -601,6 +673,8 @@ int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
{
int signr;
+ WARN_ON_ONCE(tsk != current);
+
/* We only dequeue private signals from ourselves, we don't let
* signalfd steal them
*/
@@ -683,6 +757,9 @@ void signal_wake_up(struct task_struct *t, int resume)
set_tsk_thread_flag(t, TIF_SIGPENDING);
+ if (unlikely(t == current))
+ return;
+
/*
* For SIGKILL, we want to wake it up in the stopped/traced/killable
* case. We don't check t->state here because there is a race with it
@@ -1235,8 +1312,8 @@ int do_send_sig_info(int sig, struct siginfo *info, struct task_struct *p,
* We don't want to have recursive SIGSEGV's etc, for example,
* that is why we also clear SIGNAL_UNKILLABLE.
*/
-int
-force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
+static int
+do_force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
{
unsigned long int flags;
int ret, blocked, ignored;
@@ -1261,6 +1338,39 @@ force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
return ret;
}
+int force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
+{
+/*
+ * On some archs, PREEMPT_RT has to delay sending a signal from a trap
+ * since it cannot enable preemption, and the signal code's spin_locks
+ * turn into mutexes. Instead, it must set TIF_NOTIFY_RESUME which will
+ * send the signal on exit of the trap.
+ */
+#ifdef ARCH_RT_DELAYS_SIGNAL_SEND
+ if (in_atomic()) {
+ if (WARN_ON_ONCE(t != current))
+ return 0;
+ if (WARN_ON_ONCE(t->forced_info.si_signo))
+ return 0;
+
+ if (is_si_special(info)) {
+ WARN_ON_ONCE(info != SEND_SIG_PRIV);
+ t->forced_info.si_signo = sig;
+ t->forced_info.si_errno = 0;
+ t->forced_info.si_code = SI_KERNEL;
+ t->forced_info.si_pid = 0;
+ t->forced_info.si_uid = 0;
+ } else {
+ t->forced_info = *info;
+ }
+
+ set_tsk_thread_flag(t, TIF_NOTIFY_RESUME);
+ return 0;
+ }
+#endif
+ return do_force_sig_info(sig, info, t);
+}
+
/*
* Nuke all other threads in the group.
*/
@@ -1291,12 +1401,12 @@ struct sighand_struct *__lock_task_sighand(struct task_struct *tsk,
struct sighand_struct *sighand;
for (;;) {
- local_irq_save(*flags);
+ local_irq_save_nort(*flags);
rcu_read_lock();
sighand = rcu_dereference(tsk->sighand);
if (unlikely(sighand == NULL)) {
rcu_read_unlock();
- local_irq_restore(*flags);
+ local_irq_restore_nort(*flags);
break;
}
@@ -1307,7 +1417,7 @@ struct sighand_struct *__lock_task_sighand(struct task_struct *tsk,
}
spin_unlock(&sighand->siglock);
rcu_read_unlock();
- local_irq_restore(*flags);
+ local_irq_restore_nort(*flags);
}
return sighand;
@@ -1554,7 +1664,8 @@ EXPORT_SYMBOL(kill_pid);
*/
struct sigqueue *sigqueue_alloc(void)
{
- struct sigqueue *q = __sigqueue_alloc(-1, current, GFP_KERNEL, 0);
+ /* Preallocated sigqueue objects always come from the slab cache! */
+ struct sigqueue *q = __sigqueue_do_alloc(-1, current, GFP_KERNEL, 0, 1);
if (q)
q->flags |= SIGQUEUE_PREALLOC;
@@ -1909,15 +2020,7 @@ static void ptrace_stop(int exit_code, int why, int clear_code, siginfo_t *info)
if (gstop_done && ptrace_reparented(current))
do_notify_parent_cldstop(current, false, why);
- /*
- * Don't want to allow preemption here, because
- * sys_ptrace() needs this task to be inactive.
- *
- * XXX: implement read_unlock_no_resched().
- */
- preempt_disable();
read_unlock(&tasklist_lock);
- preempt_enable_no_resched();
schedule();
} else {
/*
diff --git a/kernel/softirq.c b/kernel/softirq.c
index 671f9594e368..34fe1db0d5e0 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -21,9 +21,11 @@
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/rcupdate.h>
+#include <linux/delay.h>
#include <linux/ftrace.h>
#include <linux/smp.h>
#include <linux/tick.h>
+#include <linux/locallock.h>
#define CREATE_TRACE_POINTS
#include <trace/events/irq.h>
@@ -61,6 +63,67 @@ char *softirq_to_name[NR_SOFTIRQS] = {
"TASKLET", "SCHED", "HRTIMER", "RCU"
};
+#ifdef CONFIG_NO_HZ
+# ifdef CONFIG_PREEMPT_RT_FULL
+/*
+ * On preempt-rt a softirq might be blocked on a lock. There might be
+ * no other runnable task on this CPU because the lock owner runs on
+ * some other CPU. So we have to go into idle with the pending bit
+ * set. Therefor we need to check this otherwise we warn about false
+ * positives which confuses users and defeats the whole purpose of
+ * this test.
+ *
+ * This code is called with interrupts disabled.
+ */
+void softirq_check_pending_idle(void)
+{
+ static int rate_limit;
+ u32 warnpending = 0, pending = local_softirq_pending();
+
+ if (rate_limit >= 10)
+ return;
+
+ if (pending) {
+ struct task_struct *tsk;
+
+ tsk = __get_cpu_var(ksoftirqd);
+ /*
+ * The wakeup code in rtmutex.c wakes up the task
+ * _before_ it sets pi_blocked_on to NULL under
+ * tsk->pi_lock. So we need to check for both: state
+ * and pi_blocked_on.
+ */
+ raw_spin_lock(&tsk->pi_lock);
+
+ if (!tsk->pi_blocked_on && tsk->state != TASK_RUNNING)
+ warnpending = 1;
+
+ raw_spin_unlock(&tsk->pi_lock);
+ }
+
+ if (warnpending) {
+ printk(KERN_ERR "NOHZ: local_softirq_pending %02x\n",
+ pending);
+ rate_limit++;
+ }
+}
+# else
+/*
+ * On !PREEMPT_RT we just printk rate limited:
+ */
+void softirq_check_pending_idle(void)
+{
+ static int rate_limit;
+
+ if (rate_limit < 10) {
+ printk(KERN_ERR "NOHZ: local_softirq_pending %02x\n",
+ local_softirq_pending());
+ rate_limit++;
+ }
+}
+# endif
+#endif
+
/*
* we cannot loop indefinitely here to avoid userspace starvation,
* but we also don't want to introduce a worst case 1/HZ latency
@@ -76,6 +139,36 @@ static void wakeup_softirqd(void)
wake_up_process(tsk);
}
+static void handle_pending_softirqs(u32 pending, int cpu, int need_rcu_bh_qs)
+{
+ struct softirq_action *h = softirq_vec;
+ unsigned int prev_count = preempt_count();
+
+ local_irq_enable();
+ for ( ; pending; h++, pending >>= 1) {
+ unsigned int vec_nr = h - softirq_vec;
+
+ if (!(pending & 1))
+ continue;
+
+ kstat_incr_softirqs_this_cpu(vec_nr);
+ trace_softirq_entry(vec_nr);
+ h->action(h);
+ trace_softirq_exit(vec_nr);
+ if (unlikely(prev_count != preempt_count())) {
+ printk(KERN_ERR
+ "huh, entered softirq %u %s %p with preempt_count %08x exited with %08x?\n",
+ vec_nr, softirq_to_name[vec_nr], h->action,
+ prev_count, (unsigned int) preempt_count());
+ preempt_count() = prev_count;
+ }
+ if (need_rcu_bh_qs)
+ rcu_bh_qs(cpu);
+ }
+ local_irq_disable();
+}
+
+#ifndef CONFIG_PREEMPT_RT_FULL
/*
* preempt_count and SOFTIRQ_OFFSET usage:
* - preempt_count is changed by SOFTIRQ_OFFSET on entering or leaving
@@ -206,7 +299,6 @@ EXPORT_SYMBOL(local_bh_enable_ip);
asmlinkage void __do_softirq(void)
{
- struct softirq_action *h;
__u32 pending;
int max_restart = MAX_SOFTIRQ_RESTART;
int cpu;
@@ -215,7 +307,7 @@ asmlinkage void __do_softirq(void)
account_system_vtime(current);
__local_bh_disable((unsigned long)__builtin_return_address(0),
- SOFTIRQ_OFFSET);
+ SOFTIRQ_OFFSET);
lockdep_softirq_enter();
cpu = smp_processor_id();
@@ -223,36 +315,7 @@ restart:
/* Reset the pending bitmask before enabling irqs */
set_softirq_pending(0);
- local_irq_enable();
-
- h = softirq_vec;
-
- do {
- if (pending & 1) {
- unsigned int vec_nr = h - softirq_vec;
- int prev_count = preempt_count();
-
- kstat_incr_softirqs_this_cpu(vec_nr);
-
- trace_softirq_entry(vec_nr);
- h->action(h);
- trace_softirq_exit(vec_nr);
- if (unlikely(prev_count != preempt_count())) {
- printk(KERN_ERR "huh, entered softirq %u %s %p"
- "with preempt_count %08x,"
- " exited with %08x?\n", vec_nr,
- softirq_to_name[vec_nr], h->action,
- prev_count, preempt_count());
- preempt_count() = prev_count;
- }
-
- rcu_bh_qs(cpu);
- }
- h++;
- pending >>= 1;
- } while (pending);
-
- local_irq_disable();
+ handle_pending_softirqs(pending, cpu, 1);
pending = local_softirq_pending();
if (pending && --max_restart)
@@ -267,6 +330,26 @@ restart:
__local_bh_enable(SOFTIRQ_OFFSET);
}
+/*
+ * Called with preemption disabled from run_ksoftirqd()
+ */
+static int ksoftirqd_do_softirq(int cpu)
+{
+ /*
+ * Preempt disable stops cpu going offline.
+ * If already offline, we'll be on wrong CPU:
+ * don't process.
+ */
+ if (cpu_is_offline(cpu))
+ return -1;
+
+ local_irq_disable();
+ if (local_softirq_pending())
+ __do_softirq();
+ local_irq_enable();
+ return 0;
+}
+
#ifndef __ARCH_HAS_DO_SOFTIRQ
asmlinkage void do_softirq(void)
@@ -289,6 +372,191 @@ asmlinkage void do_softirq(void)
#endif
+static inline void local_bh_disable_nort(void) { local_bh_disable(); }
+static inline void _local_bh_enable_nort(void) { _local_bh_enable(); }
+static inline void ksoftirqd_set_sched_params(void) { }
+static inline void ksoftirqd_clr_sched_params(void) { }
+
+#else /* !PREEMPT_RT_FULL */
+
+/*
+ * On RT we serialize softirq execution with a cpu local lock
+ */
+static DEFINE_LOCAL_IRQ_LOCK(local_softirq_lock);
+static DEFINE_PER_CPU(struct task_struct *, local_softirq_runner);
+
+static void __do_softirq_common(int need_rcu_bh_qs);
+
+void __do_softirq(void)
+{
+ __do_softirq_common(0);
+}
+
+void __init softirq_early_init(void)
+{
+ local_irq_lock_init(local_softirq_lock);
+}
+
+void local_bh_disable(void)
+{
+ migrate_disable();
+ current->softirq_nestcnt++;
+}
+EXPORT_SYMBOL(local_bh_disable);
+
+void local_bh_enable(void)
+{
+ if (WARN_ON(current->softirq_nestcnt == 0))
+ return;
+
+ if ((current->softirq_nestcnt == 1) &&
+ local_softirq_pending() &&
+ local_trylock(local_softirq_lock)) {
+
+ local_irq_disable();
+ if (local_softirq_pending())
+ __do_softirq();
+ local_irq_enable();
+ local_unlock(local_softirq_lock);
+ WARN_ON(current->softirq_nestcnt != 1);
+ }
+ current->softirq_nestcnt--;
+ migrate_enable();
+}
+EXPORT_SYMBOL(local_bh_enable);
+
+void local_bh_enable_ip(unsigned long ip)
+{
+ local_bh_enable();
+}
+EXPORT_SYMBOL(local_bh_enable_ip);
+
+void _local_bh_enable(void)
+{
+ current->softirq_nestcnt--;
+ migrate_enable();
+}
+EXPORT_SYMBOL(_local_bh_enable);
+
+/* For tracing */
+int notrace __in_softirq(void)
+{
+ if (__get_cpu_var(local_softirq_lock).owner == current)
+ return __get_cpu_var(local_softirq_lock).nestcnt;
+ return 0;
+}
+
+int in_serving_softirq(void)
+{
+ int res;
+
+ preempt_disable();
+ res = __get_cpu_var(local_softirq_runner) == current;
+ preempt_enable();
+ return res;
+}
+EXPORT_SYMBOL(in_serving_softirq);
+
+/*
+ * Called with bh and local interrupts disabled. For full RT the
+ * cpu must be pinned.
+ */
+static void __do_softirq_common(int need_rcu_bh_qs)
+{
+ u32 pending = local_softirq_pending();
+ int cpu = smp_processor_id();
+
+ current->softirq_nestcnt++;
+
+ /* Reset the pending bitmask before enabling irqs */
+ set_softirq_pending(0);
+
+ __get_cpu_var(local_softirq_runner) = current;
+
+ lockdep_softirq_enter();
+
+ handle_pending_softirqs(pending, cpu, need_rcu_bh_qs);
+
+ pending = local_softirq_pending();
+ if (pending)
+ wakeup_softirqd();
+
+ lockdep_softirq_exit();
+ __get_cpu_var(local_softirq_runner) = NULL;
+
+ current->softirq_nestcnt--;
+}
+
+static int __thread_do_softirq(int cpu)
+{
+ /*
+ * Prevent the current cpu from going offline.
+ * pin_current_cpu() can reenable preemption and block on the
+ * hotplug mutex. When it returns, the current cpu is
+ * pinned. It might be the wrong one, but the offline check
+ * below catches that.
+ */
+ pin_current_cpu();
+ /*
+ * If called from ksoftirqd (cpu >= 0) we need to check
+ * whether we are on the wrong cpu due to cpu offlining. If
+ * called via thread_do_softirq() (cpu < 0), no action is required.
+ */
+ if (cpu >= 0 && cpu_is_offline(cpu)) {
+ unpin_current_cpu();
+ return -1;
+ }
+ preempt_enable();
+ local_lock(local_softirq_lock);
+ local_irq_disable();
+ /*
+ * We cannot switch stacks on RT as we want to be able to
+ * schedule!
+ */
+ if (local_softirq_pending())
+ __do_softirq_common(cpu >= 0);
+ local_unlock(local_softirq_lock);
+ unpin_current_cpu();
+ preempt_disable();
+ local_irq_enable();
+ return 0;
+}
+
+/*
+ * Called from netif_rx_ni(). Preemption enabled.
+ */
+void thread_do_softirq(void)
+{
+ if (!in_serving_softirq()) {
+ preempt_disable();
+ __thread_do_softirq(-1);
+ preempt_enable();
+ }
+}
+
+static int ksoftirqd_do_softirq(int cpu)
+{
+ return __thread_do_softirq(cpu);
+}
+
+static inline void local_bh_disable_nort(void) { }
+static inline void _local_bh_enable_nort(void) { }
+
+static inline void ksoftirqd_set_sched_params(void)
+{
+ struct sched_param param = { .sched_priority = 1 };
+
+ sched_setscheduler(current, SCHED_FIFO, &param);
+}
+
+static inline void ksoftirqd_clr_sched_params(void)
+{
+ struct sched_param param = { .sched_priority = 0 };
+
+ sched_setscheduler(current, SCHED_NORMAL, &param);
+}
+
+#endif /* PREEMPT_RT_FULL */
/*
* Enter an interrupt context.
*/
@@ -302,9 +570,9 @@ void irq_enter(void)
* Prevent raise_softirq from needlessly waking up ksoftirqd
* here, as softirq will be serviced on return from interrupt.
*/
- local_bh_disable();
+ local_bh_disable_nort();
tick_check_idle(cpu);
- _local_bh_enable();
+ _local_bh_enable_nort();
}
__irq_enter();
@@ -312,6 +580,7 @@ void irq_enter(void)
static inline void invoke_softirq(void)
{
+#ifndef CONFIG_PREEMPT_RT_FULL
if (!force_irqthreads) {
#ifdef __ARCH_IRQ_EXIT_IRQS_DISABLED
__do_softirq();
@@ -324,6 +593,9 @@ static inline void invoke_softirq(void)
wakeup_softirqd();
__local_bh_enable(SOFTIRQ_OFFSET);
}
+#else
+ wakeup_softirqd();
+#endif
}
/*
@@ -398,15 +670,45 @@ struct tasklet_head
static DEFINE_PER_CPU(struct tasklet_head, tasklet_vec);
static DEFINE_PER_CPU(struct tasklet_head, tasklet_hi_vec);
+static inline void
+__tasklet_common_schedule(struct tasklet_struct *t, struct tasklet_head *head, unsigned int nr)
+{
+ if (tasklet_trylock(t)) {
+again:
+ /* We may have been preempted before tasklet_trylock
+ * and __tasklet_action may have already run.
+ * So double check the sched bit while the tasklet
+ * is locked before adding it to the list.
+ */
+ if (test_bit(TASKLET_STATE_SCHED, &t->state)) {
+ t->next = NULL;
+ *head->tail = t;
+ head->tail = &(t->next);
+ raise_softirq_irqoff(nr);
+ tasklet_unlock(t);
+ } else {
+ /* This is subtle. If we hit the corner case above,
+ * it is possible that we get preempted right here,
+ * and another task has successfully called
+ * tasklet_schedule() and then this function, but
+ * failed on the trylock. Thus we must be sure,
+ * before releasing the tasklet lock, that the
+ * SCHED_BIT is clear. Otherwise the tasklet
+ * may get its SCHED_BIT set, but not be added
+ * to the list.
+ */
+ if (!tasklet_tryunlock(t))
+ goto again;
+ }
+ }
+}
+
void __tasklet_schedule(struct tasklet_struct *t)
{
unsigned long flags;
local_irq_save(flags);
- t->next = NULL;
- *__this_cpu_read(tasklet_vec.tail) = t;
- __this_cpu_write(tasklet_vec.tail, &(t->next));
- raise_softirq_irqoff(TASKLET_SOFTIRQ);
+ __tasklet_common_schedule(t, &__get_cpu_var(tasklet_vec), TASKLET_SOFTIRQ);
local_irq_restore(flags);
}
@@ -417,10 +719,7 @@ void __tasklet_hi_schedule(struct tasklet_struct *t)
unsigned long flags;
local_irq_save(flags);
- t->next = NULL;
- *__this_cpu_read(tasklet_hi_vec.tail) = t;
- __this_cpu_write(tasklet_hi_vec.tail, &(t->next));
- raise_softirq_irqoff(HI_SOFTIRQ);
+ __tasklet_common_schedule(t, &__get_cpu_var(tasklet_hi_vec), HI_SOFTIRQ);
local_irq_restore(flags);
}
@@ -428,50 +727,119 @@ EXPORT_SYMBOL(__tasklet_hi_schedule);
void __tasklet_hi_schedule_first(struct tasklet_struct *t)
{
- BUG_ON(!irqs_disabled());
-
- t->next = __this_cpu_read(tasklet_hi_vec.head);
- __this_cpu_write(tasklet_hi_vec.head, t);
- __raise_softirq_irqoff(HI_SOFTIRQ);
+ __tasklet_hi_schedule(t);
}
EXPORT_SYMBOL(__tasklet_hi_schedule_first);
-static void tasklet_action(struct softirq_action *a)
+void tasklet_enable(struct tasklet_struct *t)
{
- struct tasklet_struct *list;
+ if (!atomic_dec_and_test(&t->count))
+ return;
+ if (test_and_clear_bit(TASKLET_STATE_PENDING, &t->state))
+ tasklet_schedule(t);
+}
- local_irq_disable();
- list = __this_cpu_read(tasklet_vec.head);
- __this_cpu_write(tasklet_vec.head, NULL);
- __this_cpu_write(tasklet_vec.tail, &__get_cpu_var(tasklet_vec).head);
- local_irq_enable();
+EXPORT_SYMBOL(tasklet_enable);
+
+void tasklet_hi_enable(struct tasklet_struct *t)
+{
+ if (!atomic_dec_and_test(&t->count))
+ return;
+ if (test_and_clear_bit(TASKLET_STATE_PENDING, &t->state))
+ tasklet_hi_schedule(t);
+}
+
+EXPORT_SYMBOL(tasklet_hi_enable);
+
+static void
+__tasklet_action(struct softirq_action *a, struct tasklet_struct *list)
+{
+ int loops = 1000000;
while (list) {
struct tasklet_struct *t = list;
list = list->next;
- if (tasklet_trylock(t)) {
- if (!atomic_read(&t->count)) {
- if (!test_and_clear_bit(TASKLET_STATE_SCHED, &t->state))
- BUG();
- t->func(t->data);
- tasklet_unlock(t);
- continue;
- }
- tasklet_unlock(t);
+ /*
+ * Should always succeed - after a tasklet got on the
+ * list (after getting the SCHED bit set from 0 to 1),
+ * nothing but the tasklet softirq it got queued to can
+ * lock it:
+ */
+ if (!tasklet_trylock(t)) {
+ WARN_ON(1);
+ continue;
}
- local_irq_disable();
t->next = NULL;
- *__this_cpu_read(tasklet_vec.tail) = t;
- __this_cpu_write(tasklet_vec.tail, &(t->next));
- __raise_softirq_irqoff(TASKLET_SOFTIRQ);
- local_irq_enable();
+
+ /*
+ * If we cannot handle the tasklet because it's disabled,
+ * mark it as pending. tasklet_enable() will later
+ * re-schedule the tasklet.
+ */
+ if (unlikely(atomic_read(&t->count))) {
+out_disabled:
+ /* implicit unlock: */
+ wmb();
+ t->state = TASKLET_STATEF_PENDING;
+ continue;
+ }
+
+ /*
+ * From this point on the tasklet might be rescheduled
+ * on another CPU, but it can only be added to another
+ * CPU's tasklet list if we unlock the tasklet (which we
+ * don't do yet).
+ */
+ if (!test_and_clear_bit(TASKLET_STATE_SCHED, &t->state))
+ WARN_ON(1);
+
+again:
+ t->func(t->data);
+
+ /*
+ * Try to unlock the tasklet. We must use cmpxchg, because
+ * another CPU might have scheduled or disabled the tasklet.
+ * We only allow the STATE_RUN -> 0 transition here.
+ */
+ while (!tasklet_tryunlock(t)) {
+ /*
+ * If it got disabled meanwhile, bail out:
+ */
+ if (atomic_read(&t->count))
+ goto out_disabled;
+ /*
+ * If it got scheduled meanwhile, re-execute
+ * the tasklet function:
+ */
+ if (test_and_clear_bit(TASKLET_STATE_SCHED, &t->state))
+ goto again;
+ if (!--loops) {
+ printk("hm, tasklet state: %08lx\n", t->state);
+ WARN_ON(1);
+ tasklet_unlock(t);
+ break;
+ }
+ }
}
}
+static void tasklet_action(struct softirq_action *a)
+{
+ struct tasklet_struct *list;
+
+ local_irq_disable();
+ list = __get_cpu_var(tasklet_vec).head;
+ __get_cpu_var(tasklet_vec).head = NULL;
+ __get_cpu_var(tasklet_vec).tail = &__get_cpu_var(tasklet_vec).head;
+ local_irq_enable();
+
+ __tasklet_action(a, list);
+}
+
static void tasklet_hi_action(struct softirq_action *a)
{
struct tasklet_struct *list;
@@ -482,29 +850,7 @@ static void tasklet_hi_action(struct softirq_action *a)
__this_cpu_write(tasklet_hi_vec.tail, &__get_cpu_var(tasklet_hi_vec).head);
local_irq_enable();
- while (list) {
- struct tasklet_struct *t = list;
-
- list = list->next;
-
- if (tasklet_trylock(t)) {
- if (!atomic_read(&t->count)) {
- if (!test_and_clear_bit(TASKLET_STATE_SCHED, &t->state))
- BUG();
- t->func(t->data);
- tasklet_unlock(t);
- continue;
- }
- tasklet_unlock(t);
- }
-
- local_irq_disable();
- t->next = NULL;
- *__this_cpu_read(tasklet_hi_vec.tail) = t;
- __this_cpu_write(tasklet_hi_vec.tail, &(t->next));
- __raise_softirq_irqoff(HI_SOFTIRQ);
- local_irq_enable();
- }
+ __tasklet_action(a, list);
}
@@ -527,7 +873,7 @@ void tasklet_kill(struct tasklet_struct *t)
while (test_and_set_bit(TASKLET_STATE_SCHED, &t->state)) {
do {
- yield();
+ msleep(1);
} while (test_bit(TASKLET_STATE_SCHED, &t->state));
}
tasklet_unlock_wait(t);
@@ -733,28 +1079,39 @@ void __init softirq_init(void)
open_softirq(HI_SOFTIRQ, tasklet_hi_action);
}
+#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT_FULL)
+void tasklet_unlock_wait(struct tasklet_struct *t)
+{
+ while (test_bit(TASKLET_STATE_RUN, &(t)->state)) {
+ /*
+ * Hack for now to avoid this busy-loop:
+ */
+#ifdef CONFIG_PREEMPT_RT_FULL
+ msleep(1);
+#else
+ barrier();
+#endif
+ }
+}
+EXPORT_SYMBOL(tasklet_unlock_wait);
+#endif
+
static int run_ksoftirqd(void * __bind_cpu)
{
+ ksoftirqd_set_sched_params();
+
set_current_state(TASK_INTERRUPTIBLE);
while (!kthread_should_stop()) {
preempt_disable();
- if (!local_softirq_pending()) {
+ if (!local_softirq_pending())
schedule_preempt_disabled();
- }
__set_current_state(TASK_RUNNING);
while (local_softirq_pending()) {
- /* Preempt disable stops cpu going offline.
- If already offline, we'll be on wrong CPU:
- don't process */
- if (cpu_is_offline((long)__bind_cpu))
+ if (ksoftirqd_do_softirq((long) __bind_cpu))
goto wait_to_die;
- local_irq_disable();
- if (local_softirq_pending())
- __do_softirq();
- local_irq_enable();
sched_preempt_enable_no_resched();
cond_resched();
preempt_disable();
@@ -768,6 +1125,7 @@ static int run_ksoftirqd(void * __bind_cpu)
wait_to_die:
preempt_enable();
+ ksoftirqd_clr_sched_params();
/* Wait for kthread_stop */
set_current_state(TASK_INTERRUPTIBLE);
while (!kthread_should_stop()) {
@@ -844,9 +1202,8 @@ static int __cpuinit cpu_callback(struct notifier_block *nfb,
int hotcpu = (unsigned long)hcpu;
struct task_struct *p;
- switch (action) {
+ switch (action & ~CPU_TASKS_FROZEN) {
case CPU_UP_PREPARE:
- case CPU_UP_PREPARE_FROZEN:
p = kthread_create_on_node(run_ksoftirqd,
hcpu,
cpu_to_node(hotcpu),
@@ -859,19 +1216,16 @@ static int __cpuinit cpu_callback(struct notifier_block *nfb,
per_cpu(ksoftirqd, hotcpu) = p;
break;
case CPU_ONLINE:
- case CPU_ONLINE_FROZEN:
wake_up_process(per_cpu(ksoftirqd, hotcpu));
break;
#ifdef CONFIG_HOTPLUG_CPU
case CPU_UP_CANCELED:
- case CPU_UP_CANCELED_FROZEN:
if (!per_cpu(ksoftirqd, hotcpu))
break;
/* Unbind so it can run. Fall thru. */
kthread_bind(per_cpu(ksoftirqd, hotcpu),
cpumask_any(cpu_online_mask));
- case CPU_DEAD:
- case CPU_DEAD_FROZEN: {
+ case CPU_POST_DEAD: {
static const struct sched_param param = {
.sched_priority = MAX_RT_PRIO-1
};
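
The tasklet rework above rests on tasklet_trylock() and
tasklet_tryunlock(): lock-free bit operations on t->state that either
succeed immediately or report failure instead of spinning. A sketch of
the primitives as this series is understood to define them (treat the
exact bodies as illustrative):

    static inline int tasklet_trylock(struct tasklet_struct *t)
    {
            /* Claim TASKLET_STATE_RUN if it is clear; never spin. */
            return !test_and_set_bit(TASKLET_STATE_RUN, &t->state);
    }

    static inline int tasklet_tryunlock(struct tasklet_struct *t)
    {
            /* Allow only the RUN -> 0 transition; fail if SCHED
             * crept in meanwhile. */
            return cmpxchg(&t->state, TASKLET_STATEF_RUN, 0) ==
                    TASKLET_STATEF_RUN;
    }

With these, __tasklet_action() can run the callback and then retry the
unlock in a loop, re-executing the function whenever
TASKLET_STATE_SCHED was set in between, exactly as the hunk above does.
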
diff --git a/kernel/spinlock.c b/kernel/spinlock.c
index 5cdd8065a3ce..da9775b3038f 100644
--- a/kernel/spinlock.c
+++ b/kernel/spinlock.c
@@ -110,8 +110,11 @@ void __lockfunc __raw_##op##_lock_bh(locktype##_t *lock) \
* __[spin|read|write]_lock_bh()
*/
BUILD_LOCK_OPS(spin, raw_spinlock);
+
+#ifndef CONFIG_PREEMPT_RT_FULL
BUILD_LOCK_OPS(read, rwlock);
BUILD_LOCK_OPS(write, rwlock);
+#endif
#endif
@@ -195,6 +198,8 @@ void __lockfunc _raw_spin_unlock_bh(raw_spinlock_t *lock)
EXPORT_SYMBOL(_raw_spin_unlock_bh);
#endif
+#ifndef CONFIG_PREEMPT_RT_FULL
+
#ifndef CONFIG_INLINE_READ_TRYLOCK
int __lockfunc _raw_read_trylock(rwlock_t *lock)
{
@@ -339,6 +344,8 @@ void __lockfunc _raw_write_unlock_bh(rwlock_t *lock)
EXPORT_SYMBOL(_raw_write_unlock_bh);
#endif
+#endif /* !PREEMPT_RT_FULL */
+
#ifdef CONFIG_DEBUG_LOCK_ALLOC
void __lockfunc _raw_spin_lock_nested(raw_spinlock_t *lock, int subclass)
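
For reference, BUILD_LOCK_OPS(op, type) generates the spinning
__raw_##op##_lock{,_irqsave,_irq,_bh} family; the read/write instances
are compiled out above because PREEMPT_RT_FULL replaces rwlock_t with
an rt_mutex based implementation, so the generic spinning versions
must not be emitted. Roughly, each generated function follows this
shape (simplified sketch, break_lock handling omitted):

    void __lockfunc __raw_spin_lock(raw_spinlock_t *lock)
    {
            for (;;) {
                    preempt_disable();
                    if (likely(do_raw_spin_trylock(lock)))
                            break;
                    preempt_enable();
                    while (!raw_spin_can_lock(lock))
                            arch_spin_relax(&lock->raw_lock);
            }
    }
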
diff --git a/kernel/stop_machine.c b/kernel/stop_machine.c
index 2f194e965715..561ba3a7b8cf 100644
--- a/kernel/stop_machine.c
+++ b/kernel/stop_machine.c
@@ -29,12 +29,12 @@ struct cpu_stop_done {
atomic_t nr_todo; /* nr left to execute */
bool executed; /* actually executed? */
int ret; /* collected return value */
- struct completion completion; /* fired if nr_todo reaches 0 */
+ struct task_struct *waiter; /* woken when nr_todo reaches 0 */
};
/* the actual stopper, one per every possible cpu, enabled on online cpus */
struct cpu_stopper {
- spinlock_t lock;
+ raw_spinlock_t lock;
bool enabled; /* is this stopper enabled? */
struct list_head works; /* list of pending works */
struct task_struct *thread; /* stopper thread */
@@ -47,7 +47,7 @@ static void cpu_stop_init_done(struct cpu_stop_done *done, unsigned int nr_todo)
{
memset(done, 0, sizeof(*done));
atomic_set(&done->nr_todo, nr_todo);
- init_completion(&done->completion);
+ done->waiter = current;
}
/* signal completion unless @done is NULL */
@@ -56,8 +56,10 @@ static void cpu_stop_signal_done(struct cpu_stop_done *done, bool executed)
if (done) {
if (executed)
done->executed = true;
- if (atomic_dec_and_test(&done->nr_todo))
- complete(&done->completion);
+ if (atomic_dec_and_test(&done->nr_todo)) {
+ wake_up_process(done->waiter);
+ done->waiter = NULL;
+ }
}
}
@@ -67,7 +69,7 @@ static void cpu_stop_queue_work(struct cpu_stopper *stopper,
{
unsigned long flags;
- spin_lock_irqsave(&stopper->lock, flags);
+ raw_spin_lock_irqsave(&stopper->lock, flags);
if (stopper->enabled) {
list_add_tail(&work->list, &stopper->works);
@@ -75,7 +77,23 @@ static void cpu_stop_queue_work(struct cpu_stopper *stopper,
} else
cpu_stop_signal_done(work->done, false);
- spin_unlock_irqrestore(&stopper->lock, flags);
+ raw_spin_unlock_irqrestore(&stopper->lock, flags);
+}
+
+static void wait_for_stop_done(struct cpu_stop_done *done)
+{
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ while (atomic_read(&done->nr_todo)) {
+ schedule();
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ }
+ /*
+ * We need to wait until cpu_stop_signal_done() has cleared
+ * done->waiter.
+ */
+ while (done->waiter)
+ cpu_relax();
+ set_current_state(TASK_RUNNING);
}
/**
@@ -109,7 +127,7 @@ int stop_one_cpu(unsigned int cpu, cpu_stop_fn_t fn, void *arg)
cpu_stop_init_done(&done, 1);
cpu_stop_queue_work(&per_cpu(cpu_stopper, cpu), &work);
- wait_for_completion(&done.completion);
+ wait_for_stop_done(&done);
return done.executed ? done.ret : -ENOENT;
}
@@ -135,6 +153,7 @@ void stop_one_cpu_nowait(unsigned int cpu, cpu_stop_fn_t fn, void *arg,
/* static data for stop_cpus */
static DEFINE_MUTEX(stop_cpus_mutex);
+static DEFINE_MUTEX(stopper_lock);
static DEFINE_PER_CPU(struct cpu_stop_work, stop_cpus_work);
static void queue_stop_cpus_work(const struct cpumask *cpumask,
@@ -153,15 +172,14 @@ static void queue_stop_cpus_work(const struct cpumask *cpumask,
}
/*
- * Disable preemption while queueing to avoid getting
- * preempted by a stopper which might wait for other stoppers
- * to enter @fn which can lead to deadlock.
+ * Make sure that all work is queued on all cpus before
+ * any of the cpus can execute it.
*/
- preempt_disable();
+ mutex_lock(&stopper_lock);
for_each_cpu(cpu, cpumask)
cpu_stop_queue_work(&per_cpu(cpu_stopper, cpu),
&per_cpu(stop_cpus_work, cpu));
- preempt_enable();
+ mutex_unlock(&stopper_lock);
}
static int __stop_cpus(const struct cpumask *cpumask,
@@ -171,7 +189,7 @@ static int __stop_cpus(const struct cpumask *cpumask,
cpu_stop_init_done(&done, cpumask_weight(cpumask));
queue_stop_cpus_work(cpumask, fn, arg, &done);
- wait_for_completion(&done.completion);
+ wait_for_stop_done(&done);
return done.executed ? done.ret : -ENOENT;
}
@@ -259,13 +277,13 @@ repeat:
}
work = NULL;
- spin_lock_irq(&stopper->lock);
+ raw_spin_lock_irq(&stopper->lock);
if (!list_empty(&stopper->works)) {
work = list_first_entry(&stopper->works,
struct cpu_stop_work, list);
list_del_init(&work->list);
}
- spin_unlock_irq(&stopper->lock);
+ raw_spin_unlock_irq(&stopper->lock);
if (work) {
cpu_stop_fn_t fn = work->fn;
@@ -275,6 +293,16 @@ repeat:
__set_current_state(TASK_RUNNING);
+ /*
+ * Wait until the stopper has finished scheduling on
+ * all cpus
+ */
+ mutex_lock(&stopper_lock);
+ /*
+ * Let other cpu threads continue as well
+ */
+ mutex_unlock(&stopper_lock);
+
/* cpu stop callbacks are not allowed to sleep */
preempt_disable();
@@ -289,7 +317,13 @@ repeat:
kallsyms_lookup((unsigned long)fn, NULL, NULL, NULL,
ksym_buf), arg);
+ /*
+ * Make sure that the wakeup and the setting of
+ * done->waiter to NULL happen atomically.
+ */
+ local_irq_disable();
cpu_stop_signal_done(done, true);
+ local_irq_enable();
} else
schedule();
@@ -317,6 +351,7 @@ static int __cpuinit cpu_stop_cpu_callback(struct notifier_block *nfb,
if (IS_ERR(p))
return notifier_from_errno(PTR_ERR(p));
get_task_struct(p);
+ p->flags |= PF_STOMPER;
kthread_bind(p, cpu);
sched_set_stop_task(cpu, p);
stopper->thread = p;
@@ -326,9 +361,9 @@ static int __cpuinit cpu_stop_cpu_callback(struct notifier_block *nfb,
/* strictly unnecessary, as first user will wake it */
wake_up_process(stopper->thread);
/* mark enabled */
- spin_lock_irq(&stopper->lock);
+ raw_spin_lock_irq(&stopper->lock);
stopper->enabled = true;
- spin_unlock_irq(&stopper->lock);
+ raw_spin_unlock_irq(&stopper->lock);
break;
#ifdef CONFIG_HOTPLUG_CPU
@@ -341,11 +376,11 @@ static int __cpuinit cpu_stop_cpu_callback(struct notifier_block *nfb,
/* kill the stopper */
kthread_stop(stopper->thread);
/* drain remaining works */
- spin_lock_irq(&stopper->lock);
+ raw_spin_lock_irq(&stopper->lock);
list_for_each_entry(work, &stopper->works, list)
cpu_stop_signal_done(work->done, false);
stopper->enabled = false;
- spin_unlock_irq(&stopper->lock);
+ raw_spin_unlock_irq(&stopper->lock);
/* release the stopper */
put_task_struct(stopper->thread);
stopper->thread = NULL;
@@ -376,7 +411,7 @@ static int __init cpu_stop_init(void)
for_each_possible_cpu(cpu) {
struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
- spin_lock_init(&stopper->lock);
+ raw_spin_lock_init(&stopper->lock);
INIT_LIST_HEAD(&stopper->works);
}
@@ -570,7 +605,7 @@ int stop_machine_from_inactive_cpu(int (*fn)(void *), void *data,
ret = stop_machine_cpu_stop(&smdata);
/* Busy wait for completion. */
- while (!completion_done(&done.completion))
+ while (atomic_read(&done.nr_todo))
cpu_relax();
mutex_unlock(&stop_cpus_mutex);
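
The completion-to-waiter conversion above is needed because struct
completion embeds a wait queue protected by a spinlock_t, which turns
into a sleeping lock on RT and cannot be taken from the stopper
context; parking the single waiter in TASK_UNINTERRUPTIBLE and waking
it with wake_up_process() only touches raw scheduler locks. Distilled
to its essentials (a sketch with hypothetical names, not code from the
patch):

    struct simple_done {
            atomic_t nr_todo;               /* outstanding signals */
            struct task_struct *waiter;     /* cleared by last signaler */
    };

    static void simple_wait(struct simple_done *d)
    {
            set_current_state(TASK_UNINTERRUPTIBLE);
            while (atomic_read(&d->nr_todo)) {
                    schedule();
                    set_current_state(TASK_UNINTERRUPTIBLE);
            }
            __set_current_state(TASK_RUNNING);
    }

    static void simple_signal(struct simple_done *d)
    {
            if (atomic_dec_and_test(&d->nr_todo)) {
                    wake_up_process(d->waiter);
                    d->waiter = NULL;
            }
    }

As in the patch, the waiter must additionally spin until d->waiter has
been cleared before the structure may go out of scope.
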
diff --git a/kernel/time/jiffies.c b/kernel/time/jiffies.c
index a470154e0408..21940eb049a1 100644
--- a/kernel/time/jiffies.c
+++ b/kernel/time/jiffies.c
@@ -74,9 +74,9 @@ u64 get_jiffies_64(void)
u64 ret;
do {
- seq = read_seqbegin(&xtime_lock);
+ seq = read_seqcount_begin(&xtime_seq);
ret = jiffies_64;
- } while (read_seqretry(&xtime_lock, seq));
+ } while (read_seqcount_retry(&xtime_seq, seq));
return ret;
}
EXPORT_SYMBOL(get_jiffies_64);
diff --git a/kernel/time/ntp.c b/kernel/time/ntp.c
index f03fd83b170b..82897ea52dcb 100644
--- a/kernel/time/ntp.c
+++ b/kernel/time/ntp.c
@@ -22,7 +22,7 @@
* NTP timekeeping variables:
*/
-DEFINE_SPINLOCK(ntp_lock);
+DEFINE_RAW_SPINLOCK(ntp_lock);
/* USER_HZ period (usecs): */
@@ -347,7 +347,7 @@ void ntp_clear(void)
{
unsigned long flags;
- spin_lock_irqsave(&ntp_lock, flags);
+ raw_spin_lock_irqsave(&ntp_lock, flags);
time_adjust = 0; /* stop active adjtime() */
time_status |= STA_UNSYNC;
@@ -361,7 +361,7 @@ void ntp_clear(void)
/* Clear PPS state variables */
pps_clear();
- spin_unlock_irqrestore(&ntp_lock, flags);
+ raw_spin_unlock_irqrestore(&ntp_lock, flags);
}
@@ -371,9 +371,9 @@ u64 ntp_tick_length(void)
unsigned long flags;
s64 ret;
- spin_lock_irqsave(&ntp_lock, flags);
+ raw_spin_lock_irqsave(&ntp_lock, flags);
ret = tick_length;
- spin_unlock_irqrestore(&ntp_lock, flags);
+ raw_spin_unlock_irqrestore(&ntp_lock, flags);
return ret;
}
@@ -394,7 +394,7 @@ int second_overflow(unsigned long secs)
int leap = 0;
unsigned long flags;
- spin_lock_irqsave(&ntp_lock, flags);
+ raw_spin_lock_irqsave(&ntp_lock, flags);
/*
* Leap second processing. If in leap-insert state at the end of the
@@ -476,7 +476,7 @@ int second_overflow(unsigned long secs)
out:
- spin_unlock_irqrestore(&ntp_lock, flags);
+ raw_spin_unlock_irqrestore(&ntp_lock, flags);
return leap;
}
@@ -658,7 +658,7 @@ int do_adjtimex(struct timex *txc)
getnstimeofday(&ts);
- spin_lock_irq(&ntp_lock);
+ raw_spin_lock_irq(&ntp_lock);
if (txc->modes & ADJ_ADJTIME) {
long save_adjust = time_adjust;
@@ -700,7 +700,7 @@ int do_adjtimex(struct timex *txc)
/* fill PPS status fields */
pps_fill_timex(txc);
- spin_unlock_irq(&ntp_lock);
+ raw_spin_unlock_irq(&ntp_lock);
txc->time.tv_sec = ts.tv_sec;
txc->time.tv_usec = ts.tv_nsec;
@@ -898,7 +898,7 @@ void hardpps(const struct timespec *phase_ts, const struct timespec *raw_ts)
pts_norm = pps_normalize_ts(*phase_ts);
- spin_lock_irqsave(&ntp_lock, flags);
+ raw_spin_lock_irqsave(&ntp_lock, flags);
/* clear the error bits, they will be set again if needed */
time_status &= ~(STA_PPSJITTER | STA_PPSWANDER | STA_PPSERROR);
@@ -911,7 +911,7 @@ void hardpps(const struct timespec *phase_ts, const struct timespec *raw_ts)
* just start the frequency interval */
if (unlikely(pps_fbase.tv_sec == 0)) {
pps_fbase = *raw_ts;
- spin_unlock_irqrestore(&ntp_lock, flags);
+ raw_spin_unlock_irqrestore(&ntp_lock, flags);
return;
}
@@ -926,7 +926,7 @@ void hardpps(const struct timespec *phase_ts, const struct timespec *raw_ts)
time_status |= STA_PPSJITTER;
/* restart the frequency calibration interval */
pps_fbase = *raw_ts;
- spin_unlock_irqrestore(&ntp_lock, flags);
+ raw_spin_unlock_irqrestore(&ntp_lock, flags);
pr_err("hardpps: PPSJITTER: bad pulse\n");
return;
}
@@ -943,7 +943,7 @@ void hardpps(const struct timespec *phase_ts, const struct timespec *raw_ts)
hardpps_update_phase(pts_norm.nsec);
- spin_unlock_irqrestore(&ntp_lock, flags);
+ raw_spin_unlock_irqrestore(&ntp_lock, flags);
}
EXPORT_SYMBOL(hardpps);
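
The ntp_lock conversion above is the standard RT recipe: a lock taken
from timer/timekeeping core context must keep spinning for real, so
spinlock_t (a sleeping rt_mutex on PREEMPT_RT_FULL) becomes
raw_spinlock_t. Apart from the raw_ prefix the call sites are
unchanged, e.g. (hypothetical example lock, not from the patch):

    static DEFINE_RAW_SPINLOCK(example_lock);

    static void example_update(void)
    {
            unsigned long flags;

            raw_spin_lock_irqsave(&example_lock, flags);
            /* ... update state that IRQ context may contend on ... */
            raw_spin_unlock_irqrestore(&example_lock, flags);
    }
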
diff --git a/kernel/time/tick-common.c b/kernel/time/tick-common.c
index da6c9ecad4e4..39de540fd7a7 100644
--- a/kernel/time/tick-common.c
+++ b/kernel/time/tick-common.c
@@ -63,13 +63,15 @@ int tick_is_oneshot_available(void)
static void tick_periodic(int cpu)
{
if (tick_do_timer_cpu == cpu) {
- write_seqlock(&xtime_lock);
+ raw_spin_lock(&xtime_lock);
+ write_seqcount_begin(&xtime_seq);
/* Keep track of the next tick event */
tick_next_period = ktime_add(tick_next_period, tick_period);
do_timer(1);
- write_sequnlock(&xtime_lock);
+ write_seqcount_end(&xtime_seq);
+ raw_spin_unlock(&xtime_lock);
}
update_process_times(user_mode(get_irq_regs()));
@@ -130,9 +132,9 @@ void tick_setup_periodic(struct clock_event_device *dev, int broadcast)
ktime_t next;
do {
- seq = read_seqbegin(&xtime_lock);
+ seq = read_seqcount_begin(&xtime_seq);
next = tick_next_period;
- } while (read_seqretry(&xtime_lock, seq));
+ } while (read_seqcount_retry(&xtime_seq, seq));
clockevents_set_mode(dev, CLOCK_EVT_MODE_ONESHOT);
diff --git a/kernel/time/tick-internal.h b/kernel/time/tick-internal.h
index 4e265b901fed..c91100d99fb9 100644
--- a/kernel/time/tick-internal.h
+++ b/kernel/time/tick-internal.h
@@ -141,4 +141,5 @@ static inline int tick_device_is_functional(struct clock_event_device *dev)
#endif
extern void do_timer(unsigned long ticks);
-extern seqlock_t xtime_lock;
+extern raw_spinlock_t xtime_lock;
+extern seqcount_t xtime_seq;
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index 6a3a5b9ff561..489413d832b0 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -56,7 +56,8 @@ static void tick_do_update_jiffies64(ktime_t now)
return;
/* Reevaluate with xtime_lock held */
- write_seqlock(&xtime_lock);
+ raw_spin_lock(&xtime_lock);
+ write_seqcount_begin(&xtime_seq);
delta = ktime_sub(now, last_jiffies_update);
if (delta.tv64 >= tick_period.tv64) {
@@ -79,7 +80,8 @@ static void tick_do_update_jiffies64(ktime_t now)
/* Keep the tick_next_period variable up to date */
tick_next_period = ktime_add(last_jiffies_update, tick_period);
}
- write_sequnlock(&xtime_lock);
+ write_seqcount_end(&xtime_seq);
+ raw_spin_unlock(&xtime_lock);
}
/*
@@ -89,12 +91,14 @@ static ktime_t tick_init_jiffy_update(void)
{
ktime_t period;
- write_seqlock(&xtime_lock);
+ raw_spin_lock(&xtime_lock);
+ write_seqcount_begin(&xtime_seq);
/* Did we start the jiffies update yet ? */
if (last_jiffies_update.tv64 == 0)
last_jiffies_update = tick_next_period;
period = last_jiffies_update;
- write_sequnlock(&xtime_lock);
+ write_seqcount_end(&xtime_seq);
+ raw_spin_unlock(&xtime_lock);
return period;
}
@@ -303,24 +307,18 @@ static void tick_nohz_stop_sched_tick(struct tick_sched *ts)
return;
if (unlikely(local_softirq_pending() && cpu_online(cpu))) {
- static int ratelimit;
-
- if (ratelimit < 10) {
- printk(KERN_ERR "NOHZ: local_softirq_pending %02x\n",
- (unsigned int) local_softirq_pending());
- ratelimit++;
- }
+ softirq_check_pending_idle();
return;
}
ts->idle_calls++;
/* Read jiffies and the time when jiffies were updated last */
do {
- seq = read_seqbegin(&xtime_lock);
+ seq = read_seqcount_begin(&xtime_seq);
last_update = last_jiffies_update;
last_jiffies = jiffies;
time_delta = timekeeping_max_deferment();
- } while (read_seqretry(&xtime_lock, seq));
+ } while (read_seqcount_retry(&xtime_seq, seq));
if (rcu_needs_cpu(cpu) || printk_needs_cpu(cpu) ||
arch_needs_cpu(cpu)) {
@@ -826,6 +824,7 @@ void tick_setup_sched_timer(void)
* Emulate tick processing via per-CPU hrtimers:
*/
hrtimer_init(&ts->sched_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
+ ts->sched_timer.irqsafe = 1;
ts->sched_timer.function = tick_sched_timer;
/* Get the next period (per cpu) */
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index d66b21308f7c..56e151b44a91 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -70,8 +70,9 @@ struct timekeeper {
/* The raw monotonic time for the CLOCK_MONOTONIC_RAW posix clock. */
struct timespec raw_time;
- /* Seqlock for all timekeeper values */
- seqlock_t lock;
+ /* Open coded seqlock for all timekeeper values */
+ seqcount_t seq;
+ raw_spinlock_t lock;
};
static struct timekeeper timekeeper;
@@ -80,7 +81,8 @@ static struct timekeeper timekeeper;
* This read-write spinlock protects us from races in SMP while
* playing with xtime.
*/
-__cacheline_aligned_in_smp DEFINE_SEQLOCK(xtime_lock);
+__cacheline_aligned_in_smp DEFINE_RAW_SPINLOCK(xtime_lock);
+seqcount_t xtime_seq;
/* flag for if timekeeping is suspended */
@@ -228,7 +230,7 @@ void getnstimeofday(struct timespec *ts)
WARN_ON(timekeeping_suspended);
do {
- seq = read_seqbegin(&timekeeper.lock);
+ seq = read_seqcount_begin(&timekeeper.seq);
*ts = timekeeper.xtime;
nsecs = timekeeping_get_ns();
@@ -236,7 +238,7 @@ void getnstimeofday(struct timespec *ts)
/* If arch requires, add in gettimeoffset() */
nsecs += arch_gettimeoffset();
- } while (read_seqretry(&timekeeper.lock, seq));
+ } while (read_seqcount_retry(&timekeeper.seq, seq));
timespec_add_ns(ts, nsecs);
}
@@ -251,7 +253,7 @@ ktime_t ktime_get(void)
WARN_ON(timekeeping_suspended);
do {
- seq = read_seqbegin(&timekeeper.lock);
+ seq = read_seqcount_begin(&timekeeper.seq);
secs = timekeeper.xtime.tv_sec +
timekeeper.wall_to_monotonic.tv_sec;
nsecs = timekeeper.xtime.tv_nsec +
@@ -260,7 +262,7 @@ ktime_t ktime_get(void)
/* If arch requires, add in gettimeoffset() */
nsecs += arch_gettimeoffset();
- } while (read_seqretry(&timekeeper.lock, seq));
+ } while (read_seqcount_retry(&timekeeper.seq, seq));
/*
* Use ktime_set/ktime_add_ns to create a proper ktime on
* 32-bit architectures without CONFIG_KTIME_SCALAR.
@@ -286,14 +288,14 @@ void ktime_get_ts(struct timespec *ts)
WARN_ON(timekeeping_suspended);
do {
- seq = read_seqbegin(&timekeeper.lock);
+ seq = read_seqcount_begin(&timekeeper.seq);
*ts = timekeeper.xtime;
tomono = timekeeper.wall_to_monotonic;
nsecs = timekeeping_get_ns();
/* If arch requires, add in gettimeoffset() */
nsecs += arch_gettimeoffset();
- } while (read_seqretry(&timekeeper.lock, seq));
+ } while (read_seqcount_retry(&timekeeper.seq, seq));
set_normalized_timespec(ts, ts->tv_sec + tomono.tv_sec,
ts->tv_nsec + tomono.tv_nsec + nsecs);
@@ -321,7 +323,7 @@ void getnstime_raw_and_real(struct timespec *ts_raw, struct timespec *ts_real)
do {
u32 arch_offset;
- seq = read_seqbegin(&timekeeper.lock);
+ seq = read_seqcount_begin(&timekeeper.seq);
*ts_raw = timekeeper.raw_time;
*ts_real = timekeeper.xtime;
@@ -334,7 +336,7 @@ void getnstime_raw_and_real(struct timespec *ts_raw, struct timespec *ts_real)
nsecs_raw += arch_offset;
nsecs_real += arch_offset;
- } while (read_seqretry(&timekeeper.lock, seq));
+ } while (read_seqcount_retry(&timekeeper.seq, seq));
timespec_add_ns(ts_raw, nsecs_raw);
timespec_add_ns(ts_real, nsecs_real);
@@ -373,7 +375,8 @@ int do_settimeofday(const struct timespec *tv)
if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
return -EINVAL;
- write_seqlock_irqsave(&timekeeper.lock, flags);
+ raw_spin_lock_irqsave(&timekeeper.lock, flags);
+ write_seqcount_begin(&timekeeper.seq);
timekeeping_forward_now();
@@ -385,7 +388,8 @@ int do_settimeofday(const struct timespec *tv)
timekeeper.xtime = *tv;
timekeeping_update(true);
- write_sequnlock_irqrestore(&timekeeper.lock, flags);
+ write_seqcount_end(&timekeeper.seq);
+ raw_spin_unlock_irqrestore(&timekeeper.lock, flags);
/* signal hrtimers about time change */
clock_was_set();
@@ -409,7 +413,8 @@ int timekeeping_inject_offset(struct timespec *ts)
if ((unsigned long)ts->tv_nsec >= NSEC_PER_SEC)
return -EINVAL;
- write_seqlock_irqsave(&timekeeper.lock, flags);
+ raw_spin_lock_irqsave(&timekeeper.lock, flags);
+ write_seqcount_begin(&timekeeper.seq);
timekeeping_forward_now();
@@ -419,7 +424,8 @@ int timekeeping_inject_offset(struct timespec *ts)
timekeeping_update(true);
- write_sequnlock_irqrestore(&timekeeper.lock, flags);
+ write_seqcount_end(&timekeeper.seq);
+ raw_spin_unlock_irqrestore(&timekeeper.lock, flags);
/* signal hrtimers about time change */
clock_was_set();
@@ -440,7 +446,8 @@ static int change_clocksource(void *data)
new = (struct clocksource *) data;
- write_seqlock_irqsave(&timekeeper.lock, flags);
+ raw_spin_lock_irqsave(&timekeeper.lock, flags);
+ write_seqcount_begin(&timekeeper.seq);
timekeeping_forward_now();
if (!new->enable || new->enable(new) == 0) {
@@ -451,7 +458,8 @@ static int change_clocksource(void *data)
}
timekeeping_update(true);
- write_sequnlock_irqrestore(&timekeeper.lock, flags);
+ write_seqcount_end(&timekeeper.seq);
+ raw_spin_unlock_irqrestore(&timekeeper.lock, flags);
return 0;
}
@@ -498,11 +506,11 @@ void getrawmonotonic(struct timespec *ts)
s64 nsecs;
do {
- seq = read_seqbegin(&timekeeper.lock);
+ seq = read_seqcount_begin(&timekeeper.seq);
nsecs = timekeeping_get_ns_raw();
*ts = timekeeper.raw_time;
- } while (read_seqretry(&timekeeper.lock, seq));
+ } while (read_seqcount_retry(&timekeeper.seq, seq));
timespec_add_ns(ts, nsecs);
}
@@ -518,11 +526,11 @@ int timekeeping_valid_for_hres(void)
int ret;
do {
- seq = read_seqbegin(&timekeeper.lock);
+ seq = read_seqcount_begin(&timekeeper.seq);
ret = timekeeper.clock->flags & CLOCK_SOURCE_VALID_FOR_HRES;
- } while (read_seqretry(&timekeeper.lock, seq));
+ } while (read_seqcount_retry(&timekeeper.seq, seq));
return ret;
}
@@ -535,11 +543,11 @@ u64 timekeeping_max_deferment(void)
unsigned long seq;
u64 ret;
do {
- seq = read_seqbegin(&timekeeper.lock);
+ seq = read_seqcount_begin(&timekeeper.seq);
ret = timekeeper.clock->max_idle_ns;
- } while (read_seqretry(&timekeeper.lock, seq));
+ } while (read_seqcount_retry(&timekeeper.seq, seq));
return ret;
}
@@ -586,11 +594,13 @@ void __init timekeeping_init(void)
read_persistent_clock(&now);
read_boot_clock(&boot);
- seqlock_init(&timekeeper.lock);
+ raw_spin_lock_init(&timekeeper.lock);
+ seqcount_init(&timekeeper.seq);
ntp_init();
- write_seqlock_irqsave(&timekeeper.lock, flags);
+ raw_spin_lock_irqsave(&timekeeper.lock, flags);
+ write_seqcount_begin(&timekeeper.seq);
clock = clocksource_default_clock();
if (clock->enable)
clock->enable(clock);
@@ -608,7 +618,8 @@ void __init timekeeping_init(void)
-boot.tv_sec, -boot.tv_nsec);
timekeeper.total_sleep_time.tv_sec = 0;
timekeeper.total_sleep_time.tv_nsec = 0;
- write_sequnlock_irqrestore(&timekeeper.lock, flags);
+ write_seqcount_end(&timekeeper.seq);
+ raw_spin_unlock_irqrestore(&timekeeper.lock, flags);
}
/* time in seconds when suspend began */
@@ -657,7 +668,8 @@ void timekeeping_inject_sleeptime(struct timespec *delta)
if (!(ts.tv_sec == 0 && ts.tv_nsec == 0))
return;
- write_seqlock_irqsave(&timekeeper.lock, flags);
+ raw_spin_lock_irqsave(&timekeeper.lock, flags);
+ write_seqcount_begin(&timekeeper.seq);
timekeeping_forward_now();
@@ -665,7 +677,8 @@ void timekeeping_inject_sleeptime(struct timespec *delta)
timekeeping_update(true);
- write_sequnlock_irqrestore(&timekeeper.lock, flags);
+ write_seqcount_end(&timekeeper.seq);
+ raw_spin_unlock_irqrestore(&timekeeper.lock, flags);
/* signal hrtimers about time change */
clock_was_set();
@@ -688,7 +701,8 @@ static void timekeeping_resume(void)
clocksource_resume();
- write_seqlock_irqsave(&timekeeper.lock, flags);
+ raw_spin_lock_irqsave(&timekeeper.lock, flags);
+ write_seqcount_begin(&timekeeper.seq);
if (timespec_compare(&ts, &timekeeping_suspend_time) > 0) {
ts = timespec_sub(ts, timekeeping_suspend_time);
@@ -698,7 +712,8 @@ static void timekeeping_resume(void)
timekeeper.clock->cycle_last = timekeeper.clock->read(timekeeper.clock);
timekeeper.ntp_error = 0;
timekeeping_suspended = 0;
- write_sequnlock_irqrestore(&timekeeper.lock, flags);
+ write_seqcount_end(&timekeeper.seq);
+ raw_spin_unlock_irqrestore(&timekeeper.lock, flags);
touch_softlockup_watchdog();
@@ -716,7 +731,8 @@ static int timekeeping_suspend(void)
read_persistent_clock(&timekeeping_suspend_time);
- write_seqlock_irqsave(&timekeeper.lock, flags);
+ raw_spin_lock_irqsave(&timekeeper.lock, flags);
+ write_seqcount_begin(&timekeeper.seq);
timekeeping_forward_now();
timekeeping_suspended = 1;
@@ -739,7 +755,8 @@ static int timekeeping_suspend(void)
timekeeping_suspend_time =
timespec_add(timekeeping_suspend_time, delta_delta);
}
- write_sequnlock_irqrestore(&timekeeper.lock, flags);
+ write_seqcount_end(&timekeeper.seq);
+ raw_spin_unlock_irqrestore(&timekeeper.lock, flags);
clockevents_notify(CLOCK_EVT_NOTIFY_SUSPEND, NULL);
clocksource_suspend();
@@ -997,7 +1014,8 @@ static void update_wall_time(void)
int shift = 0, maxshift;
unsigned long flags;
- write_seqlock_irqsave(&timekeeper.lock, flags);
+ raw_spin_lock_irqsave(&timekeeper.lock, flags);
+ write_seqcount_begin(&timekeeper.seq);
/* Make sure we're fully resumed: */
if (unlikely(timekeeping_suspended))
@@ -1084,8 +1102,8 @@ static void update_wall_time(void)
timekeeping_update(false);
out:
- write_sequnlock_irqrestore(&timekeeper.lock, flags);
-
+ write_seqcount_end(&timekeeper.seq);
+ raw_spin_unlock_irqrestore(&timekeeper.lock, flags);
}
/**
@@ -1131,13 +1149,13 @@ void get_monotonic_boottime(struct timespec *ts)
WARN_ON(timekeeping_suspended);
do {
- seq = read_seqbegin(&timekeeper.lock);
+ seq = read_seqcount_begin(&timekeeper.seq);
*ts = timekeeper.xtime;
tomono = timekeeper.wall_to_monotonic;
sleep = timekeeper.total_sleep_time;
nsecs = timekeeping_get_ns();
- } while (read_seqretry(&timekeeper.lock, seq));
+ } while (read_seqcount_retry(&timekeeper.seq, seq));
set_normalized_timespec(ts, ts->tv_sec + tomono.tv_sec + sleep.tv_sec,
ts->tv_nsec + tomono.tv_nsec + sleep.tv_nsec + nsecs);
@@ -1188,10 +1206,10 @@ struct timespec current_kernel_time(void)
unsigned long seq;
do {
- seq = read_seqbegin(&timekeeper.lock);
+ seq = read_seqcount_begin(&timekeeper.seq);
now = timekeeper.xtime;
- } while (read_seqretry(&timekeeper.lock, seq));
+ } while (read_seqcount_retry(&timekeeper.seq, seq));
return now;
}
@@ -1203,11 +1221,11 @@ struct timespec get_monotonic_coarse(void)
unsigned long seq;
do {
- seq = read_seqbegin(&timekeeper.lock);
+ seq = read_seqcount_begin(&timekeeper.seq);
now = timekeeper.xtime;
mono = timekeeper.wall_to_monotonic;
- } while (read_seqretry(&timekeeper.lock, seq));
+ } while (read_seqcount_retry(&timekeeper.seq, seq));
set_normalized_timespec(&now, now.tv_sec + mono.tv_sec,
now.tv_nsec + mono.tv_nsec);
@@ -1239,11 +1257,11 @@ void get_xtime_and_monotonic_and_sleep_offset(struct timespec *xtim,
unsigned long seq;
do {
- seq = read_seqbegin(&timekeeper.lock);
+ seq = read_seqcount_begin(&timekeeper.seq);
*xtim = timekeeper.xtime;
*wtom = timekeeper.wall_to_monotonic;
*sleep = timekeeper.total_sleep_time;
- } while (read_seqretry(&timekeeper.lock, seq));
+ } while (read_seqcount_retry(&timekeeper.seq, seq));
}
/**
@@ -1255,9 +1273,9 @@ ktime_t ktime_get_monotonic_offset(void)
struct timespec wtom;
do {
- seq = read_seqbegin(&timekeeper.lock);
+ seq = read_seqcount_begin(&timekeeper.seq);
wtom = timekeeper.wall_to_monotonic;
- } while (read_seqretry(&timekeeper.lock, seq));
+ } while (read_seqcount_retry(&timekeeper.seq, seq));
return timespec_to_ktime(wtom);
}
@@ -1272,7 +1290,9 @@ EXPORT_SYMBOL_GPL(ktime_get_monotonic_offset);
*/
void xtime_update(unsigned long ticks)
{
- write_seqlock(&xtime_lock);
+ raw_spin_lock(&xtime_lock);
+ write_seqcount_begin(&xtime_seq);
do_timer(ticks);
- write_sequnlock(&xtime_lock);
+ write_seqcount_end(&xtime_seq);
+ raw_spin_unlock(&xtime_lock);
}
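
Every xtime_lock and timekeeper.lock hunk above follows one pattern:
the seqlock_t, whose embedded writer spinlock would become a sleeping
lock on RT, is split into a raw_spinlock_t for writer serialization
plus a bare seqcount_t for reader retry. In the abstract (sketch with
hypothetical foo_* names):

    static DEFINE_RAW_SPINLOCK(foo_lock);  /* writer serialization */
    static seqcount_t foo_seq;             /* reader retry counter */
    static u64 foo_value;

    static void foo_write(u64 v)
    {
            unsigned long flags;

            raw_spin_lock_irqsave(&foo_lock, flags);
            write_seqcount_begin(&foo_seq);
            foo_value = v;
            write_seqcount_end(&foo_seq);
            raw_spin_unlock_irqrestore(&foo_lock, flags);
    }

    static u64 foo_read(void)
    {
            unsigned int seq;
            u64 v;

            do {
                    seq = read_seqcount_begin(&foo_seq);
                    v = foo_value;
            } while (read_seqcount_retry(&foo_seq, seq));
            return v;
    }

Readers never take the raw lock; they simply retry if a write raced
with them, so they can never block a writer or be blocked by one.
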
diff --git a/kernel/timer.c b/kernel/timer.c
index a297ffcf888e..5413ff6f98c8 100644
--- a/kernel/timer.c
+++ b/kernel/timer.c
@@ -75,6 +75,7 @@ struct tvec_root {
struct tvec_base {
spinlock_t lock;
struct timer_list *running_timer;
+ wait_queue_head_t wait_for_running_timer;
unsigned long timer_jiffies;
unsigned long next_timer;
struct tvec_root tv1;
@@ -699,6 +700,36 @@ static struct tvec_base *lock_timer_base(struct timer_list *timer,
}
}
+#ifndef CONFIG_PREEMPT_RT_FULL
+static inline struct tvec_base *switch_timer_base(struct timer_list *timer,
+ struct tvec_base *old,
+ struct tvec_base *new)
+{
+ /* See the comment in lock_timer_base() */
+ timer_set_base(timer, NULL);
+ spin_unlock(&old->lock);
+ spin_lock(&new->lock);
+ timer_set_base(timer, new);
+ return new;
+}
+#else
+static inline struct tvec_base *switch_timer_base(struct timer_list *timer,
+ struct tvec_base *old,
+ struct tvec_base *new)
+{
+ /*
+ * We cannot do the above because we might be preempted and
+ * then the preempter would see NULL and loop forever.
+ */
+ if (spin_trylock(&new->lock)) {
+ timer_set_base(timer, new);
+ spin_unlock(&old->lock);
+ return new;
+ }
+ return old;
+}
+#endif
+
static inline int
__mod_timer(struct timer_list *timer, unsigned long expires,
bool pending_only, int pinned)
@@ -725,12 +756,15 @@ __mod_timer(struct timer_list *timer, unsigned long expires,
debug_activate(timer, expires);
+ preempt_disable_rt();
cpu = smp_processor_id();
#if defined(CONFIG_NO_HZ) && defined(CONFIG_SMP)
if (!pinned && get_sysctl_timer_migration() && idle_cpu(cpu))
cpu = get_nohz_timer_target();
#endif
+ preempt_enable_rt();
+
new_base = per_cpu(tvec_bases, cpu);
if (base != new_base) {
@@ -741,14 +775,8 @@ __mod_timer(struct timer_list *timer, unsigned long expires,
* handler yet has not finished. This also guarantees that
* the timer is serialized wrt itself.
*/
- if (likely(base->running_timer != timer)) {
- /* See the comment in lock_timer_base() */
- timer_set_base(timer, NULL);
- spin_unlock(&base->lock);
- base = new_base;
- spin_lock(&base->lock);
- timer_set_base(timer, base);
- }
+ if (likely(base->running_timer != timer))
+ base = switch_timer_base(timer, base, new_base);
}
timer->expires = expires;
@@ -931,6 +959,29 @@ void add_timer_on(struct timer_list *timer, int cpu)
}
EXPORT_SYMBOL_GPL(add_timer_on);
+#ifdef CONFIG_PREEMPT_RT_FULL
+/*
+ * Wait for a running timer
+ */
+static void wait_for_running_timer(struct timer_list *timer)
+{
+ struct tvec_base *base = timer->base;
+
+ if (base->running_timer == timer)
+ wait_event(base->wait_for_running_timer,
+ base->running_timer != timer);
+}
+
+# define wakeup_timer_waiters(b) wake_up(&(b)->wait_for_running_timer)
+#else
+static inline void wait_for_running_timer(struct timer_list *timer)
+{
+ cpu_relax();
+}
+
+# define wakeup_timer_waiters(b) do { } while (0)
+#endif
+
/**
* del_timer - deactivate a timer.
* @timer: the timer to be deactivated
@@ -1003,7 +1054,7 @@ out:
}
EXPORT_SYMBOL(try_to_del_timer_sync);
-#ifdef CONFIG_SMP
+#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT_FULL)
/**
* del_timer_sync - deactivate a timer and wait for the handler to finish.
* @timer: the timer to be deactivated
@@ -1063,7 +1114,7 @@ int del_timer_sync(struct timer_list *timer)
int ret = try_to_del_timer_sync(timer);
if (ret >= 0)
return ret;
- cpu_relax();
+ wait_for_running_timer(timer);
}
}
EXPORT_SYMBOL(del_timer_sync);
@@ -1174,10 +1225,11 @@ static inline void __run_timers(struct tvec_base *base)
spin_unlock_irq(&base->lock);
call_timer_fn(timer, fn, data);
+ base->running_timer = NULL;
spin_lock_irq(&base->lock);
}
}
- base->running_timer = NULL;
+ wakeup_timer_waiters(base);
spin_unlock_irq(&base->lock);
}
@@ -1316,6 +1368,23 @@ unsigned long get_next_timer_interrupt(unsigned long now)
*/
if (cpu_is_offline(smp_processor_id()))
return now + NEXT_TIMER_MAX_DELTA;
+
+#ifdef CONFIG_PREEMPT_RT_FULL
+ /*
+ * On PREEMPT_RT we cannot sleep here. If the trylock does not
+ * succeed then we return the worst-case 'expires in 1 tick'
+ * value. We use the rt functions here directly to avoid a
+ * migrate_disable() call.
+ */
+ if (spin_do_trylock(&base->lock)) {
+ if (time_before_eq(base->next_timer, base->timer_jiffies))
+ base->next_timer = __next_timer_interrupt(base);
+ expires = base->next_timer;
+ rt_spin_unlock(&base->lock);
+ } else {
+ expires = now + 1;
+ }
+#else
spin_lock(&base->lock);
if (time_before_eq(base->next_timer, base->timer_jiffies))
base->next_timer = __next_timer_interrupt(base);
@@ -1324,7 +1393,7 @@ unsigned long get_next_timer_interrupt(unsigned long now)
if (time_before_eq(expires, now))
return now;
-
+#endif
return cmp_next_hrtimer_event(now, expires);
}
#endif
@@ -1340,14 +1409,13 @@ void update_process_times(int user_tick)
/* Note: this timer irq context must be accounted for as well. */
account_process_tick(p, user_tick);
+ scheduler_tick();
run_local_timers();
rcu_check_callbacks(cpu, user_tick);
- printk_tick();
-#ifdef CONFIG_IRQ_WORK
+#if defined(CONFIG_IRQ_WORK) && !defined(CONFIG_PREEMPT_RT_FULL)
if (in_irq())
irq_work_run();
#endif
- scheduler_tick();
run_posix_cpu_timers(p);
}
@@ -1358,6 +1426,11 @@ static void run_timer_softirq(struct softirq_action *h)
{
struct tvec_base *base = __this_cpu_read(tvec_bases);
+#if defined(CONFIG_IRQ_WORK) && defined(CONFIG_PREEMPT_RT_FULL)
+ irq_work_run();
+#endif
+
+ printk_tick();
hrtimer_run_pending();
if (time_after_eq(jiffies, base->timer_jiffies))
@@ -1684,6 +1757,7 @@ static int __cpuinit init_timers_cpu(int cpu)
}
spin_lock_init(&base->lock);
+ init_waitqueue_head(&base->wait_for_running_timer);
for (j = 0; j < TVN_SIZE; j++) {
INIT_LIST_HEAD(base->tv5.vec + j);
@@ -1723,7 +1797,7 @@ static void __cpuinit migrate_timers(int cpu)
BUG_ON(cpu_online(cpu));
old_base = per_cpu(tvec_bases, cpu);
- new_base = get_cpu_var(tvec_bases);
+ new_base = get_local_var(tvec_bases);
/*
* The caller is globally serialized and nobody else
* takes two locks at once, deadlock is not possible.
@@ -1744,7 +1818,7 @@ static void __cpuinit migrate_timers(int cpu)
spin_unlock(&old_base->lock);
spin_unlock_irq(&new_base->lock);
- put_cpu_var(tvec_bases);
+ put_local_var(tvec_bases);
}
#endif /* CONFIG_HOTPLUG_CPU */
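
On RT the timer softirq runs in a preemptible thread, so a waiter
spinning in cpu_relax() at higher priority could starve the very
callback it is waiting for; the hunks above therefore let
del_timer_sync() sleep on the per-base waitqueue until the running
callback finishes. The handshake, distilled (a recap of the code
above, base locking elided):

    /* waiter side, del_timer_sync() on PREEMPT_RT_FULL: */
    if (base->running_timer == timer)
            wait_event(base->wait_for_running_timer,
                       base->running_timer != timer);

    /* waker side, __run_timers(), once the callback has returned: */
    base->running_timer = NULL;
    wakeup_timer_waiters(base);
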
diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig
index a1d2849f2473..23c737e59ece 100644
--- a/kernel/trace/Kconfig
+++ b/kernel/trace/Kconfig
@@ -192,6 +192,24 @@ config IRQSOFF_TRACER
enabled. This option and the preempt-off timing option can be
used together or separately.)
+config INTERRUPT_OFF_HIST
+ bool "Interrupts-off Latency Histogram"
+ depends on IRQSOFF_TRACER
+ help
+ This option generates continuously updated histograms (one per cpu)
+ of the duration of time periods with interrupts disabled. The
+ histograms are disabled by default. To enable them, write a non-zero
+ number to
+
+ /sys/kernel/debug/tracing/latency_hist/enable/preemptirqsoff
+
+ If PREEMPT_OFF_HIST is also selected, additional histograms (one
+ per cpu) are generated that accumulate the duration of time periods
+ when both interrupts and preemption are disabled. The histogram data
+ will be located in the debug file system at
+
+ /sys/kernel/debug/tracing/latency_hist/irqsoff
+
config PREEMPT_TRACER
bool "Preemption-off Latency Tracer"
default n
@@ -214,6 +232,24 @@ config PREEMPT_TRACER
enabled. This option and the irqs-off timing option can be
used together or separately.)
+config PREEMPT_OFF_HIST
+ bool "Preemption-off Latency Histogram"
+ depends on PREEMPT_TRACER
+ help
+ This option generates continuously updated histograms (one per cpu)
+ of the duration of time periods with preemption disabled. The
+ histograms are disabled by default. To enable them, write a non-zero
+ number to
+
+ /sys/kernel/debug/tracing/latency_hist/enable/preemptirqsoff
+
+ If INTERRUPT_OFF_HIST is also selected, additional histograms (one
+ per cpu) are generated that accumulate the duration of time periods
+ when both interrupts and preemption are disabled. The histogram data
+ will be located in the debug file system at
+
+ /sys/kernel/debug/tracing/latency_hist/preemptoff
+
config SCHED_TRACER
bool "Scheduling Latency Tracer"
select GENERIC_TRACER
@@ -223,6 +259,74 @@ config SCHED_TRACER
This tracer tracks the latency of the highest priority task
to be scheduled in, starting from the point it has woken up.
+config WAKEUP_LATENCY_HIST
+ bool "Scheduling Latency Histogram"
+ depends on SCHED_TRACER
+ help
+ This option generates continuously updated histograms (one per cpu)
+ of the scheduling latency of the highest priority task.
+ The histograms are disabled by default. To enable them, write a
+ non-zero number to
+
+ /sys/kernel/debug/tracing/latency_hist/enable/wakeup
+
+ Two different algorithms are used, one to determine the latency of
+ processes that exclusively use the highest priority of the system and
+ another one to determine the latency of processes that share the
+ highest system priority with other processes. The former is used to
+ improve hardware and system software, the latter to optimize the
+ priority design of a given system. The histogram data will be
+ located in the debug file system at
+
+ /sys/kernel/debug/tracing/latency_hist/wakeup
+
+ and
+
+ /sys/kernel/debug/tracing/latency_hist/wakeup/sharedprio
+
+ If both Scheduling Latency Histogram and Missed Timer Offsets
+ Histogram are selected, additional histogram data will be collected
+ that contain, in addition to the wakeup latency, the timer latency, in
+ case the wakeup was triggered by an expired timer. These histograms
+ are available in the
+
+ /sys/kernel/debug/tracing/latency_hist/timerandwakeup
+
+ directory. They reflect the apparent interrupt and scheduling latency
+ and are best suited to determining the worst-case latency of a given
+ system. To enable these histograms, write a non-zero number to
+
+ /sys/kernel/debug/tracing/latency_hist/enable/timerandwakeup
+
+config MISSED_TIMER_OFFSETS_HIST
+ depends on HIGH_RES_TIMERS
+ select GENERIC_TRACER
+ bool "Missed Timer Offsets Histogram"
+ help
+ Generate a histogram of missed timer offsets in microseconds. The
+ histograms are disabled by default. To enable them, write a non-zero
+ number to
+
+ /sys/kernel/debug/tracing/latency_hist/enable/missed_timer_offsets
+
+ The histogram data will be located in the debug file system at
+
+ /sys/kernel/debug/tracing/latency_hist/missed_timer_offsets
+
+ If both Scheduling Latency Histogram and Missed Timer Offsets
+ Histogram are selected, additional histogram data will be collected
+ that contain, in addition to the wakeup latency, the timer latency, in
+ case the wakeup was triggered by an expired timer. These histograms
+ are available in the
+
+ /sys/kernel/debug/tracing/latency_hist/timerandwakeup
+
+ directory. They reflect the apparent interrupt and scheduling latency
+ and are best suited to determining the worst-case latency of a given
+ system. To enable these histograms, write a non-zero number to
+
+ /sys/kernel/debug/tracing/latency_hist/enable/timerandwakeup
+
config ENABLE_DEFAULT_TRACERS
bool "Trace process context switches and events"
depends on !GENERIC_TRACER
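
All of the histogram options above share the same debugfs switch
scheme: each histogram stays off until a non-zero value is written to
its file under latency_hist/enable/. A minimal user-space sketch
(paths as documented in the help texts; error handling trimmed):

    #include <stdio.h>

    int main(void)
    {
            const char *path =
                    "/sys/kernel/debug/tracing/latency_hist/enable/preemptirqsoff";
            FILE *f = fopen(path, "w");

            if (!f)
                    return 1;
            fputs("1\n", f);        /* any non-zero value enables logging */
            return fclose(f) ? 1 : 0;
    }
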
diff --git a/kernel/trace/Makefile b/kernel/trace/Makefile
index 5f39a07fe5ea..108a387e1459 100644
--- a/kernel/trace/Makefile
+++ b/kernel/trace/Makefile
@@ -36,6 +36,10 @@ obj-$(CONFIG_FUNCTION_TRACER) += trace_functions.o
obj-$(CONFIG_IRQSOFF_TRACER) += trace_irqsoff.o
obj-$(CONFIG_PREEMPT_TRACER) += trace_irqsoff.o
obj-$(CONFIG_SCHED_TRACER) += trace_sched_wakeup.o
+obj-$(CONFIG_INTERRUPT_OFF_HIST) += latency_hist.o
+obj-$(CONFIG_PREEMPT_OFF_HIST) += latency_hist.o
+obj-$(CONFIG_WAKEUP_LATENCY_HIST) += latency_hist.o
+obj-$(CONFIG_MISSED_TIMER_OFFSETS_HIST) += latency_hist.o
obj-$(CONFIG_NOP_TRACER) += trace_nop.o
obj-$(CONFIG_STACK_TRACER) += trace_stack.o
obj-$(CONFIG_MMIOTRACE) += trace_mmiotrace.o
diff --git a/kernel/trace/latency_hist.c b/kernel/trace/latency_hist.c
new file mode 100644
index 000000000000..9d49fcb8962f
--- /dev/null
+++ b/kernel/trace/latency_hist.c
@@ -0,0 +1,1170 @@
+/*
+ * kernel/trace/latency_hist.c
+ *
+ * Add support for histograms of preemption-off latency,
+ * interrupt-off latency and wakeup latency; it depends on
+ * Real-Time Preemption Support.
+ *
+ * Copyright (C) 2005 MontaVista Software, Inc.
+ * Yi Yang <yyang@ch.mvista.com>
+ *
+ * Converted to work with the new latency tracer.
+ * Copyright (C) 2008 Red Hat, Inc.
+ * Steven Rostedt <srostedt@redhat.com>
+ *
+ */
+#include <linux/module.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+#include <linux/percpu.h>
+#include <linux/kallsyms.h>
+#include <linux/uaccess.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <asm/atomic.h>
+#include <asm/div64.h>
+
+#include "trace.h"
+#include <trace/events/sched.h>
+
+#define CREATE_TRACE_POINTS
+#include <trace/events/hist.h>
+
+enum {
+ IRQSOFF_LATENCY = 0,
+ PREEMPTOFF_LATENCY,
+ PREEMPTIRQSOFF_LATENCY,
+ WAKEUP_LATENCY,
+ WAKEUP_LATENCY_SHAREDPRIO,
+ MISSED_TIMER_OFFSETS,
+ TIMERANDWAKEUP_LATENCY,
+ MAX_LATENCY_TYPE,
+};
+
+#define MAX_ENTRY_NUM 10240
+
+struct hist_data {
+ atomic_t hist_mode; /* 0 log, 1 don't log */
+ long offset; /* set it to MAX_ENTRY_NUM/2 for a bipolar scale */
+ unsigned long min_lat;
+ unsigned long max_lat;
+ unsigned long long below_hist_bound_samples;
+ unsigned long long above_hist_bound_samples;
+ unsigned long long accumulate_lat;
+ unsigned long long total_samples;
+ unsigned long long hist_array[MAX_ENTRY_NUM];
+};
+
+struct enable_data {
+ int latency_type;
+ int enabled;
+};
+
+static char *latency_hist_dir_root = "latency_hist";
+
+#ifdef CONFIG_INTERRUPT_OFF_HIST
+static DEFINE_PER_CPU(struct hist_data, irqsoff_hist);
+static char *irqsoff_hist_dir = "irqsoff";
+static DEFINE_PER_CPU(cycles_t, hist_irqsoff_start);
+static DEFINE_PER_CPU(int, hist_irqsoff_counting);
+#endif
+
+#ifdef CONFIG_PREEMPT_OFF_HIST
+static DEFINE_PER_CPU(struct hist_data, preemptoff_hist);
+static char *preemptoff_hist_dir = "preemptoff";
+static DEFINE_PER_CPU(cycles_t, hist_preemptoff_start);
+static DEFINE_PER_CPU(int, hist_preemptoff_counting);
+#endif
+
+#if defined(CONFIG_PREEMPT_OFF_HIST) && defined(CONFIG_INTERRUPT_OFF_HIST)
+static DEFINE_PER_CPU(struct hist_data, preemptirqsoff_hist);
+static char *preemptirqsoff_hist_dir = "preemptirqsoff";
+static DEFINE_PER_CPU(cycles_t, hist_preemptirqsoff_start);
+static DEFINE_PER_CPU(int, hist_preemptirqsoff_counting);
+#endif
+
+#if defined(CONFIG_PREEMPT_OFF_HIST) || defined(CONFIG_INTERRUPT_OFF_HIST)
+static notrace void probe_preemptirqsoff_hist(void *v, int reason, int start);
+static struct enable_data preemptirqsoff_enabled_data = {
+ .latency_type = PREEMPTIRQSOFF_LATENCY,
+ .enabled = 0,
+};
+#endif
+
+#if defined(CONFIG_WAKEUP_LATENCY_HIST) || \
+ defined(CONFIG_MISSED_TIMER_OFFSETS_HIST)
+struct maxlatproc_data {
+ char comm[FIELD_SIZEOF(struct task_struct, comm)];
+ char current_comm[FIELD_SIZEOF(struct task_struct, comm)];
+ int pid;
+ int current_pid;
+ int prio;
+ int current_prio;
+ long latency;
+ long timeroffset;
+ cycle_t timestamp;
+};
+#endif
+
+#ifdef CONFIG_WAKEUP_LATENCY_HIST
+static DEFINE_PER_CPU(struct hist_data, wakeup_latency_hist);
+static DEFINE_PER_CPU(struct hist_data, wakeup_latency_hist_sharedprio);
+static char *wakeup_latency_hist_dir = "wakeup";
+static char *wakeup_latency_hist_dir_sharedprio = "sharedprio";
+static notrace void probe_wakeup_latency_hist_start(void *v,
+ struct task_struct *p, int success);
+static notrace void probe_wakeup_latency_hist_stop(void *v,
+ struct task_struct *prev, struct task_struct *next);
+static notrace void probe_sched_migrate_task(void *,
+ struct task_struct *task, int cpu);
+static struct enable_data wakeup_latency_enabled_data = {
+ .latency_type = WAKEUP_LATENCY,
+ .enabled = 0,
+};
+static DEFINE_PER_CPU(struct maxlatproc_data, wakeup_maxlatproc);
+static DEFINE_PER_CPU(struct maxlatproc_data, wakeup_maxlatproc_sharedprio);
+static DEFINE_PER_CPU(struct task_struct *, wakeup_task);
+static DEFINE_PER_CPU(int, wakeup_sharedprio);
+static unsigned long wakeup_pid;
+#endif
+
+#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST
+static DEFINE_PER_CPU(struct hist_data, missed_timer_offsets);
+static char *missed_timer_offsets_dir = "missed_timer_offsets";
+static notrace void probe_hrtimer_interrupt(void *v, int cpu,
+ long long offset, struct task_struct *curr, struct task_struct *task);
+static struct enable_data missed_timer_offsets_enabled_data = {
+ .latency_type = MISSED_TIMER_OFFSETS,
+ .enabled = 0,
+};
+static DEFINE_PER_CPU(struct maxlatproc_data, missed_timer_offsets_maxlatproc);
+static unsigned long missed_timer_offsets_pid;
+#endif
+
+#if defined(CONFIG_WAKEUP_LATENCY_HIST) && \
+ defined(CONFIG_MISSED_TIMER_OFFSETS_HIST)
+static DEFINE_PER_CPU(struct hist_data, timerandwakeup_latency_hist);
+static char *timerandwakeup_latency_hist_dir = "timerandwakeup";
+static struct enable_data timerandwakeup_enabled_data = {
+ .latency_type = TIMERANDWAKEUP_LATENCY,
+ .enabled = 0,
+};
+static DEFINE_PER_CPU(struct maxlatproc_data, timerandwakeup_maxlatproc);
+#endif
+
+void notrace latency_hist(int latency_type, int cpu, long latency,
+ long timeroffset, cycle_t stop,
+ struct task_struct *p)
+{
+ struct hist_data *my_hist;
+#if defined(CONFIG_WAKEUP_LATENCY_HIST) || \
+ defined(CONFIG_MISSED_TIMER_OFFSETS_HIST)
+ struct maxlatproc_data *mp = NULL;
+#endif
+
+ if (cpu < 0 || cpu >= NR_CPUS || latency_type < 0 ||
+ latency_type >= MAX_LATENCY_TYPE)
+ return;
+
+ switch (latency_type) {
+#ifdef CONFIG_INTERRUPT_OFF_HIST
+ case IRQSOFF_LATENCY:
+ my_hist = &per_cpu(irqsoff_hist, cpu);
+ break;
+#endif
+#ifdef CONFIG_PREEMPT_OFF_HIST
+ case PREEMPTOFF_LATENCY:
+ my_hist = &per_cpu(preemptoff_hist, cpu);
+ break;
+#endif
+#if defined(CONFIG_PREEMPT_OFF_HIST) && defined(CONFIG_INTERRUPT_OFF_HIST)
+ case PREEMPTIRQSOFF_LATENCY:
+ my_hist = &per_cpu(preemptirqsoff_hist, cpu);
+ break;
+#endif
+#ifdef CONFIG_WAKEUP_LATENCY_HIST
+ case WAKEUP_LATENCY:
+ my_hist = &per_cpu(wakeup_latency_hist, cpu);
+ mp = &per_cpu(wakeup_maxlatproc, cpu);
+ break;
+ case WAKEUP_LATENCY_SHAREDPRIO:
+ my_hist = &per_cpu(wakeup_latency_hist_sharedprio, cpu);
+ mp = &per_cpu(wakeup_maxlatproc_sharedprio, cpu);
+ break;
+#endif
+#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST
+ case MISSED_TIMER_OFFSETS:
+ my_hist = &per_cpu(missed_timer_offsets, cpu);
+ mp = &per_cpu(missed_timer_offsets_maxlatproc, cpu);
+ break;
+#endif
+#if defined(CONFIG_WAKEUP_LATENCY_HIST) && \
+ defined(CONFIG_MISSED_TIMER_OFFSETS_HIST)
+ case TIMERANDWAKEUP_LATENCY:
+ my_hist = &per_cpu(timerandwakeup_latency_hist, cpu);
+ mp = &per_cpu(timerandwakeup_maxlatproc, cpu);
+ break;
+#endif
+
+ default:
+ return;
+ }
+
+ latency += my_hist->offset;
+
+ if (atomic_read(&my_hist->hist_mode) == 0)
+ return;
+
+ if (latency < 0 || latency >= MAX_ENTRY_NUM) {
+ if (latency < 0)
+ my_hist->below_hist_bound_samples++;
+ else
+ my_hist->above_hist_bound_samples++;
+ } else
+ my_hist->hist_array[latency]++;
+
+ if (unlikely(latency > my_hist->max_lat ||
+ my_hist->min_lat == LONG_MAX)) {
+#if defined(CONFIG_WAKEUP_LATENCY_HIST) || \
+ defined(CONFIG_MISSED_TIMER_OFFSETS_HIST)
+ if (latency_type == WAKEUP_LATENCY ||
+ latency_type == WAKEUP_LATENCY_SHAREDPRIO ||
+ latency_type == MISSED_TIMER_OFFSETS ||
+ latency_type == TIMERANDWAKEUP_LATENCY) {
+ strncpy(mp->comm, p->comm, sizeof(mp->comm));
+ strncpy(mp->current_comm, current->comm,
+ sizeof(mp->current_comm));
+ mp->pid = task_pid_nr(p);
+ mp->current_pid = task_pid_nr(current);
+ mp->prio = p->prio;
+ mp->current_prio = current->prio;
+ mp->latency = latency;
+ mp->timeroffset = timeroffset;
+ mp->timestamp = stop;
+ }
+#endif
+ my_hist->max_lat = latency;
+ }
+ if (unlikely(latency < my_hist->min_lat))
+ my_hist->min_lat = latency;
+ my_hist->total_samples++;
+ my_hist->accumulate_lat += latency;
+}
+
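+/*
+ * seq_file iterator for one histogram: l_start prints the summary
+ * header on the first call and pauses logging by decrementing
+ * hist_mode, l_next hands out one bucket index per iteration and
+ * re-enables logging once the last bucket has been emitted, and
+ * l_stop frees the iterator cookie.
+ */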
+static void *l_start(struct seq_file *m, loff_t *pos)
+{
+ loff_t *index_ptr = NULL;
+ loff_t index = *pos;
+ struct hist_data *my_hist = m->private;
+
+ if (index == 0) {
+ char minstr[32], avgstr[32], maxstr[32];
+
+ atomic_dec(&my_hist->hist_mode);
+
+ if (likely(my_hist->total_samples)) {
+ unsigned long avg = (unsigned long)
+ div64_u64(my_hist->accumulate_lat,
+ my_hist->total_samples);
+ snprintf(minstr, sizeof(minstr), "%ld",
+ (long) my_hist->min_lat - my_hist->offset);
+ snprintf(avgstr, sizeof(avgstr), "%ld",
+ (long) avg - my_hist->offset);
+ snprintf(maxstr, sizeof(maxstr), "%ld",
+ (long) my_hist->max_lat - my_hist->offset);
+ } else {
+ strcpy(minstr, "<undef>");
+ strcpy(avgstr, minstr);
+ strcpy(maxstr, minstr);
+ }
+
+ seq_printf(m, "#Minimum latency: %s microseconds\n"
+ "#Average latency: %s microseconds\n"
+ "#Maximum latency: %s microseconds\n"
+ "#Total samples: %llu\n"
+ "#There are %llu samples lower than %ld"
+ " microseconds.\n"
+ "#There are %llu samples greater or equal"
+ " than %ld microseconds.\n"
+ "#usecs\t%16s\n",
+ minstr, avgstr, maxstr,
+ my_hist->total_samples,
+ my_hist->below_hist_bound_samples,
+ -my_hist->offset,
+ my_hist->above_hist_bound_samples,
+ MAX_ENTRY_NUM - my_hist->offset,
+ "samples");
+ }
+ if (index < MAX_ENTRY_NUM) {
+ index_ptr = kmalloc(sizeof(loff_t), GFP_KERNEL);
+ if (index_ptr)
+ *index_ptr = index;
+ }
+
+ return index_ptr;
+}
+
+static void *l_next(struct seq_file *m, void *p, loff_t *pos)
+{
+ loff_t *index_ptr = p;
+ struct hist_data *my_hist = m->private;
+
+ if (++*pos >= MAX_ENTRY_NUM) {
+ atomic_inc(&my_hist->hist_mode);
+ return NULL;
+ }
+ *index_ptr = *pos;
+ return index_ptr;
+}
+
+static void l_stop(struct seq_file *m, void *p)
+{
+ kfree(p);
+}
+
+static int l_show(struct seq_file *m, void *p)
+{
+ int index = *(loff_t *) p;
+ struct hist_data *my_hist = m->private;
+
+ seq_printf(m, "%6ld\t%16llu\n", index - my_hist->offset,
+ my_hist->hist_array[index]);
+ return 0;
+}
+
+static const struct seq_operations latency_hist_seq_op = {
+ .start = l_start,
+ .next = l_next,
+ .stop = l_stop,
+ .show = l_show
+};
+
+static int latency_hist_open(struct inode *inode, struct file *file)
+{
+ int ret;
+
+ ret = seq_open(file, &latency_hist_seq_op);
+ if (!ret) {
+ struct seq_file *seq = file->private_data;
+ seq->private = inode->i_private;
+ }
+ return ret;
+}
+
+static const struct file_operations latency_hist_fops = {
+ .open = latency_hist_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release,
+};
+
+#if defined(CONFIG_WAKEUP_LATENCY_HIST) || \
+ defined(CONFIG_MISSED_TIMER_OFFSETS_HIST)
+static void clear_maxlatprocdata(struct maxlatproc_data *mp)
+{
+ mp->comm[0] = mp->current_comm[0] = '\0';
+ mp->prio = mp->current_prio = mp->pid = mp->current_pid =
+ mp->latency = mp->timeroffset = -1;
+ mp->timestamp = 0;
+}
+#endif
+
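+/*
+ * Clear one histogram. hist_mode is decremented first so that
+ * concurrent latency_hist() calls are dropped while the counters are
+ * being reset, and incremented again afterwards.
+ */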
+static void hist_reset(struct hist_data *hist)
+{
+ atomic_dec(&hist->hist_mode);
+
+ memset(hist->hist_array, 0, sizeof(hist->hist_array));
+ hist->below_hist_bound_samples = 0ULL;
+ hist->above_hist_bound_samples = 0ULL;
+ hist->min_lat = LONG_MAX;
+ hist->max_lat = 0UL;
+ hist->total_samples = 0ULL;
+ hist->accumulate_lat = 0ULL;
+
+ atomic_inc(&hist->hist_mode);
+}
+
+static ssize_t
+latency_hist_reset(struct file *file, const char __user *a,
+ size_t size, loff_t *off)
+{
+ int cpu;
+ struct hist_data *hist = NULL;
+#if defined(CONFIG_WAKEUP_LATENCY_HIST) || \
+ defined(CONFIG_MISSED_TIMER_OFFSETS_HIST)
+ struct maxlatproc_data *mp = NULL;
+#endif
+ off_t latency_type = (off_t) file->private_data;
+
+ for_each_online_cpu(cpu) {
+
+ switch (latency_type) {
+#ifdef CONFIG_PREEMPT_OFF_HIST
+ case PREEMPTOFF_LATENCY:
+ hist = &per_cpu(preemptoff_hist, cpu);
+ break;
+#endif
+#ifdef CONFIG_INTERRUPT_OFF_HIST
+ case IRQSOFF_LATENCY:
+ hist = &per_cpu(irqsoff_hist, cpu);
+ break;
+#endif
+#if defined(CONFIG_INTERRUPT_OFF_HIST) && defined(CONFIG_PREEMPT_OFF_HIST)
+ case PREEMPTIRQSOFF_LATENCY:
+ hist = &per_cpu(preemptirqsoff_hist, cpu);
+ break;
+#endif
+#ifdef CONFIG_WAKEUP_LATENCY_HIST
+ case WAKEUP_LATENCY:
+ hist = &per_cpu(wakeup_latency_hist, cpu);
+ mp = &per_cpu(wakeup_maxlatproc, cpu);
+ break;
+ case WAKEUP_LATENCY_SHAREDPRIO:
+ hist = &per_cpu(wakeup_latency_hist_sharedprio, cpu);
+ mp = &per_cpu(wakeup_maxlatproc_sharedprio, cpu);
+ break;
+#endif
+#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST
+ case MISSED_TIMER_OFFSETS:
+ hist = &per_cpu(missed_timer_offsets, cpu);
+ mp = &per_cpu(missed_timer_offsets_maxlatproc, cpu);
+ break;
+#endif
+#if defined(CONFIG_WAKEUP_LATENCY_HIST) && \
+ defined(CONFIG_MISSED_TIMER_OFFSETS_HIST)
+ case TIMERANDWAKEUP_LATENCY:
+ hist = &per_cpu(timerandwakeup_latency_hist, cpu);
+ mp = &per_cpu(timerandwakeup_maxlatproc, cpu);
+ break;
+#endif
+ }
+
+ hist_reset(hist);
+#if defined(CONFIG_WAKEUP_LATENCY_HIST) || \
+ defined(CONFIG_MISSED_TIMER_OFFSETS_HIST)
+ if (latency_type == WAKEUP_LATENCY ||
+ latency_type == WAKEUP_LATENCY_SHAREDPRIO ||
+ latency_type == MISSED_TIMER_OFFSETS ||
+ latency_type == TIMERANDWAKEUP_LATENCY)
+ clear_maxlatprocdata(mp);
+#endif
+ }
+
+ return size;
+}
+
+#if defined(CONFIG_WAKEUP_LATENCY_HIST) || \
+ defined(CONFIG_MISSED_TIMER_OFFSETS_HIST)
+static ssize_t
+show_pid(struct file *file, char __user *ubuf, size_t cnt, loff_t *ppos)
+{
+ char buf[64];
+ int r;
+ unsigned long *this_pid = file->private_data;
+
+ r = snprintf(buf, sizeof(buf), "%lu\n", *this_pid);
+ return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
+}
+
+static ssize_t do_pid(struct file *file, const char __user *ubuf,
+ size_t cnt, loff_t *ppos)
+{
+ char buf[64];
+ unsigned long pid;
+ unsigned long *this_pid = file->private_data;
+
+ if (cnt >= sizeof(buf))
+ return -EINVAL;
+
+ if (copy_from_user(buf, ubuf, cnt))
+ return -EFAULT;
+
+ buf[cnt] = '\0';
+
+ if (strict_strtoul(buf, 10, &pid))
+ return -EINVAL;
+
+ *this_pid = pid;
+
+ return cnt;
+}
+#endif
+
+#if defined(CONFIG_WAKEUP_LATENCY_HIST) || \
+ defined(CONFIG_MISSED_TIMER_OFFSETS_HIST)
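+/*
+ * Format of the max_latency-CPUx files:
+ *   <pid> <prio> <latency> (<timeroffset>) <comm>
+ *       <- <curr pid> <curr prio> <curr comm> <secs>.<usecs>
+ * i.e. the process whose wakeup suffered the maximum latency, followed
+ * by the process that was running when it happened. Priorities are
+ * printed as MAX_RT_PRIO-1 - prio, so larger means higher RT priority.
+ */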
+static ssize_t
+show_maxlatproc(struct file *file, char __user *ubuf, size_t cnt, loff_t *ppos)
+{
+ int r;
+ struct maxlatproc_data *mp = file->private_data;
+ int strmaxlen = (TASK_COMM_LEN * 2) + (8 * 8);
+ unsigned long long t;
+ unsigned long usecs, secs;
+ char *buf;
+
+ if (mp->pid == -1 || mp->current_pid == -1) {
+ buf = "(none)\n";
+ return simple_read_from_buffer(ubuf, cnt, ppos, buf,
+ strlen(buf));
+ }
+
+ buf = kmalloc(strmaxlen, GFP_KERNEL);
+ if (buf == NULL)
+ return -ENOMEM;
+
+ t = ns2usecs(mp->timestamp);
+ usecs = do_div(t, USEC_PER_SEC);
+ secs = (unsigned long) t;
+ r = snprintf(buf, strmaxlen,
+ "%d %d %ld (%ld) %s <- %d %d %s %lu.%06lu\n", mp->pid,
+ MAX_RT_PRIO-1 - mp->prio, mp->latency, mp->timeroffset, mp->comm,
+ mp->current_pid, MAX_RT_PRIO-1 - mp->current_prio, mp->current_comm,
+ secs, usecs);
+ r = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
+ kfree(buf);
+ return r;
+}
+#endif
+
+static ssize_t
+show_enable(struct file *file, char __user *ubuf, size_t cnt, loff_t *ppos)
+{
+ char buf[64];
+ struct enable_data *ed = file->private_data;
+ int r;
+
+ r = snprintf(buf, sizeof(buf), "%d\n", ed->enabled);
+ return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
+}
+
+static ssize_t
+do_enable(struct file *file, const char __user *ubuf, size_t cnt, loff_t *ppos)
+{
+ char buf[64];
+ long enable;
+ struct enable_data *ed = file->private_data;
+
+ if (cnt >= sizeof(buf))
+ return -EINVAL;
+
+ if (copy_from_user(buf, ubuf, cnt))
+ return -EFAULT;
+
+ buf[cnt] = 0;
+
+ if (strict_strtol(buf, 10, &enable))
+ return -EINVAL;
+
+ if ((enable && ed->enabled) || (!enable && !ed->enabled))
+ return cnt;
+
+ if (enable) {
+ int ret;
+
+ switch (ed->latency_type) {
+#if defined(CONFIG_INTERRUPT_OFF_HIST) || defined(CONFIG_PREEMPT_OFF_HIST)
+ case PREEMPTIRQSOFF_LATENCY:
+ ret = register_trace_preemptirqsoff_hist(
+ probe_preemptirqsoff_hist, NULL);
+ if (ret) {
+ pr_info("wakeup trace: Couldn't assign "
+ "probe_preemptirqsoff_hist "
+ "to trace_preemptirqsoff_hist\n");
+ return ret;
+ }
+ break;
+#endif
+#ifdef CONFIG_WAKEUP_LATENCY_HIST
+ case WAKEUP_LATENCY:
+ ret = register_trace_sched_wakeup(
+ probe_wakeup_latency_hist_start, NULL);
+ if (ret) {
+ pr_info("wakeup trace: Couldn't assign "
+ "probe_wakeup_latency_hist_start "
+ "to trace_sched_wakeup\n");
+ return ret;
+ }
+ ret = register_trace_sched_wakeup_new(
+ probe_wakeup_latency_hist_start, NULL);
+ if (ret) {
+ pr_info("wakeup trace: Couldn't assign "
+ "probe_wakeup_latency_hist_start "
+ "to trace_sched_wakeup_new\n");
+ unregister_trace_sched_wakeup(
+ probe_wakeup_latency_hist_start, NULL);
+ return ret;
+ }
+ ret = register_trace_sched_switch(
+ probe_wakeup_latency_hist_stop, NULL);
+ if (ret) {
+ pr_info("wakeup trace: Couldn't assign "
+ "probe_wakeup_latency_hist_stop "
+ "to trace_sched_switch\n");
+ unregister_trace_sched_wakeup(
+ probe_wakeup_latency_hist_start, NULL);
+ unregister_trace_sched_wakeup_new(
+ probe_wakeup_latency_hist_start, NULL);
+ return ret;
+ }
+ ret = register_trace_sched_migrate_task(
+ probe_sched_migrate_task, NULL);
+ if (ret) {
+ pr_info("wakeup trace: Couldn't assign "
+ "probe_sched_migrate_task "
+ "to trace_sched_migrate_task\n");
+ unregister_trace_sched_wakeup(
+ probe_wakeup_latency_hist_start, NULL);
+ unregister_trace_sched_wakeup_new(
+ probe_wakeup_latency_hist_start, NULL);
+ unregister_trace_sched_switch(
+ probe_wakeup_latency_hist_stop, NULL);
+ return ret;
+ }
+ break;
+#endif
+#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST
+ case MISSED_TIMER_OFFSETS:
+ ret = register_trace_hrtimer_interrupt(
+ probe_hrtimer_interrupt, NULL);
+ if (ret) {
+ pr_info("wakeup trace: Couldn't assign "
+ "probe_hrtimer_interrupt "
+ "to trace_hrtimer_interrupt\n");
+ return ret;
+ }
+ break;
+#endif
+#if defined(CONFIG_WAKEUP_LATENCY_HIST) && \
+ defined(CONFIG_MISSED_TIMER_OFFSETS_HIST)
+ case TIMERANDWAKEUP_LATENCY:
+ if (!wakeup_latency_enabled_data.enabled ||
+ !missed_timer_offsets_enabled_data.enabled)
+ return -EINVAL;
+ break;
+#endif
+ default:
+ break;
+ }
+ } else {
+ switch (ed->latency_type) {
+#if defined(CONFIG_INTERRUPT_OFF_HIST) || defined(CONFIG_PREEMPT_OFF_HIST)
+ case PREEMPTIRQSOFF_LATENCY:
+ {
+ int cpu;
+
+ unregister_trace_preemptirqsoff_hist(
+ probe_preemptirqsoff_hist, NULL);
+ for_each_online_cpu(cpu) {
+#ifdef CONFIG_INTERRUPT_OFF_HIST
+ per_cpu(hist_irqsoff_counting,
+ cpu) = 0;
+#endif
+#ifdef CONFIG_PREEMPT_OFF_HIST
+ per_cpu(hist_preemptoff_counting,
+ cpu) = 0;
+#endif
+#if defined(CONFIG_INTERRUPT_OFF_HIST) && defined(CONFIG_PREEMPT_OFF_HIST)
+ per_cpu(hist_preemptirqsoff_counting,
+ cpu) = 0;
+#endif
+ }
+ }
+ break;
+#endif
+#ifdef CONFIG_WAKEUP_LATENCY_HIST
+ case WAKEUP_LATENCY:
+ {
+ int cpu;
+
+ unregister_trace_sched_wakeup(
+ probe_wakeup_latency_hist_start, NULL);
+ unregister_trace_sched_wakeup_new(
+ probe_wakeup_latency_hist_start, NULL);
+ unregister_trace_sched_switch(
+ probe_wakeup_latency_hist_stop, NULL);
+ unregister_trace_sched_migrate_task(
+ probe_sched_migrate_task, NULL);
+
+ for_each_online_cpu(cpu) {
+ /* drop reference taken at wakeup */
+ if (per_cpu(wakeup_task, cpu))
+ put_task_struct(per_cpu(wakeup_task, cpu));
+ per_cpu(wakeup_task, cpu) = NULL;
+ per_cpu(wakeup_sharedprio, cpu) = 0;
+ }
+ }
+#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST
+ timerandwakeup_enabled_data.enabled = 0;
+#endif
+ break;
+#endif
+#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST
+ case MISSED_TIMER_OFFSETS:
+ unregister_trace_hrtimer_interrupt(
+ probe_hrtimer_interrupt, NULL);
+#ifdef CONFIG_WAKEUP_LATENCY_HIST
+ timerandwakeup_enabled_data.enabled = 0;
+#endif
+ break;
+#endif
+ default:
+ break;
+ }
+ }
+ ed->enabled = enable;
+ return cnt;
+}
+
+static const struct file_operations latency_hist_reset_fops = {
+ .open = tracing_open_generic,
+ .write = latency_hist_reset,
+};
+
+static const struct file_operations enable_fops = {
+ .open = tracing_open_generic,
+ .read = show_enable,
+ .write = do_enable,
+};
+
+#if defined(CONFIG_WAKEUP_LATENCY_HIST) || \
+ defined(CONFIG_MISSED_TIMER_OFFSETS_HIST)
+static const struct file_operations pid_fops = {
+ .open = tracing_open_generic,
+ .read = show_pid,
+ .write = do_pid,
+};
+
+static const struct file_operations maxlatproc_fops = {
+ .open = tracing_open_generic,
+ .read = show_maxlatproc,
+};
+#endif
+
+#if defined(CONFIG_INTERRUPT_OFF_HIST) || defined(CONFIG_PREEMPT_OFF_HIST)
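+/*
+ * Probe attached to the preemptirqsoff tracepoints: "reason" names the
+ * transition (IRQS_ON/OFF, PREEMPT_ON/OFF, TRACE_START/STOP) and
+ * "starthist" selects the start or stop path. A single timestamp per
+ * event is shared between the irqsoff, preemptoff and combined
+ * histograms via time_set, so ftrace_now() is read at most once.
+ */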
+static notrace void probe_preemptirqsoff_hist(void *v, int reason,
+ int starthist)
+{
+ int cpu = raw_smp_processor_id();
+ int time_set = 0;
+
+ if (starthist) {
+ cycle_t uninitialized_var(start);
+
+ if (!preempt_count() && !irqs_disabled())
+ return;
+
+#ifdef CONFIG_INTERRUPT_OFF_HIST
+ if ((reason == IRQS_OFF || reason == TRACE_START) &&
+ !per_cpu(hist_irqsoff_counting, cpu)) {
+ per_cpu(hist_irqsoff_counting, cpu) = 1;
+ start = ftrace_now(cpu);
+ time_set++;
+ per_cpu(hist_irqsoff_start, cpu) = start;
+ }
+#endif
+
+#ifdef CONFIG_PREEMPT_OFF_HIST
+ if ((reason == PREEMPT_OFF || reason == TRACE_START) &&
+ !per_cpu(hist_preemptoff_counting, cpu)) {
+ per_cpu(hist_preemptoff_counting, cpu) = 1;
+ if (!(time_set++))
+ start = ftrace_now(cpu);
+ per_cpu(hist_preemptoff_start, cpu) = start;
+ }
+#endif
+
+#if defined(CONFIG_INTERRUPT_OFF_HIST) && defined(CONFIG_PREEMPT_OFF_HIST)
+ if (per_cpu(hist_irqsoff_counting, cpu) &&
+ per_cpu(hist_preemptoff_counting, cpu) &&
+ !per_cpu(hist_preemptirqsoff_counting, cpu)) {
+ per_cpu(hist_preemptirqsoff_counting, cpu) = 1;
+ if (!time_set)
+ start = ftrace_now(cpu);
+ per_cpu(hist_preemptirqsoff_start, cpu) = start;
+ }
+#endif
+ } else {
+ cycle_t uninitialized_var(stop);
+
+#ifdef CONFIG_INTERRUPT_OFF_HIST
+ if ((reason == IRQS_ON || reason == TRACE_STOP) &&
+ per_cpu(hist_irqsoff_counting, cpu)) {
+ cycle_t start = per_cpu(hist_irqsoff_start, cpu);
+
+ stop = ftrace_now(cpu);
+ time_set++;
+ if (start && stop >= start) {
+ unsigned long latency =
+ nsecs_to_usecs(stop - start);
+
+ latency_hist(IRQSOFF_LATENCY, cpu, latency, 0,
+ stop, NULL);
+ }
+ per_cpu(hist_irqsoff_counting, cpu) = 0;
+ }
+#endif
+
+#ifdef CONFIG_PREEMPT_OFF_HIST
+ if ((reason == PREEMPT_ON || reason == TRACE_STOP) &&
+ per_cpu(hist_preemptoff_counting, cpu)) {
+ cycle_t start = per_cpu(hist_preemptoff_start, cpu);
+
+ if (!(time_set++))
+ stop = ftrace_now(cpu);
+ if (start && stop >= start) {
+ unsigned long latency =
+ nsecs_to_usecs(stop - start);
+
+ latency_hist(PREEMPTOFF_LATENCY, cpu, latency,
+ 0, stop, NULL);
+ }
+ per_cpu(hist_preemptoff_counting, cpu) = 0;
+ }
+#endif
+
+#if defined(CONFIG_INTERRUPT_OFF_HIST) && defined(CONFIG_PREEMPT_OFF_HIST)
+ if ((!per_cpu(hist_irqsoff_counting, cpu) ||
+ !per_cpu(hist_preemptoff_counting, cpu)) &&
+ per_cpu(hist_preemptirqsoff_counting, cpu)) {
+ cycle_t start = per_cpu(hist_preemptirqsoff_start, cpu);
+
+ if (!time_set)
+ stop = ftrace_now(cpu);
+ if (start && stop >= start) {
+ unsigned long latency =
+ nsecs_to_usecs(stop - start);
+ latency_hist(PREEMPTIRQSOFF_LATENCY, cpu,
+ latency, 0, stop, NULL);
+ }
+ per_cpu(hist_preemptirqsoff_counting, cpu) = 0;
+ }
+#endif
+ }
+}
+#endif
+
+#ifdef CONFIG_WAKEUP_LATENCY_HIST
+static DEFINE_RAW_SPINLOCK(wakeup_lock);
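+/*
+ * If the task whose wakeup is currently being timed migrates to a
+ * different CPU before it is scheduled in, move the tracked reference
+ * from the old CPU's wakeup_task slot to the new one so that
+ * probe_wakeup_latency_hist_stop() still finds it there.
+ */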
+static notrace void probe_sched_migrate_task(void *v, struct task_struct *task,
+ int cpu)
+{
+ int old_cpu = task_cpu(task);
+
+ if (cpu != old_cpu) {
+ unsigned long flags;
+ struct task_struct *cpu_wakeup_task;
+
+ raw_spin_lock_irqsave(&wakeup_lock, flags);
+
+ cpu_wakeup_task = per_cpu(wakeup_task, old_cpu);
+ if (task == cpu_wakeup_task) {
+ put_task_struct(cpu_wakeup_task);
+ per_cpu(wakeup_task, old_cpu) = NULL;
+ cpu_wakeup_task = per_cpu(wakeup_task, cpu) = task;
+ get_task_struct(cpu_wakeup_task);
+ }
+
+ raw_spin_unlock_irqrestore(&wakeup_lock, flags);
+ }
+}
+
+static notrace void probe_wakeup_latency_hist_start(void *v,
+ struct task_struct *p, int success)
+{
+ unsigned long flags;
+ struct task_struct *curr = current;
+ int cpu = task_cpu(p);
+ struct task_struct *cpu_wakeup_task;
+
+ raw_spin_lock_irqsave(&wakeup_lock, flags);
+
+ cpu_wakeup_task = per_cpu(wakeup_task, cpu);
+
+ if (wakeup_pid) {
+ if ((cpu_wakeup_task && p->prio == cpu_wakeup_task->prio) ||
+ p->prio == curr->prio)
+ per_cpu(wakeup_sharedprio, cpu) = 1;
+ if (likely(wakeup_pid != task_pid_nr(p)))
+ goto out;
+ } else {
+ if (likely(!rt_task(p)) ||
+ (cpu_wakeup_task && p->prio > cpu_wakeup_task->prio) ||
+ p->prio > curr->prio)
+ goto out;
+ if ((cpu_wakeup_task && p->prio == cpu_wakeup_task->prio) ||
+ p->prio == curr->prio)
+ per_cpu(wakeup_sharedprio, cpu) = 1;
+ }
+
+ if (cpu_wakeup_task)
+ put_task_struct(cpu_wakeup_task);
+ cpu_wakeup_task = per_cpu(wakeup_task, cpu) = p;
+ get_task_struct(cpu_wakeup_task);
+ cpu_wakeup_task->preempt_timestamp_hist =
+ ftrace_now(raw_smp_processor_id());
+out:
+ raw_spin_unlock_irqrestore(&wakeup_lock, flags);
+}
+
+static notrace void probe_wakeup_latency_hist_stop(void *v,
+ struct task_struct *prev, struct task_struct *next)
+{
+ unsigned long flags;
+ int cpu = task_cpu(next);
+ unsigned long latency;
+ cycle_t stop;
+ struct task_struct *cpu_wakeup_task;
+
+ raw_spin_lock_irqsave(&wakeup_lock, flags);
+
+ cpu_wakeup_task = per_cpu(wakeup_task, cpu);
+
+ if (cpu_wakeup_task == NULL)
+ goto out;
+
+ /* Already running? */
+ if (unlikely(current == cpu_wakeup_task))
+ goto out_reset;
+
+ if (next != cpu_wakeup_task) {
+ if (next->prio < cpu_wakeup_task->prio)
+ goto out_reset;
+
+ if (next->prio == cpu_wakeup_task->prio)
+ per_cpu(wakeup_sharedprio, cpu) = 1;
+
+ goto out;
+ }
+
+ /*
+ * The task we are waiting for is about to be switched to.
+ * Calculate latency and store it in histogram.
+ */
+ stop = ftrace_now(raw_smp_processor_id());
+
+ latency = nsecs_to_usecs(stop - next->preempt_timestamp_hist);
+
+ if (per_cpu(wakeup_sharedprio, cpu)) {
+ latency_hist(WAKEUP_LATENCY_SHAREDPRIO, cpu, latency, 0, stop,
+ next);
+ per_cpu(wakeup_sharedprio, cpu) = 0;
+ } else {
+ latency_hist(WAKEUP_LATENCY, cpu, latency, 0, stop, next);
+#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST
+ if (timerandwakeup_enabled_data.enabled) {
+ latency_hist(TIMERANDWAKEUP_LATENCY, cpu,
+ next->timer_offset + latency, next->timer_offset,
+ stop, next);
+ }
+#endif
+ }
+
+out_reset:
+#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST
+ next->timer_offset = 0;
+#endif
+ put_task_struct(cpu_wakeup_task);
+ per_cpu(wakeup_task, cpu) = NULL;
+out:
+ raw_spin_unlock_irqrestore(&wakeup_lock, flags);
+}
+#endif
+
+#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST
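+/*
+ * Called on hrtimer expiry: latency_ns is the time remaining until the
+ * programmed expiry, so a value <= 0 means the timer is overdue. The
+ * (positive) offset is recorded in microseconds, but only when the
+ * timer wakes an RT task that ought to run in place of the current
+ * one.
+ */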
+static notrace void probe_hrtimer_interrupt(void *v, int cpu,
+ long long latency_ns, struct task_struct *curr, struct task_struct *task)
+{
+ if (latency_ns <= 0 && task != NULL && rt_task(task) &&
+ (task->prio < curr->prio ||
+ (task->prio == curr->prio &&
+ !cpumask_test_cpu(cpu, &task->cpus_allowed)))) {
+ unsigned long latency;
+ cycle_t now;
+
+ if (missed_timer_offsets_pid) {
+ if (likely(missed_timer_offsets_pid !=
+ task_pid_nr(task)))
+ return;
+ }
+
+ now = ftrace_now(cpu);
+ latency = (unsigned long) div_s64(-latency_ns, 1000);
+ latency_hist(MISSED_TIMER_OFFSETS, cpu, latency, latency, now,
+ task);
+#ifdef CONFIG_WAKEUP_LATENCY_HIST
+ task->timer_offset = latency;
+#endif
+ }
+}
+#endif
+
+static __init int latency_hist_init(void)
+{
+ struct dentry *latency_hist_root = NULL;
+ struct dentry *dentry;
+#ifdef CONFIG_WAKEUP_LATENCY_HIST
+ struct dentry *dentry_sharedprio;
+#endif
+ struct dentry *entry;
+ struct dentry *enable_root;
+ int i = 0;
+ struct hist_data *my_hist;
+ char name[64];
+ char *cpufmt = "CPU%d";
+#if defined(CONFIG_WAKEUP_LATENCY_HIST) || \
+ defined(CONFIG_MISSED_TIMER_OFFSETS_HIST)
+ char *cpufmt_maxlatproc = "max_latency-CPU%d";
+ struct maxlatproc_data *mp = NULL;
+#endif
+
+ dentry = tracing_init_dentry();
+ latency_hist_root = debugfs_create_dir(latency_hist_dir_root, dentry);
+ enable_root = debugfs_create_dir("enable", latency_hist_root);
+
+#ifdef CONFIG_INTERRUPT_OFF_HIST
+ dentry = debugfs_create_dir(irqsoff_hist_dir, latency_hist_root);
+ for_each_possible_cpu(i) {
+ sprintf(name, cpufmt, i);
+ entry = debugfs_create_file(name, 0444, dentry,
+ &per_cpu(irqsoff_hist, i), &latency_hist_fops);
+ my_hist = &per_cpu(irqsoff_hist, i);
+ atomic_set(&my_hist->hist_mode, 1);
+ my_hist->min_lat = LONG_MAX;
+ }
+ entry = debugfs_create_file("reset", 0644, dentry,
+ (void *)IRQSOFF_LATENCY, &latency_hist_reset_fops);
+#endif
+
+#ifdef CONFIG_PREEMPT_OFF_HIST
+ dentry = debugfs_create_dir(preemptoff_hist_dir,
+ latency_hist_root);
+ for_each_possible_cpu(i) {
+ sprintf(name, cpufmt, i);
+ entry = debugfs_create_file(name, 0444, dentry,
+ &per_cpu(preemptoff_hist, i), &latency_hist_fops);
+ my_hist = &per_cpu(preemptoff_hist, i);
+ atomic_set(&my_hist->hist_mode, 1);
+ my_hist->min_lat = LONG_MAX;
+ }
+ entry = debugfs_create_file("reset", 0644, dentry,
+ (void *)PREEMPTOFF_LATENCY, &latency_hist_reset_fops);
+#endif
+
+#if defined(CONFIG_INTERRUPT_OFF_HIST) && defined(CONFIG_PREEMPT_OFF_HIST)
+ dentry = debugfs_create_dir(preemptirqsoff_hist_dir,
+ latency_hist_root);
+ for_each_possible_cpu(i) {
+ sprintf(name, cpufmt, i);
+ entry = debugfs_create_file(name, 0444, dentry,
+ &per_cpu(preemptirqsoff_hist, i), &latency_hist_fops);
+ my_hist = &per_cpu(preemptirqsoff_hist, i);
+ atomic_set(&my_hist->hist_mode, 1);
+ my_hist->min_lat = LONG_MAX;
+ }
+ entry = debugfs_create_file("reset", 0644, dentry,
+ (void *)PREEMPTIRQSOFF_LATENCY, &latency_hist_reset_fops);
+#endif
+
+#if defined(CONFIG_INTERRUPT_OFF_HIST) || defined(CONFIG_PREEMPT_OFF_HIST)
+ entry = debugfs_create_file("preemptirqsoff", 0644,
+ enable_root, (void *)&preemptirqsoff_enabled_data,
+ &enable_fops);
+#endif
+
+#ifdef CONFIG_WAKEUP_LATENCY_HIST
+ dentry = debugfs_create_dir(wakeup_latency_hist_dir,
+ latency_hist_root);
+ dentry_sharedprio = debugfs_create_dir(
+ wakeup_latency_hist_dir_sharedprio, dentry);
+ for_each_possible_cpu(i) {
+ sprintf(name, cpufmt, i);
+
+ entry = debugfs_create_file(name, 0444, dentry,
+ &per_cpu(wakeup_latency_hist, i),
+ &latency_hist_fops);
+ my_hist = &per_cpu(wakeup_latency_hist, i);
+ atomic_set(&my_hist->hist_mode, 1);
+ my_hist->min_lat = LONG_MAX;
+
+ entry = debugfs_create_file(name, 0444, dentry_sharedprio,
+ &per_cpu(wakeup_latency_hist_sharedprio, i),
+ &latency_hist_fops);
+ my_hist = &per_cpu(wakeup_latency_hist_sharedprio, i);
+ atomic_set(&my_hist->hist_mode, 1);
+ my_hist->min_lat = LONG_MAX;
+
+ sprintf(name, cpufmt_maxlatproc, i);
+
+ mp = &per_cpu(wakeup_maxlatproc, i);
+ entry = debugfs_create_file(name, 0444, dentry, mp,
+ &maxlatproc_fops);
+ clear_maxlatprocdata(mp);
+
+ mp = &per_cpu(wakeup_maxlatproc_sharedprio, i);
+ entry = debugfs_create_file(name, 0444, dentry_sharedprio, mp,
+ &maxlatproc_fops);
+ clear_maxlatprocdata(mp);
+ }
+ entry = debugfs_create_file("pid", 0644, dentry,
+ (void *)&wakeup_pid, &pid_fops);
+ entry = debugfs_create_file("reset", 0644, dentry,
+ (void *)WAKEUP_LATENCY, &latency_hist_reset_fops);
+ entry = debugfs_create_file("reset", 0644, dentry_sharedprio,
+ (void *)WAKEUP_LATENCY_SHAREDPRIO, &latency_hist_reset_fops);
+ entry = debugfs_create_file("wakeup", 0644,
+ enable_root, (void *)&wakeup_latency_enabled_data,
+ &enable_fops);
+#endif
+
+#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST
+ dentry = debugfs_create_dir(missed_timer_offsets_dir,
+ latency_hist_root);
+ for_each_possible_cpu(i) {
+ sprintf(name, cpufmt, i);
+ entry = debugfs_create_file(name, 0444, dentry,
+ &per_cpu(missed_timer_offsets, i), &latency_hist_fops);
+ my_hist = &per_cpu(missed_timer_offsets, i);
+ atomic_set(&my_hist->hist_mode, 1);
+ my_hist->min_lat = LONG_MAX;
+
+ sprintf(name, cpufmt_maxlatproc, i);
+ mp = &per_cpu(missed_timer_offsets_maxlatproc, i);
+ entry = debugfs_create_file(name, 0444, dentry, mp,
+ &maxlatproc_fops);
+ clear_maxlatprocdata(mp);
+ }
+ entry = debugfs_create_file("pid", 0644, dentry,
+ (void *)&missed_timer_offsets_pid, &pid_fops);
+ entry = debugfs_create_file("reset", 0644, dentry,
+ (void *)MISSED_TIMER_OFFSETS, &latency_hist_reset_fops);
+ entry = debugfs_create_file("missed_timer_offsets", 0644,
+ enable_root, (void *)&missed_timer_offsets_enabled_data,
+ &enable_fops);
+#endif
+
+#if defined(CONFIG_WAKEUP_LATENCY_HIST) && \
+ defined(CONFIG_MISSED_TIMER_OFFSETS_HIST)
+ dentry = debugfs_create_dir(timerandwakeup_latency_hist_dir,
+ latency_hist_root);
+ for_each_possible_cpu(i) {
+ sprintf(name, cpufmt, i);
+ entry = debugfs_create_file(name, 0444, dentry,
+ &per_cpu(timerandwakeup_latency_hist, i),
+ &latency_hist_fops);
+ my_hist = &per_cpu(timerandwakeup_latency_hist, i);
+ atomic_set(&my_hist->hist_mode, 1);
+ my_hist->min_lat = LONG_MAX;
+
+ sprintf(name, cpufmt_maxlatproc, i);
+ mp = &per_cpu(timerandwakeup_maxlatproc, i);
+ entry = debugfs_create_file(name, 0444, dentry, mp,
+ &maxlatproc_fops);
+ clear_maxlatprocdata(mp);
+ }
+ entry = debugfs_create_file("reset", 0644, dentry,
+ (void *)TIMERANDWAKEUP_LATENCY, &latency_hist_reset_fops);
+ entry = debugfs_create_file("timerandwakeup", 0644,
+ enable_root, (void *)&timerandwakeup_enabled_data,
+ &enable_fops);
+#endif
+ return 0;
+}
+
+__initcall(latency_hist_init);
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index cf8d11e91efd..e59be4178536 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -446,7 +446,7 @@ struct ring_buffer_per_cpu {
int cpu;
atomic_t record_disabled;
struct ring_buffer *buffer;
- raw_spinlock_t reader_lock; /* serialize readers */
+ spinlock_t reader_lock; /* serialize readers */
arch_spinlock_t lock;
struct lock_class_key lock_key;
struct list_head *pages;
@@ -1017,6 +1017,44 @@ static int rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer,
return -ENOMEM;
}
+static inline int ok_to_lock(void)
+{
+ if (in_nmi())
+ return 0;
+#ifdef CONFIG_PREEMPT_RT_FULL
+ if (in_atomic() || irqs_disabled())
+ return 0;
+#endif
+ return 1;
+}
+
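+/*
+ * Take the reader lock when it is safe to do so; in contexts where it
+ * is not (NMI, or atomic/irqs-off on PREEMPT_RT_FULL, where spinlocks
+ * are sleeping locks) fall back to a trylock. The return value tells
+ * read_buffer_unlock() whether the lock was actually taken.
+ */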
+static int
+read_buffer_lock(struct ring_buffer_per_cpu *cpu_buffer,
+ unsigned long *flags)
+{
+ /*
+ * If an NMI die dumps out the content of the ring buffer,
+ * do not grab locks. We also permanently disable the ring
+ * buffer. A one-time deal is all you get from reading
+ * the ring buffer from an NMI.
+ */
+ if (!ok_to_lock()) {
+ if (spin_trylock_irqsave(&cpu_buffer->reader_lock, *flags))
+ return 1;
+ tracing_off_permanent();
+ return 0;
+ }
+ spin_lock_irqsave(&cpu_buffer->reader_lock, *flags);
+ return 1;
+}
+
+static void
+read_buffer_unlock(struct ring_buffer_per_cpu *cpu_buffer,
+ unsigned long flags, int locked)
+{
+ if (locked)
+ spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
+}
static struct ring_buffer_per_cpu *
rb_allocate_cpu_buffer(struct ring_buffer *buffer, int cpu)
{
@@ -1032,7 +1070,7 @@ rb_allocate_cpu_buffer(struct ring_buffer *buffer, int cpu)
cpu_buffer->cpu = cpu;
cpu_buffer->buffer = buffer;
- raw_spin_lock_init(&cpu_buffer->reader_lock);
+ spin_lock_init(&cpu_buffer->reader_lock);
lockdep_set_class(&cpu_buffer->reader_lock, buffer->reader_lock_key);
cpu_buffer->lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
@@ -1227,9 +1265,11 @@ rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned nr_pages)
{
struct buffer_page *bpage;
struct list_head *p;
+ unsigned long flags;
unsigned i;
+ int locked;
- raw_spin_lock_irq(&cpu_buffer->reader_lock);
+ locked = read_buffer_lock(cpu_buffer, &flags);
rb_head_page_deactivate(cpu_buffer);
for (i = 0; i < nr_pages; i++) {
@@ -1247,7 +1287,7 @@ rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned nr_pages)
rb_check_pages(cpu_buffer);
out:
- raw_spin_unlock_irq(&cpu_buffer->reader_lock);
+ read_buffer_unlock(cpu_buffer, flags, locked);
}
static void
@@ -1256,9 +1296,11 @@ rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer,
{
struct buffer_page *bpage;
struct list_head *p;
+ unsigned long flags;
unsigned i;
+ int locked;
- raw_spin_lock_irq(&cpu_buffer->reader_lock);
+ locked = read_buffer_lock(cpu_buffer, &flags);
rb_head_page_deactivate(cpu_buffer);
for (i = 0; i < nr_pages; i++) {
@@ -1273,7 +1315,7 @@ rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer,
rb_check_pages(cpu_buffer);
out:
- raw_spin_unlock_irq(&cpu_buffer->reader_lock);
+ read_buffer_unlock(cpu_buffer, flags, locked);
}
/**
@@ -2714,7 +2756,7 @@ unsigned long ring_buffer_oldest_event_ts(struct ring_buffer *buffer, int cpu)
return 0;
cpu_buffer = buffer->buffers[cpu];
- raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
+ spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
/*
* if the tail is on reader_page, oldest time stamp is on the reader
* page
@@ -2724,7 +2766,7 @@ unsigned long ring_buffer_oldest_event_ts(struct ring_buffer *buffer, int cpu)
else
bpage = rb_set_head_page(cpu_buffer);
ret = bpage->page->time_stamp;
- raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
+ spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
return ret;
}
@@ -2888,15 +2930,16 @@ void ring_buffer_iter_reset(struct ring_buffer_iter *iter)
{
struct ring_buffer_per_cpu *cpu_buffer;
unsigned long flags;
+ int locked;
if (!iter)
return;
cpu_buffer = iter->cpu_buffer;
- raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
+ locked = read_buffer_lock(cpu_buffer, &flags);
rb_iter_reset(iter);
- raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
+ read_buffer_unlock(cpu_buffer, flags, locked);
}
EXPORT_SYMBOL_GPL(ring_buffer_iter_reset);
@@ -3314,21 +3357,6 @@ rb_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
}
EXPORT_SYMBOL_GPL(ring_buffer_iter_peek);
-static inline int rb_ok_to_lock(void)
-{
- /*
- * If an NMI die dumps out the content of the ring buffer
- * do not grab locks. We also permanently disable the ring
- * buffer too. A one time deal is all you get from reading
- * the ring buffer from an NMI.
- */
- if (likely(!in_nmi()))
- return 1;
-
- tracing_off_permanent();
- return 0;
-}
-
/**
* ring_buffer_peek - peek at the next event to be read
* @buffer: The ring buffer to read
@@ -3346,22 +3374,17 @@ ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts,
struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
struct ring_buffer_event *event;
unsigned long flags;
- int dolock;
+ int locked;
if (!cpumask_test_cpu(cpu, buffer->cpumask))
return NULL;
- dolock = rb_ok_to_lock();
again:
- local_irq_save(flags);
- if (dolock)
- raw_spin_lock(&cpu_buffer->reader_lock);
+ locked = read_buffer_lock(cpu_buffer, &flags);
event = rb_buffer_peek(cpu_buffer, ts, lost_events);
if (event && event->type_len == RINGBUF_TYPE_PADDING)
rb_advance_reader(cpu_buffer);
- if (dolock)
- raw_spin_unlock(&cpu_buffer->reader_lock);
- local_irq_restore(flags);
+ read_buffer_unlock(cpu_buffer, flags, locked);
if (event && event->type_len == RINGBUF_TYPE_PADDING)
goto again;
@@ -3383,11 +3406,12 @@ ring_buffer_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
struct ring_buffer_event *event;
unsigned long flags;
+ int locked;
again:
- raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
+ locked = read_buffer_lock(cpu_buffer, &flags);
event = rb_iter_peek(iter, ts);
- raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
+ read_buffer_unlock(cpu_buffer, flags, locked);
if (event && event->type_len == RINGBUF_TYPE_PADDING)
goto again;
@@ -3413,9 +3437,7 @@ ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts,
struct ring_buffer_per_cpu *cpu_buffer;
struct ring_buffer_event *event = NULL;
unsigned long flags;
- int dolock;
-
- dolock = rb_ok_to_lock();
+ int locked;
again:
/* might be called in atomic */
@@ -3425,9 +3447,7 @@ ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts,
goto out;
cpu_buffer = buffer->buffers[cpu];
- local_irq_save(flags);
- if (dolock)
- raw_spin_lock(&cpu_buffer->reader_lock);
+ locked = read_buffer_lock(cpu_buffer, &flags);
event = rb_buffer_peek(cpu_buffer, ts, lost_events);
if (event) {
@@ -3435,9 +3455,8 @@ ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts,
rb_advance_reader(cpu_buffer);
}
- if (dolock)
- raw_spin_unlock(&cpu_buffer->reader_lock);
- local_irq_restore(flags);
+ read_buffer_unlock(cpu_buffer, flags, locked);
+
out:
preempt_enable();
@@ -3522,17 +3541,18 @@ ring_buffer_read_start(struct ring_buffer_iter *iter)
{
struct ring_buffer_per_cpu *cpu_buffer;
unsigned long flags;
+ int locked;
if (!iter)
return;
cpu_buffer = iter->cpu_buffer;
- raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
+ locked = read_buffer_lock(cpu_buffer, &flags);
arch_spin_lock(&cpu_buffer->lock);
rb_iter_reset(iter);
arch_spin_unlock(&cpu_buffer->lock);
- raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
+ read_buffer_unlock(cpu_buffer, flags, locked);
}
EXPORT_SYMBOL_GPL(ring_buffer_read_start);
@@ -3566,8 +3586,9 @@ ring_buffer_read(struct ring_buffer_iter *iter, u64 *ts)
struct ring_buffer_event *event;
struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
unsigned long flags;
+ int locked;
- raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
+ locked = read_buffer_lock(cpu_buffer, &flags);
again:
event = rb_iter_peek(iter, ts);
if (!event)
@@ -3578,7 +3599,7 @@ ring_buffer_read(struct ring_buffer_iter *iter, u64 *ts)
rb_advance_iter(iter);
out:
- raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
+ read_buffer_unlock(cpu_buffer, flags, locked);
return event;
}
@@ -3643,13 +3664,14 @@ void ring_buffer_reset_cpu(struct ring_buffer *buffer, int cpu)
{
struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
unsigned long flags;
+ int locked;
if (!cpumask_test_cpu(cpu, buffer->cpumask))
return;
atomic_inc(&cpu_buffer->record_disabled);
- raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
+ locked = read_buffer_lock(cpu_buffer, &flags);
if (RB_WARN_ON(cpu_buffer, local_read(&cpu_buffer->committing)))
goto out;
@@ -3661,7 +3683,7 @@ void ring_buffer_reset_cpu(struct ring_buffer *buffer, int cpu)
arch_spin_unlock(&cpu_buffer->lock);
out:
- raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
+ read_buffer_unlock(cpu_buffer, flags, locked);
atomic_dec(&cpu_buffer->record_disabled);
}
@@ -3688,22 +3710,16 @@ int ring_buffer_empty(struct ring_buffer *buffer)
{
struct ring_buffer_per_cpu *cpu_buffer;
unsigned long flags;
- int dolock;
+ int locked;
int cpu;
int ret;
- dolock = rb_ok_to_lock();
-
/* yes this is racy, but if you don't like the race, lock the buffer */
for_each_buffer_cpu(buffer, cpu) {
cpu_buffer = buffer->buffers[cpu];
- local_irq_save(flags);
- if (dolock)
- raw_spin_lock(&cpu_buffer->reader_lock);
+ locked = read_buffer_lock(cpu_buffer, &flags);
ret = rb_per_cpu_empty(cpu_buffer);
- if (dolock)
- raw_spin_unlock(&cpu_buffer->reader_lock);
- local_irq_restore(flags);
+ read_buffer_unlock(cpu_buffer, flags, locked);
if (!ret)
return 0;
@@ -3722,22 +3738,16 @@ int ring_buffer_empty_cpu(struct ring_buffer *buffer, int cpu)
{
struct ring_buffer_per_cpu *cpu_buffer;
unsigned long flags;
- int dolock;
+ int locked;
int ret;
if (!cpumask_test_cpu(cpu, buffer->cpumask))
return 1;
- dolock = rb_ok_to_lock();
-
cpu_buffer = buffer->buffers[cpu];
- local_irq_save(flags);
- if (dolock)
- raw_spin_lock(&cpu_buffer->reader_lock);
+ locked = read_buffer_lock(cpu_buffer, &flags);
ret = rb_per_cpu_empty(cpu_buffer);
- if (dolock)
- raw_spin_unlock(&cpu_buffer->reader_lock);
- local_irq_restore(flags);
+ read_buffer_unlock(cpu_buffer, flags, locked);
return ret;
}
@@ -3912,6 +3922,7 @@ int ring_buffer_read_page(struct ring_buffer *buffer,
unsigned int commit;
unsigned int read;
u64 save_timestamp;
+ int locked;
int ret = -1;
if (!cpumask_test_cpu(cpu, buffer->cpumask))
@@ -3933,7 +3944,7 @@ int ring_buffer_read_page(struct ring_buffer *buffer,
if (!bpage)
goto out;
- raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
+ locked = read_buffer_lock(cpu_buffer, &flags);
reader = rb_get_reader_page(cpu_buffer);
if (!reader)
@@ -4057,7 +4068,7 @@ int ring_buffer_read_page(struct ring_buffer *buffer,
memset(&bpage->data[commit], 0, BUF_PAGE_SIZE - commit);
out_unlock:
- raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
+ read_buffer_unlock(cpu_buffer, flags, locked);
out:
return ret;
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index ed7b5d1e12f4..87f6121a48fd 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -414,11 +414,13 @@ EXPORT_SYMBOL_GPL(tracing_is_on);
*/
void trace_wake_up(void)
{
+#ifndef CONFIG_PREEMPT_RT_FULL
const unsigned long delay = msecs_to_jiffies(2);
if (trace_flags & TRACE_ITER_BLOCK)
return;
schedule_delayed_work(&wakeup_work, delay);
+#endif
}
static int __init set_buf_size(char *str)
@@ -775,6 +777,12 @@ update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
}
#endif /* CONFIG_TRACER_MAX_TRACE */
+#ifndef CONFIG_PREEMPT_RT_FULL
+static void default_wait_pipe(struct trace_iterator *iter);
+#else
+#define default_wait_pipe poll_wait_pipe
+#endif
+
/**
* register_tracer - register a tracer with the ftrace system.
* @type - the plugin for the tracer
@@ -1179,6 +1187,8 @@ tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags,
((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) |
((pc & SOFTIRQ_MASK) ? TRACE_FLAG_SOFTIRQ : 0) |
(need_resched() ? TRACE_FLAG_NEED_RESCHED : 0);
+
+ entry->migrate_disable = (tsk) ? __migrate_disabled(tsk) & 0xFF : 0;
}
EXPORT_SYMBOL_GPL(tracing_generic_entry_update);
@@ -1937,9 +1947,10 @@ static void print_lat_help_header(struct seq_file *m)
seq_puts(m, "# | / _----=> need-resched \n");
seq_puts(m, "# || / _---=> hardirq/softirq \n");
seq_puts(m, "# ||| / _--=> preempt-depth \n");
- seq_puts(m, "# |||| / delay \n");
- seq_puts(m, "# cmd pid ||||| time | caller \n");
- seq_puts(m, "# \\ / ||||| \\ | / \n");
+ seq_puts(m, "# |||| / _--=> migrate-disable\n");
+ seq_puts(m, "# ||||| / delay \n");
+ seq_puts(m, "# cmd pid |||||| time | caller \n");
+ seq_puts(m, "# \\ / ||||| \\ | / \n");
}
static void print_event_info(struct trace_array *tr, struct seq_file *m)
@@ -3300,6 +3311,7 @@ static int tracing_release_pipe(struct inode *inode, struct file *file)
return 0;
}
+#ifndef CONFIG_PREEMPT_RT_FULL
static unsigned int
tracing_poll_pipe(struct file *filp, poll_table *poll_table)
{
@@ -3321,8 +3333,7 @@ tracing_poll_pipe(struct file *filp, poll_table *poll_table)
}
}
-
-void default_wait_pipe(struct trace_iterator *iter)
+static void default_wait_pipe(struct trace_iterator *iter)
{
DEFINE_WAIT(wait);
@@ -3333,6 +3344,20 @@ void default_wait_pipe(struct trace_iterator *iter)
finish_wait(&trace_wait, &wait);
}
+#else
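+/*
+ * On PREEMPT_RT_FULL trace_wake_up() is a no-op, so readers blocked in
+ * poll() would never be woken through the trace_wait queue. Fall back
+ * to a timed sleep in poll_wait_pipe() and re-check the buffer.
+ */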
+static unsigned int
+tracing_poll_pipe(struct file *filp, poll_table *poll_table)
+{
+ struct trace_iterator *iter = filp->private_data;
+
+ if ((trace_flags & TRACE_ITER_BLOCK) || !trace_empty(iter))
+ return POLLIN | POLLRDNORM;
+ poll_wait_pipe(iter);
+ if (!trace_empty(iter))
+ return POLLIN | POLLRDNORM;
+ return 0;
+}
+#endif
/*
* This is a make-shift waitqueue.
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index 95059f091a24..d6e195146e05 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -354,7 +354,6 @@ void trace_init_global_iter(struct trace_iterator *iter);
void tracing_iter_reset(struct trace_iterator *iter, int cpu);
-void default_wait_pipe(struct trace_iterator *iter);
void poll_wait_pipe(struct trace_iterator *iter);
void ftrace(struct trace_array *tr,
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index 079a93ae8a9d..6ae931dc3c19 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -116,7 +116,8 @@ static int trace_define_common_fields(void)
__common_field(unsigned char, flags);
__common_field(unsigned char, preempt_count);
__common_field(int, pid);
- __common_field(int, padding);
+ __common_field(unsigned short, migrate_disable);
+ __common_field(unsigned short, padding);
return ret;
}
diff --git a/kernel/trace/trace_irqsoff.c b/kernel/trace/trace_irqsoff.c
index 99d20e920368..384f6034236e 100644
--- a/kernel/trace/trace_irqsoff.c
+++ b/kernel/trace/trace_irqsoff.c
@@ -17,6 +17,7 @@
#include <linux/fs.h>
#include "trace.h"
+#include <trace/events/hist.h>
static struct trace_array *irqsoff_trace __read_mostly;
static int tracer_enabled __read_mostly;
@@ -437,11 +438,13 @@ void start_critical_timings(void)
{
if (preempt_trace() || irq_trace())
start_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
+ trace_preemptirqsoff_hist(TRACE_START, 1);
}
EXPORT_SYMBOL_GPL(start_critical_timings);
void stop_critical_timings(void)
{
+ trace_preemptirqsoff_hist(TRACE_STOP, 0);
if (preempt_trace() || irq_trace())
stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
}
@@ -451,6 +454,7 @@ EXPORT_SYMBOL_GPL(stop_critical_timings);
#ifdef CONFIG_PROVE_LOCKING
void time_hardirqs_on(unsigned long a0, unsigned long a1)
{
+ trace_preemptirqsoff_hist(IRQS_ON, 0);
if (!preempt_trace() && irq_trace())
stop_critical_timing(a0, a1);
}
@@ -459,6 +463,7 @@ void time_hardirqs_off(unsigned long a0, unsigned long a1)
{
if (!preempt_trace() && irq_trace())
start_critical_timing(a0, a1);
+ trace_preemptirqsoff_hist(IRQS_OFF, 1);
}
#else /* !CONFIG_PROVE_LOCKING */
@@ -484,6 +489,7 @@ inline void print_irqtrace_events(struct task_struct *curr)
*/
void trace_hardirqs_on(void)
{
+ trace_preemptirqsoff_hist(IRQS_ON, 0);
if (!preempt_trace() && irq_trace())
stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
}
@@ -493,11 +499,13 @@ void trace_hardirqs_off(void)
{
if (!preempt_trace() && irq_trace())
start_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
+ trace_preemptirqsoff_hist(IRQS_OFF, 1);
}
EXPORT_SYMBOL(trace_hardirqs_off);
void trace_hardirqs_on_caller(unsigned long caller_addr)
{
+ trace_preemptirqsoff_hist(IRQS_ON, 0);
if (!preempt_trace() && irq_trace())
stop_critical_timing(CALLER_ADDR0, caller_addr);
}
@@ -507,6 +515,7 @@ void trace_hardirqs_off_caller(unsigned long caller_addr)
{
if (!preempt_trace() && irq_trace())
start_critical_timing(CALLER_ADDR0, caller_addr);
+ trace_preemptirqsoff_hist(IRQS_OFF, 1);
}
EXPORT_SYMBOL(trace_hardirqs_off_caller);
@@ -516,12 +525,14 @@ EXPORT_SYMBOL(trace_hardirqs_off_caller);
#ifdef CONFIG_PREEMPT_TRACER
void trace_preempt_on(unsigned long a0, unsigned long a1)
{
+ trace_preemptirqsoff_hist(PREEMPT_ON, 0);
if (preempt_trace() && !irq_trace())
stop_critical_timing(a0, a1);
}
void trace_preempt_off(unsigned long a0, unsigned long a1)
{
+ trace_preemptirqsoff_hist(PREEMPT_OFF, 1);
if (preempt_trace() && !irq_trace())
start_critical_timing(a0, a1);
}
diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
index 859fae6b1825..41b6957eb47f 100644
--- a/kernel/trace/trace_output.c
+++ b/kernel/trace/trace_output.c
@@ -593,6 +593,11 @@ int trace_print_lat_fmt(struct trace_seq *s, struct trace_entry *entry)
else
ret = trace_seq_putc(s, '.');
+ if (entry->migrate_disable)
+ ret = trace_seq_printf(s, "%x", entry->migrate_disable);
+ else
+ ret = trace_seq_putc(s, '.');
+
return ret;
}
diff --git a/kernel/user.c b/kernel/user.c
index 71dd2363ab0f..b831e51c3489 100644
--- a/kernel/user.c
+++ b/kernel/user.c
@@ -129,11 +129,11 @@ void free_uid(struct user_struct *up)
if (!up)
return;
- local_irq_save(flags);
+ local_irq_save_nort(flags);
if (atomic_dec_and_lock(&up->__count, &uidhash_lock))
free_user(up, flags);
else
- local_irq_restore(flags);
+ local_irq_restore_nort(flags);
}
struct user_struct *alloc_uid(struct user_namespace *ns, uid_t uid)
diff --git a/kernel/watchdog.c b/kernel/watchdog.c
index df30ee08bdd4..87192eb940c0 100644
--- a/kernel/watchdog.c
+++ b/kernel/watchdog.c
@@ -201,6 +201,8 @@ static int is_softlockup(unsigned long touch_ts)
#ifdef CONFIG_HARDLOCKUP_DETECTOR
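+/* Serializes WARN() backtrace output when several CPUs hard-lock at once. */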
+static DEFINE_RAW_SPINLOCK(watchdog_output_lock);
+
static struct perf_event_attr wd_hw_attr = {
.type = PERF_TYPE_HARDWARE,
.config = PERF_COUNT_HW_CPU_CYCLES,
@@ -235,10 +237,19 @@ static void watchdog_overflow_callback(struct perf_event *event,
if (__this_cpu_read(hard_watchdog_warn) == true)
return;
- if (hardlockup_panic)
+ /*
+ * If early-printk is enabled then make sure we do not
+ * lock up in printk() and kill console logging:
+ */
+ printk_kill();
+
+ if (hardlockup_panic) {
panic("Watchdog detected hard LOCKUP on cpu %d", this_cpu);
- else
+ } else {
+ raw_spin_lock(&watchdog_output_lock);
WARN(1, "Watchdog detected hard LOCKUP on cpu %d", this_cpu);
+ raw_spin_unlock(&watchdog_output_lock);
+ }
__this_cpu_write(hard_watchdog_warn, true);
return;
@@ -430,6 +441,7 @@ static void watchdog_prepare_cpu(int cpu)
WARN_ON(per_cpu(softlockup_watchdog, cpu));
hrtimer_init(hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
hrtimer->function = watchdog_timer_fn;
+ hrtimer->irqsafe = 1;
}
static int watchdog_enable(int cpu)
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 5abf42f63c08..bc867e8c5ca9 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -41,6 +41,7 @@
#include <linux/debug_locks.h>
#include <linux/lockdep.h>
#include <linux/idr.h>
+#include <linux/delay.h>
#include "workqueue_sched.h"
@@ -57,20 +58,10 @@ enum {
WORKER_DIE = 1 << 1, /* die die die */
WORKER_IDLE = 1 << 2, /* is idle */
WORKER_PREP = 1 << 3, /* preparing to run works */
- WORKER_ROGUE = 1 << 4, /* not bound to any cpu */
- WORKER_REBIND = 1 << 5, /* mom is home, come back */
- WORKER_CPU_INTENSIVE = 1 << 6, /* cpu intensive */
- WORKER_UNBOUND = 1 << 7, /* worker is unbound */
+ WORKER_CPU_INTENSIVE = 1 << 4, /* cpu intensive */
+ WORKER_UNBOUND = 1 << 5, /* worker is unbound */
- WORKER_NOT_RUNNING = WORKER_PREP | WORKER_ROGUE | WORKER_REBIND |
- WORKER_CPU_INTENSIVE | WORKER_UNBOUND,
-
- /* gcwq->trustee_state */
- TRUSTEE_START = 0, /* start */
- TRUSTEE_IN_CHARGE = 1, /* trustee in charge of gcwq */
- TRUSTEE_BUTCHER = 2, /* butcher workers */
- TRUSTEE_RELEASE = 3, /* release workers */
- TRUSTEE_DONE = 4, /* trustee is done */
+ WORKER_NOT_RUNNING = WORKER_PREP | WORKER_CPU_INTENSIVE | WORKER_UNBOUND,
BUSY_WORKER_HASH_ORDER = 6, /* 64 pointers */
BUSY_WORKER_HASH_SIZE = 1 << BUSY_WORKER_HASH_ORDER,
@@ -84,7 +75,6 @@ enum {
(min two ticks) */
MAYDAY_INTERVAL = HZ / 10, /* and then every 100ms */
CREATE_COOLDOWN = HZ, /* time to breathe after fail */
- TRUSTEE_COOLDOWN = HZ / 10, /* for trustee draining */
/*
* Rescue workers are used only on emergencies and shared by
@@ -136,7 +126,7 @@ struct worker {
unsigned long last_active; /* L: last active timestamp */
unsigned int flags; /* X: flags */
int id; /* I: worker id */
- struct work_struct rebind_work; /* L: rebind worker to cpu */
+ int sleeping; /* None */
};
/*
@@ -163,10 +153,8 @@ struct global_cwq {
struct ida worker_ida; /* L: for worker IDs */
- struct task_struct *trustee; /* L: for gcwq shutdown */
- unsigned int trustee_state; /* L: trustee state */
- wait_queue_head_t trustee_wait; /* trustee wait */
struct worker *first_idle; /* L: first idle worker */
+ wait_queue_head_t idle_wait;
} ____cacheline_aligned_in_smp;
/*
@@ -655,66 +643,58 @@ static void wake_up_worker(struct global_cwq *gcwq)
}
/**
- * wq_worker_waking_up - a worker is waking up
- * @task: task waking up
- * @cpu: CPU @task is waking up to
- *
- * This function is called during try_to_wake_up() when a worker is
- * being awoken.
+ * wq_worker_running - a worker is running again
+ * @task: task returning from sleep
*
- * CONTEXT:
- * spin_lock_irq(rq->lock)
+ * This function is called when a worker returns from schedule()
*/
-void wq_worker_waking_up(struct task_struct *task, unsigned int cpu)
+void wq_worker_running(struct task_struct *task)
{
struct worker *worker = kthread_data(task);
+ if (!worker->sleeping)
+ return;
if (!(worker->flags & WORKER_NOT_RUNNING))
- atomic_inc(get_gcwq_nr_running(cpu));
+ atomic_inc(get_gcwq_nr_running(smp_processor_id()));
+ worker->sleeping = 0;
}
/**
* wq_worker_sleeping - a worker is going to sleep
* @task: task going to sleep
- * @cpu: CPU in question, must be the current CPU number
- *
- * This function is called during schedule() when a busy worker is
- * going to sleep. Worker on the same cpu can be woken up by
- * returning pointer to its task.
*
- * CONTEXT:
- * spin_lock_irq(rq->lock)
- *
- * RETURNS:
- * Worker task on @cpu to wake up, %NULL if none.
+ * This function is called from schedule() when a busy worker is
+ * going to sleep.
*/
-struct task_struct *wq_worker_sleeping(struct task_struct *task,
- unsigned int cpu)
+void wq_worker_sleeping(struct task_struct *task)
{
- struct worker *worker = kthread_data(task), *to_wakeup = NULL;
- struct global_cwq *gcwq = get_gcwq(cpu);
- atomic_t *nr_running = get_gcwq_nr_running(cpu);
+ struct worker *worker = kthread_data(task);
+ struct global_cwq *gcwq;
+ int cpu;
if (worker->flags & WORKER_NOT_RUNNING)
- return NULL;
+ return;
- /* this can only happen on the local cpu */
- BUG_ON(cpu != raw_smp_processor_id());
+ if (WARN_ON_ONCE(worker->sleeping))
+ return;
+
+ worker->sleeping = 1;
+ cpu = smp_processor_id();
+ gcwq = get_gcwq(cpu);
+ spin_lock_irq(&gcwq->lock);
/*
* The counterpart of the following dec_and_test, implied mb,
* worklist not empty test sequence is in insert_work().
* Please read comment there.
- *
- * NOT_RUNNING is clear. This means that trustee is not in
- * charge and we're running on the local cpu w/ rq lock held
- * and preemption disabled, which in turn means that none else
- * could be manipulating idle_list, so dereferencing idle_list
- * without gcwq lock is safe.
*/
- if (atomic_dec_and_test(nr_running) && !list_empty(&gcwq->worklist))
- to_wakeup = first_worker(gcwq);
- return to_wakeup ? to_wakeup->task : NULL;
+ if (atomic_dec_and_test(get_gcwq_nr_running(cpu)) &&
+ !list_empty(&gcwq->worklist)) {
+ worker = first_worker(gcwq);
+ if (worker)
+ wake_up_process(worker->task);
+ }
+ spin_unlock_irq(&gcwq->lock);
}
/**
@@ -976,13 +956,38 @@ static bool is_chained_work(struct workqueue_struct *wq)
return false;
}
-static void __queue_work(unsigned int cpu, struct workqueue_struct *wq,
- struct work_struct *work)
+static void ___queue_work(struct workqueue_struct *wq, struct global_cwq *gcwq,
+ struct work_struct *work)
{
- struct global_cwq *gcwq;
struct cpu_workqueue_struct *cwq;
struct list_head *worklist;
unsigned int work_flags;
+
+ /* gcwq determined, get cwq and queue */
+ cwq = get_cwq(gcwq->cpu, wq);
+ trace_workqueue_queue_work(gcwq->cpu, cwq, work);
+
+ BUG_ON(!list_empty(&work->entry));
+
+ cwq->nr_in_flight[cwq->work_color]++;
+ work_flags = work_color_to_flags(cwq->work_color);
+
+ if (likely(cwq->nr_active < cwq->max_active)) {
+ trace_workqueue_activate_work(work);
+ cwq->nr_active++;
+ worklist = gcwq_determine_ins_pos(gcwq, cwq);
+ } else {
+ work_flags |= WORK_STRUCT_DELAYED;
+ worklist = &cwq->delayed_works;
+ }
+
+ insert_work(cwq, work, worklist, work_flags);
+}
+
+static void __queue_work(unsigned int cpu, struct workqueue_struct *wq,
+ struct work_struct *work)
+{
+ struct global_cwq *gcwq;
unsigned long flags;
debug_work_activate(work);
@@ -1028,27 +1033,32 @@ static void __queue_work(unsigned int cpu, struct workqueue_struct *wq,
spin_lock_irqsave(&gcwq->lock, flags);
}
- /* gcwq determined, get cwq and queue */
- cwq = get_cwq(gcwq->cpu, wq);
- trace_workqueue_queue_work(cpu, cwq, work);
+ ___queue_work(wq, gcwq, work);
- BUG_ON(!list_empty(&work->entry));
+ spin_unlock_irqrestore(&gcwq->lock, flags);
+}
- cwq->nr_in_flight[cwq->work_color]++;
- work_flags = work_color_to_flags(cwq->work_color);
+/**
+ * queue_work_on - queue work on specific cpu
+ * @cpu: CPU number to execute work on
+ * @wq: workqueue to use
+ * @work: work to queue
+ *
+ * Returns 0 if @work was already on a queue, non-zero otherwise.
+ *
+ * We queue the work to a specific CPU; the caller must ensure it
+ * can't go away.
+ */
+static int
+__queue_work_on(int cpu, struct workqueue_struct *wq, struct work_struct *work)
+{
+ int ret = 0;
- if (likely(cwq->nr_active < cwq->max_active)) {
- trace_workqueue_activate_work(work);
- cwq->nr_active++;
- worklist = gcwq_determine_ins_pos(gcwq, cwq);
- } else {
- work_flags |= WORK_STRUCT_DELAYED;
- worklist = &cwq->delayed_works;
+ if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
+ __queue_work(cpu, wq, work);
+ ret = 1;
}
-
- insert_work(cwq, work, worklist, work_flags);
-
- spin_unlock_irqrestore(&gcwq->lock, flags);
+ return ret;
}
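
Both exported entry points now funnel through this helper; the PENDING bit is what keeps a work item from being queued twice. A hypothetical usage sketch (my_work and my_work_fn are illustrative names only):

	static void my_work_fn(struct work_struct *work) { /* ... */ }
	static DECLARE_WORK(my_work, my_work_fn);

	queue_work(system_wq, &my_work);	/* returns 1: newly queued */
	queue_work(system_wq, &my_work);	/* returns 0: still pending */
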
/**
@@ -1065,34 +1075,19 @@ int queue_work(struct workqueue_struct *wq, struct work_struct *work)
{
int ret;
- ret = queue_work_on(get_cpu(), wq, work);
- put_cpu();
+ ret = __queue_work_on(get_cpu_light(), wq, work);
+ put_cpu_light();
return ret;
}
EXPORT_SYMBOL_GPL(queue_work);
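
get_cpu_light()/put_cpu_light() are added elsewhere in this series; a sketch of their assumed definitions, which keep queue_work() preemptible on RT while still pinning the task to a stable CPU:

	#ifndef CONFIG_PREEMPT_RT_FULL
	# define get_cpu_light()	get_cpu()	/* disables preemption */
	# define put_cpu_light()	put_cpu()
	#else
	# define get_cpu_light()	({ migrate_disable(); smp_processor_id(); })
	# define put_cpu_light()	migrate_enable()  /* stays preemptible */
	#endif
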
-/**
- * queue_work_on - queue work on specific cpu
- * @cpu: CPU number to execute work on
- * @wq: workqueue to use
- * @work: work to queue
- *
- * Returns 0 if @work was already on a queue, non-zero otherwise.
- *
- * We queue the work to a specific CPU, the caller must ensure it
- * can't go away.
- */
int
queue_work_on(int cpu, struct workqueue_struct *wq, struct work_struct *work)
{
- int ret = 0;
+ WARN_ON(wq->flags & WQ_NON_AFFINE);
- if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
- __queue_work(cpu, wq, work);
- ret = 1;
- }
- return ret;
+ return __queue_work_on(cpu, wq, work);
}
EXPORT_SYMBOL_GPL(queue_work_on);
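
WQ_NON_AFFINE, introduced elsewhere in this series, marks a workqueue whose pending works may be requeued to another CPU when their CPU goes down, so asking for a specific CPU is meaningless. A hypothetical example of what the WARN_ON catches ("example" and the flags are illustrative, reusing my_work from the sketch above):

	struct workqueue_struct *wq = alloc_workqueue("example", WQ_NON_AFFINE, 0);

	queue_work_on(2, wq, &my_work);	/* triggers the WARN_ON above */
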
@@ -1138,6 +1133,8 @@ int queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
struct timer_list *timer = &dwork->timer;
struct work_struct *work = &dwork->work;
+ WARN_ON((wq->flags & WQ_NON_AFFINE) && cpu != -1);
+
if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
unsigned int lcpu;
@@ -1203,12 +1200,13 @@ static void worker_enter_idle(struct worker *worker)
/* idle_list is LIFO */
list_add(&worker->entry, &gcwq->idle_list);
- if (likely(!(worker->flags & WORKER_ROGUE))) {
- if (too_many_workers(gcwq) && !timer_pending(&gcwq->idle_timer))
- mod_timer(&gcwq->idle_timer,
- jiffies + IDLE_WORKER_TIMEOUT);
- } else
- wake_up_all(&gcwq->trustee_wait);
+ if (gcwq->nr_idle == gcwq->nr_workers)
+ wake_up_all(&gcwq->idle_wait);
+
+ if (too_many_workers(gcwq) && !timer_pending(&gcwq->idle_timer)) {
+ mod_timer(&gcwq->idle_timer,
+ jiffies + IDLE_WORKER_TIMEOUT);
+ }
/* sanity check nr_running */
WARN_ON_ONCE(gcwq->nr_workers == gcwq->nr_idle &&
@@ -1285,8 +1283,14 @@ __acquires(&gcwq->lock)
return false;
if (task_cpu(task) == gcwq->cpu &&
cpumask_equal(&current->cpus_allowed,
- get_cpu_mask(gcwq->cpu)))
+ get_cpu_mask(gcwq->cpu))) {
+ /*
+ * Since we're binding to a particular cpu and need to
+ * stay there for correctness, mark us PF_THREAD_BOUND.
+ */
+ task->flags |= PF_THREAD_BOUND;
return true;
+ }
spin_unlock_irq(&gcwq->lock);
/*
@@ -1300,20 +1304,15 @@ __acquires(&gcwq->lock)
}
}
-/*
- * Function for worker->rebind_work used to rebind rogue busy workers
- * to the associated cpu which is coming back online. This is
- * scheduled by cpu up but can race with other cpu hotplug operations
- * and may be executed twice without intervening cpu down.
- */
-static void worker_rebind_fn(struct work_struct *work)
+static void worker_unbind_and_unlock(struct worker *worker)
{
- struct worker *worker = container_of(work, struct worker, rebind_work);
struct global_cwq *gcwq = worker->gcwq;
+ struct task_struct *task = worker->task;
- if (worker_maybe_bind_and_lock(worker))
- worker_clr_flags(worker, WORKER_REBIND);
-
+ /*
+ * It's no longer required that we're PF_THREAD_BOUND; the work is done.
+ */
+ task->flags &= ~PF_THREAD_BOUND;
spin_unlock_irq(&gcwq->lock);
}
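
This is the counterpart of worker_maybe_bind_and_lock(), which now sets PF_THREAD_BOUND itself (see the hunk above); a sketch of the pattern the rescuer follows:

	worker_maybe_bind_and_lock(rescuer);	/* may set PF_THREAD_BOUND;
						 * returns with gcwq->lock held */
	/* ... process rescuer->scheduled ... */
	worker_unbind_and_unlock(rescuer);	/* clears the flag, drops the lock */
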
@@ -1325,7 +1324,6 @@ static struct worker *alloc_worker(void)
if (worker) {
INIT_LIST_HEAD(&worker->entry);
INIT_LIST_HEAD(&worker->scheduled);
- INIT_WORK(&worker->rebind_work, worker_rebind_fn);
/* on creation a worker is in !idle && prep state */
worker->flags = WORKER_PREP;
}
@@ -1380,15 +1378,9 @@ static struct worker *create_worker(struct global_cwq *gcwq, bool bind)
if (IS_ERR(worker->task))
goto fail;
- /*
- * A rogue worker will become a regular one if CPU comes
- * online later on. Make sure every worker has
- * PF_THREAD_BOUND set.
- */
if (bind && !on_unbound_cpu)
kthread_bind(worker->task, gcwq->cpu);
else {
- worker->task->flags |= PF_THREAD_BOUND;
if (on_unbound_cpu)
worker->flags |= WORKER_UNBOUND;
}
@@ -1665,13 +1657,6 @@ static bool manage_workers(struct worker *worker)
gcwq->flags &= ~GCWQ_MANAGING_WORKERS;
- /*
- * The trustee might be waiting to take over the manager
- * position, tell it we're done.
- */
- if (unlikely(gcwq->trustee))
- wake_up_all(&gcwq->trustee_wait);
-
return ret;
}
@@ -2072,7 +2057,7 @@ repeat:
if (keep_working(gcwq))
wake_up_worker(gcwq);
- spin_unlock_irq(&gcwq->lock);
+ worker_unbind_and_unlock(rescuer);
}
schedule();
@@ -3022,7 +3007,6 @@ struct workqueue_struct *__alloc_workqueue_key(const char *fmt,
if (IS_ERR(rescuer->task))
goto err;
- rescuer->task->flags |= PF_THREAD_BOUND;
wake_up_process(rescuer->task);
}
@@ -3212,171 +3196,76 @@ EXPORT_SYMBOL_GPL(work_busy);
* gcwqs serve mix of short, long and very long running works making
* blocked draining impractical.
*
- * This is solved by allowing a gcwq to be detached from CPU, running
- * it with unbound (rogue) workers and allowing it to be reattached
- * later if the cpu comes back online. A separate thread is created
- * to govern a gcwq in such state and is called the trustee of the
- * gcwq.
- *
- * Trustee states and their descriptions.
- *
- * START Command state used on startup. On CPU_DOWN_PREPARE, a
- * new trustee is started with this state.
- *
- * IN_CHARGE Once started, trustee will enter this state after
- * assuming the manager role and making all existing
- * workers rogue. DOWN_PREPARE waits for trustee to
- * enter this state. After reaching IN_CHARGE, trustee
- * tries to execute the pending worklist until it's empty
- * and the state is set to BUTCHER, or the state is set
- * to RELEASE.
- *
- * BUTCHER Command state which is set by the cpu callback after
- * the cpu has went down. Once this state is set trustee
- * knows that there will be no new works on the worklist
- * and once the worklist is empty it can proceed to
- * killing idle workers.
- *
- * RELEASE Command state which is set by the cpu callback if the
- * cpu down has been canceled or it has come online
- * again. After recognizing this state, trustee stops
- * trying to drain or butcher and clears ROGUE, rebinds
- * all remaining workers back to the cpu and releases
- * manager role.
- *
- * DONE Trustee will enter this state after BUTCHER or RELEASE
- * is complete.
- *
- *          trustee                 CPU                draining
- *          took over               down               complete
- * START -----------> IN_CHARGE -----------> BUTCHER -----------> DONE
- *                        |                     |                  ^
- *                        | CPU is back online  v   return workers |
- *                         ----------------> RELEASE --------------
*/
-/**
- * trustee_wait_event_timeout - timed event wait for trustee
- * @cond: condition to wait for
- * @timeout: timeout in jiffies
- *
- * wait_event_timeout() for trustee to use. Handles locking and
- * checks for RELEASE request.
- *
- * CONTEXT:
- * spin_lock_irq(gcwq->lock) which may be released and regrabbed
- * multiple times. To be used by trustee.
- *
- * RETURNS:
- * Positive indicating left time if @cond is satisfied, 0 if timed
- * out, -1 if canceled.
- */
-#define trustee_wait_event_timeout(cond, timeout) ({ \
- long __ret = (timeout); \
- while (!((cond) || (gcwq->trustee_state == TRUSTEE_RELEASE)) && \
- __ret) { \
- spin_unlock_irq(&gcwq->lock); \
- __wait_event_timeout(gcwq->trustee_wait, (cond) || \
- (gcwq->trustee_state == TRUSTEE_RELEASE), \
- __ret); \
- spin_lock_irq(&gcwq->lock); \
- } \
- gcwq->trustee_state == TRUSTEE_RELEASE ? -1 : (__ret); \
-})
+static int __devinit workqueue_cpu_up_callback(struct notifier_block *nfb,
+ unsigned long action,
+ void *hcpu)
+{
+ unsigned int cpu = (unsigned long)hcpu;
+ struct global_cwq *gcwq = get_gcwq(cpu);
+ struct worker *uninitialized_var(new_worker);
+ unsigned long flags;
-/**
- * trustee_wait_event - event wait for trustee
- * @cond: condition to wait for
- *
- * wait_event() for trustee to use. Automatically handles locking and
- * checks for CANCEL request.
- *
- * CONTEXT:
- * spin_lock_irq(gcwq->lock) which may be released and regrabbed
- * multiple times. To be used by trustee.
- *
- * RETURNS:
- * 0 if @cond is satisfied, -1 if canceled.
- */
-#define trustee_wait_event(cond) ({ \
- long __ret1; \
- __ret1 = trustee_wait_event_timeout(cond, MAX_SCHEDULE_TIMEOUT);\
- __ret1 < 0 ? -1 : 0; \
-})
+ action &= ~CPU_TASKS_FROZEN;
-static int __cpuinit trustee_thread(void *__gcwq)
-{
- struct global_cwq *gcwq = __gcwq;
- struct worker *worker;
- struct work_struct *work;
- struct hlist_node *pos;
- long rc;
- int i;
+ switch (action) {
+ case CPU_UP_PREPARE:
+ BUG_ON(gcwq->first_idle);
+ new_worker = create_worker(gcwq, false);
+ if (!new_worker)
+ return NOTIFY_BAD;
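+ /* fall through */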
+ case CPU_UP_CANCELED:
+ case CPU_ONLINE:
+ break;
+ default:
+ return notifier_from_errno(0);
+ }
- BUG_ON(gcwq->cpu != smp_processor_id());
+ /* some are called w/ irq disabled, don't disturb irq status */
+ spin_lock_irqsave(&gcwq->lock, flags);
- spin_lock_irq(&gcwq->lock);
- /*
- * Claim the manager position and make all workers rogue.
- * Trustee must be bound to the target cpu and can't be
- * cancelled.
- */
- BUG_ON(gcwq->cpu != smp_processor_id());
- rc = trustee_wait_event(!(gcwq->flags & GCWQ_MANAGING_WORKERS));
- BUG_ON(rc < 0);
+ switch (action) {
+ case CPU_UP_PREPARE:
+ BUG_ON(gcwq->first_idle);
+ gcwq->first_idle = new_worker;
+ break;
- gcwq->flags |= GCWQ_MANAGING_WORKERS;
+ case CPU_UP_CANCELED:
+ destroy_worker(gcwq->first_idle);
+ gcwq->first_idle = NULL;
+ break;
- list_for_each_entry(worker, &gcwq->idle_list, entry)
- worker->flags |= WORKER_ROGUE;
+ case CPU_ONLINE:
+ spin_unlock_irq(&gcwq->lock);
+ kthread_bind(gcwq->first_idle->task, cpu);
+ spin_lock_irq(&gcwq->lock);
+ gcwq->flags |= GCWQ_MANAGE_WORKERS;
+ start_worker(gcwq->first_idle);
+ gcwq->first_idle = NULL;
+ break;
+ }
- for_each_busy_worker(worker, i, pos, gcwq)
- worker->flags |= WORKER_ROGUE;
+ spin_unlock_irqrestore(&gcwq->lock, flags);
- /*
- * Call schedule() so that we cross rq->lock and thus can
- * guarantee sched callbacks see the rogue flag. This is
- * necessary as scheduler callbacks may be invoked from other
- * cpus.
- */
- spin_unlock_irq(&gcwq->lock);
- schedule();
- spin_lock_irq(&gcwq->lock);
+ return notifier_from_errno(0);
+}
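
A sketch of the notifier sequences this callback is assumed to see (CPU_TASKS_FROZEN is already masked off above):

	/* successful bring-up:
	 *   CPU_UP_PREPARE  -> create first_idle (may sleep; no gcwq->lock)
	 *   CPU_ONLINE      -> kthread_bind() it to @cpu, start_worker()
	 * aborted bring-up:
	 *   CPU_UP_PREPARE  -> CPU_UP_CANCELED -> destroy_worker(first_idle)
	 */
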
- /*
- * Sched callbacks are disabled now. Zap nr_running. After
- * this, nr_running stays zero and need_more_worker() and
- * keep_working() are always true as long as the worklist is
- * not empty.
- */
- atomic_set(get_gcwq_nr_running(gcwq->cpu), 0);
+static void flush_gcwq(struct global_cwq *gcwq)
+{
+ struct work_struct *work, *nw;
+ struct worker *worker, *n;
+ LIST_HEAD(non_affine_works);
- spin_unlock_irq(&gcwq->lock);
- del_timer_sync(&gcwq->idle_timer);
spin_lock_irq(&gcwq->lock);
+ list_for_each_entry_safe(work, nw, &gcwq->worklist, entry) {
+ struct workqueue_struct *wq = get_work_cwq(work)->wq;
- /*
- * We're now in charge. Notify and proceed to drain. We need
- * to keep the gcwq running during the whole CPU down
- * procedure as other cpu hotunplug callbacks may need to
- * flush currently running tasks.
- */
- gcwq->trustee_state = TRUSTEE_IN_CHARGE;
- wake_up_all(&gcwq->trustee_wait);
+ if (wq->flags & WQ_NON_AFFINE)
+ list_move(&work->entry, &non_affine_works);
+ }
- /*
- * The original cpu is in the process of dying and may go away
- * anytime now. When that happens, we and all workers would
- * be migrated to other cpus. Try draining any left work. We
- * want to get it over with ASAP - spam rescuers, wake up as
- * many idlers as necessary and create new ones till the
- * worklist is empty. Note that if the gcwq is frozen, there
- * may be frozen works in freezable cwqs. Don't declare
- * completion while frozen.
- */
- while (gcwq->nr_workers != gcwq->nr_idle ||
- gcwq->flags & GCWQ_FREEZING ||
- gcwq->trustee_state == TRUSTEE_IN_CHARGE) {
+ while (!list_empty(&gcwq->worklist)) {
int nr_works = 0;
list_for_each_entry(work, &gcwq->worklist, entry) {
@@ -3390,189 +3279,54 @@ static int __cpuinit trustee_thread(void *__gcwq)
wake_up_process(worker->task);
}
+ spin_unlock_irq(&gcwq->lock);
+
if (need_to_create_worker(gcwq)) {
- spin_unlock_irq(&gcwq->lock);
- worker = create_worker(gcwq, false);
- spin_lock_irq(&gcwq->lock);
- if (worker) {
- worker->flags |= WORKER_ROGUE;
+ worker = create_worker(gcwq, true);
+ if (worker)
start_worker(worker);
- }
}
- /* give a breather */
- if (trustee_wait_event_timeout(false, TRUSTEE_COOLDOWN) < 0)
- break;
- }
-
- /*
- * Either all works have been scheduled and cpu is down, or
- * cpu down has already been canceled. Wait for and butcher
- * all workers till we're canceled.
- */
- do {
- rc = trustee_wait_event(!list_empty(&gcwq->idle_list));
- while (!list_empty(&gcwq->idle_list))
- destroy_worker(list_first_entry(&gcwq->idle_list,
- struct worker, entry));
- } while (gcwq->nr_workers && rc >= 0);
+ wait_event_timeout(gcwq->idle_wait,
+ gcwq->nr_idle == gcwq->nr_workers, HZ/10);
- /*
- * At this point, either draining has completed and no worker
- * is left, or cpu down has been canceled or the cpu is being
- * brought back up. There shouldn't be any idle one left.
- * Tell the remaining busy ones to rebind once it finishes the
- * currently scheduled works by scheduling the rebind_work.
- */
- WARN_ON(!list_empty(&gcwq->idle_list));
-
- for_each_busy_worker(worker, i, pos, gcwq) {
- struct work_struct *rebind_work = &worker->rebind_work;
-
- /*
- * Rebind_work may race with future cpu hotplug
- * operations. Use a separate flag to mark that
- * rebinding is scheduled.
- */
- worker->flags |= WORKER_REBIND;
- worker->flags &= ~WORKER_ROGUE;
+ spin_lock_irq(&gcwq->lock);
+ }
- /* queue rebind_work, wq doesn't matter, use the default one */
- if (test_and_set_bit(WORK_STRUCT_PENDING_BIT,
- work_data_bits(rebind_work)))
- continue;
+ WARN_ON(gcwq->nr_workers != gcwq->nr_idle);
- debug_work_activate(rebind_work);
- insert_work(get_cwq(gcwq->cpu, system_wq), rebind_work,
- worker->scheduled.next,
- work_color_to_flags(WORK_NO_COLOR));
- }
+ list_for_each_entry_safe(worker, n, &gcwq->idle_list, entry)
+ destroy_worker(worker);
- /* relinquish manager role */
- gcwq->flags &= ~GCWQ_MANAGING_WORKERS;
+ WARN_ON(gcwq->nr_workers || gcwq->nr_idle);
- /* notify completion */
- gcwq->trustee = NULL;
- gcwq->trustee_state = TRUSTEE_DONE;
- wake_up_all(&gcwq->trustee_wait);
spin_unlock_irq(&gcwq->lock);
- return 0;
-}
-/**
- * wait_trustee_state - wait for trustee to enter the specified state
- * @gcwq: gcwq the trustee of interest belongs to
- * @state: target state to wait for
- *
- * Wait for the trustee to reach @state. DONE is already matched.
- *
- * CONTEXT:
- * spin_lock_irq(gcwq->lock) which may be released and regrabbed
- * multiple times. To be used by cpu_callback.
- */
-static void __cpuinit wait_trustee_state(struct global_cwq *gcwq, int state)
-__releases(&gcwq->lock)
-__acquires(&gcwq->lock)
-{
- if (!(gcwq->trustee_state == state ||
- gcwq->trustee_state == TRUSTEE_DONE)) {
- spin_unlock_irq(&gcwq->lock);
- __wait_event(gcwq->trustee_wait,
- gcwq->trustee_state == state ||
- gcwq->trustee_state == TRUSTEE_DONE);
- spin_lock_irq(&gcwq->lock);
+ gcwq = get_gcwq(get_cpu_light());
+ spin_lock_irq(&gcwq->lock);
+ list_for_each_entry_safe(work, nw, &non_affine_works, entry) {
+ list_del_init(&work->entry);
+ ___queue_work(get_work_cwq(work)->wq, gcwq, work);
}
+ spin_unlock_irq(&gcwq->lock);
+ put_cpu_light();
}
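
A sketch of the effect on a WQ_NON_AFFINE work item when its CPU goes away (w, wq and the CPU number are illustrative):

	/* w queued on the gcwq of CPU 2, then cpu_down(2):
	 *   flush_gcwq(gcwq2)
	 *     list_move(&w.entry, &non_affine_works);
	 *     ...drain the worklist, destroy gcwq2's idle workers...
	 *     ___queue_work(wq, get_gcwq(get_cpu_light()), &w);
	 * so w ends up on whichever CPU runs the down callback.
	 */
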
-static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
+static int __devinit workqueue_cpu_down_callback(struct notifier_block *nfb,
unsigned long action,
void *hcpu)
{
unsigned int cpu = (unsigned long)hcpu;
struct global_cwq *gcwq = get_gcwq(cpu);
- struct task_struct *new_trustee = NULL;
- struct worker *uninitialized_var(new_worker);
- unsigned long flags;
action &= ~CPU_TASKS_FROZEN;
- switch (action) {
- case CPU_DOWN_PREPARE:
- new_trustee = kthread_create(trustee_thread, gcwq,
- "workqueue_trustee/%d\n", cpu);
- if (IS_ERR(new_trustee))
- return notifier_from_errno(PTR_ERR(new_trustee));
- kthread_bind(new_trustee, cpu);
- /* fall through */
- case CPU_UP_PREPARE:
- BUG_ON(gcwq->first_idle);
- new_worker = create_worker(gcwq, false);
- if (!new_worker) {
- if (new_trustee)
- kthread_stop(new_trustee);
- return NOTIFY_BAD;
- }
- }
+ switch (action) {
+ case CPU_DOWN_PREPARE:
+ flush_gcwq(gcwq);
+ break;
+ }
- /* some are called w/ irq disabled, don't disturb irq status */
- spin_lock_irqsave(&gcwq->lock, flags);
-
- switch (action) {
- case CPU_DOWN_PREPARE:
- /* initialize trustee and tell it to acquire the gcwq */
- BUG_ON(gcwq->trustee || gcwq->trustee_state != TRUSTEE_DONE);
- gcwq->trustee = new_trustee;
- gcwq->trustee_state = TRUSTEE_START;
- wake_up_process(gcwq->trustee);
- wait_trustee_state(gcwq, TRUSTEE_IN_CHARGE);
- /* fall through */
- case CPU_UP_PREPARE:
- BUG_ON(gcwq->first_idle);
- gcwq->first_idle = new_worker;
- break;
-
- case CPU_DYING:
- /*
- * Before this, the trustee and all workers except for
- * the ones which are still executing works from
- * before the last CPU down must be on the cpu. After
- * this, they'll all be diasporas.
- */
- gcwq->flags |= GCWQ_DISASSOCIATED;
- break;
-
- case CPU_POST_DEAD:
- gcwq->trustee_state = TRUSTEE_BUTCHER;
- /* fall through */
- case CPU_UP_CANCELED:
- destroy_worker(gcwq->first_idle);
- gcwq->first_idle = NULL;
- break;
-
- case CPU_DOWN_FAILED:
- case CPU_ONLINE:
- gcwq->flags &= ~GCWQ_DISASSOCIATED;
- if (gcwq->trustee_state != TRUSTEE_DONE) {
- gcwq->trustee_state = TRUSTEE_RELEASE;
- wake_up_process(gcwq->trustee);
- wait_trustee_state(gcwq, TRUSTEE_DONE);
- }
-
- /*
- * Trustee is done and there might be no worker left.
- * Put the first_idle in and request a real manager to
- * take a look.
- */
- spin_unlock_irq(&gcwq->lock);
- kthread_bind(gcwq->first_idle->task, cpu);
- spin_lock_irq(&gcwq->lock);
- gcwq->flags |= GCWQ_MANAGE_WORKERS;
- start_worker(gcwq->first_idle);
- gcwq->first_idle = NULL;
- break;
- }
-
- spin_unlock_irqrestore(&gcwq->lock, flags);
return notifier_from_errno(0);
}
@@ -3770,7 +3524,8 @@ static int __init init_workqueues(void)
unsigned int cpu;
int i;
- cpu_notifier(workqueue_cpu_callback, CPU_PRI_WORKQUEUE);
+ cpu_notifier(workqueue_cpu_up_callback, CPU_PRI_WORKQUEUE_ACTIVE);
+ hotcpu_notifier(workqueue_cpu_down_callback, CPU_PRI_WORKQUEUE_INACTIVE);
/* initialize gcwqs */
for_each_gcwq_cpu(cpu) {
@@ -3793,9 +3548,7 @@ static int __init init_workqueues(void)
(unsigned long)gcwq);
ida_init(&gcwq->worker_ida);
-
- gcwq->trustee_state = TRUSTEE_DONE;
- init_waitqueue_head(&gcwq->trustee_wait);
+ init_waitqueue_head(&gcwq->idle_wait);
}
/* create the initial worker */
diff --git a/kernel/workqueue_sched.h b/kernel/workqueue_sched.h
index 2d10fc98dc79..3bf73e24e1ea 100644
--- a/kernel/workqueue_sched.h
+++ b/kernel/workqueue_sched.h
@@ -4,6 +4,5 @@
* Scheduler hooks for concurrency managed workqueue. Only to be
* included from sched.c and workqueue.c.
*/
-void wq_worker_waking_up(struct task_struct *task, unsigned int cpu);
-struct task_struct *wq_worker_sleeping(struct task_struct *task,
- unsigned int cpu);
+void wq_worker_running(struct task_struct *task);
+void wq_worker_sleeping(struct task_struct *task);
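
On the scheduler side, the companion patch in this series is assumed to call these from a schedule() wrapper instead of from inside __schedule() under rq->lock; a sketch of the expected kernel/sched/core.c glue:

	static inline void sched_submit_work(struct task_struct *tsk)
	{
		if (!tsk->state)
			return;
		/* Notify workqueue that this worker blocks, before rq->lock
		 * is taken: wq_worker_sleeping() now acquires gcwq->lock,
		 * which is a sleeping lock on RT. */
		if (tsk->flags & PF_WQ_WORKER)
			wq_worker_sleeping(tsk);
	}

	static inline void sched_update_worker(struct task_struct *tsk)
	{
		if (tsk->flags & PF_WQ_WORKER)
			wq_worker_running(tsk);
	}

	asmlinkage void __sched schedule(void)
	{
		struct task_struct *tsk = current;

		sched_submit_work(tsk);
		__schedule();
		sched_update_worker(tsk);
	}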