Diffstat (limited to 'kernel')
-rw-r--r--  kernel/Makefile                      1
-rw-r--r--  kernel/bpf/core.c                   21
-rw-r--r--  kernel/bpf/devmap.c                  9
-rw-r--r--  kernel/bpf/lpm_trie.c                9
-rw-r--r--  kernel/bpf/syscall.c                 8
-rw-r--r--  kernel/bpf/verifier.c               14
-rw-r--r--  kernel/cgroup/cpuset.c              15
-rw-r--r--  kernel/cpu.c                         3
-rw-r--r--  kernel/cred.c                        9
-rw-r--r--  kernel/events/ring_buffer.c         39
-rw-r--r--  kernel/futex.c                     231
-rw-r--r--  kernel/irq/manage.c                 24
-rw-r--r--  kernel/kthread.c                    14
-rw-r--r--  kernel/livepatch/core.c              6
-rw-r--r--  kernel/locking/rtmutex.c            65
-rw-r--r--  kernel/locking/rtmutex_common.h      3
-rw-r--r--  kernel/ptrace.c                     24
-rw-r--r--  kernel/rcu/update.c                  2
-rw-r--r--  kernel/sched/core.c                  9
-rw-r--r--  kernel/sys.c                         2
-rw-r--r--  kernel/sysctl.c                      6
-rw-r--r--  kernel/time/alarmtimer.c             2
-rw-r--r--  kernel/time/hrtimer.c               36
-rw-r--r--  kernel/time/itimer.c                 2
-rw-r--r--  kernel/time/ntp.c                    2
-rw-r--r--  kernel/time/posix-cpu-timers.c      23
-rw-r--r--  kernel/time/posix-timers.c          69
-rw-r--r--  kernel/time/posix-timers.h           2
-rw-r--r--  kernel/time/timekeeping.c            5
-rw-r--r--  kernel/time/timer.c                 96
-rw-r--r--  kernel/trace/bpf_trace.c           100
-rw-r--r--  kernel/trace/ftrace.c               12
-rw-r--r--  kernel/trace/trace.c                16
-rw-r--r--  kernel/trace/trace.h                18
-rw-r--r--  kernel/trace/trace_branch.c          4
-rw-r--r--  kernel/trace/trace_events_hist.c     3
-rw-r--r--  kernel/trace/trace_kdb.c             6
37 files changed, 497 insertions(+), 413 deletions(-)
diff --git a/kernel/Makefile b/kernel/Makefile
index 7a63d567fdb5..df5e3ca30acd 100644
--- a/kernel/Makefile
+++ b/kernel/Makefile
@@ -30,6 +30,7 @@ KCOV_INSTRUMENT_extable.o := n
# Don't self-instrument.
KCOV_INSTRUMENT_kcov.o := n
KASAN_SANITIZE_kcov.o := n
+CFLAGS_kcov.o := $(call cc-option, -fno-conserve-stack -fno-stack-protector)
# cond_syscall is currently not LTO compatible
CFLAGS_sys_ni.o = $(DISABLE_LTO)
diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
index bad9985b8a08..36be400c3e65 100644
--- a/kernel/bpf/core.c
+++ b/kernel/bpf/core.c
@@ -366,13 +366,11 @@ void bpf_prog_kallsyms_del_all(struct bpf_prog *fp)
}
#ifdef CONFIG_BPF_JIT
-# define BPF_JIT_LIMIT_DEFAULT (PAGE_SIZE * 40000)
-
/* All BPF JIT sysctl knobs here. */
int bpf_jit_enable __read_mostly = IS_BUILTIN(CONFIG_BPF_JIT_ALWAYS_ON);
int bpf_jit_harden __read_mostly;
int bpf_jit_kallsyms __read_mostly;
-int bpf_jit_limit __read_mostly = BPF_JIT_LIMIT_DEFAULT;
+long bpf_jit_limit __read_mostly;
static __always_inline void
bpf_get_prog_addr_region(const struct bpf_prog *prog,
@@ -583,16 +581,27 @@ int bpf_get_kallsym(unsigned int symnum, unsigned long *value, char *type,
static atomic_long_t bpf_jit_current;
+/* Can be overridden by an arch's JIT compiler if it has a custom,
+ * dedicated BPF backend memory area, or if neither of the two
+ * below apply.
+ */
+u64 __weak bpf_jit_alloc_exec_limit(void)
+{
#if defined(MODULES_VADDR)
+ return MODULES_END - MODULES_VADDR;
+#else
+ return VMALLOC_END - VMALLOC_START;
+#endif
+}
+
static int __init bpf_jit_charge_init(void)
{
/* Only used as heuristic here to derive limit. */
- bpf_jit_limit = min_t(u64, round_up((MODULES_END - MODULES_VADDR) >> 2,
- PAGE_SIZE), INT_MAX);
+ bpf_jit_limit = min_t(u64, round_up(bpf_jit_alloc_exec_limit() >> 2,
+ PAGE_SIZE), LONG_MAX);
return 0;
}
pure_initcall(bpf_jit_charge_init);
-#endif
static int bpf_jit_charge_modmem(u32 pages)
{
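
The change above replaces the fixed BPF_JIT_LIMIT_DEFAULT with a boot-time heuristic built on a __weak hook: the generic bpf_jit_alloc_exec_limit() returns the module/vmalloc range size, and an architecture with a dedicated JIT region can ship a strong definition that overrides it at link time. A minimal standalone model of the weak-symbol idiom (userspace C with the GCC/Clang attribute; the 128 MiB figure is purely illustrative):

#include <stdio.h>
#include <stdint.h>

/* Generic fallback, analogous to bpf_jit_alloc_exec_limit(). An arch
 * file would provide a non-weak definition with the same signature. */
__attribute__((weak)) uint64_t jit_exec_limit(void)
{
	return 128u << 20;	/* stand-in for MODULES_END - MODULES_VADDR */
}

int main(void)
{
	/* A quarter of the region, rounded up to a 4 KiB page, mirroring
	 * bpf_jit_charge_init()'s heuristic. */
	uint64_t limit = ((jit_exec_limit() >> 2) + 4095) & ~4095ull;

	printf("derived JIT charge limit: %llu bytes\n",
	       (unsigned long long)limit);
	return 0;
}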
diff --git a/kernel/bpf/devmap.c b/kernel/bpf/devmap.c
index 2faad033715f..fc500ca464d0 100644
--- a/kernel/bpf/devmap.c
+++ b/kernel/bpf/devmap.c
@@ -186,6 +186,7 @@ static void dev_map_free(struct bpf_map *map)
if (!dev)
continue;
+ free_percpu(dev->bulkq);
dev_put(dev->dev);
kfree(dev);
}
@@ -281,6 +282,7 @@ void __dev_map_flush(struct bpf_map *map)
unsigned long *bitmap = this_cpu_ptr(dtab->flush_needed);
u32 bit;
+ rcu_read_lock();
for_each_set_bit(bit, bitmap, map->max_entries) {
struct bpf_dtab_netdev *dev = READ_ONCE(dtab->netdev_map[bit]);
struct xdp_bulk_queue *bq;
@@ -291,11 +293,12 @@ void __dev_map_flush(struct bpf_map *map)
if (unlikely(!dev))
continue;
- __clear_bit(bit, bitmap);
-
bq = this_cpu_ptr(dev->bulkq);
bq_xmit_all(dev, bq, XDP_XMIT_FLUSH, true);
+
+ __clear_bit(bit, bitmap);
}
+ rcu_read_unlock();
}
/* rcu_read_lock (from syscall and BPF contexts) ensures that if a delete and/or
@@ -388,6 +391,7 @@ static void dev_map_flush_old(struct bpf_dtab_netdev *dev)
int cpu;
+ rcu_read_lock();
for_each_online_cpu(cpu) {
bitmap = per_cpu_ptr(dev->dtab->flush_needed, cpu);
__clear_bit(dev->bit, bitmap);
@@ -395,6 +399,7 @@ static void dev_map_flush_old(struct bpf_dtab_netdev *dev)
bq = per_cpu_ptr(dev->bulkq, cpu);
bq_xmit_all(dev, bq, XDP_XMIT_FLUSH, false);
}
+ rcu_read_unlock();
}
}
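
Both devmap hunks extend RCU protection over the flush walk: the netdev entry read with READ_ONCE() and its per-CPU bulk queue are now used under rcu_read_lock(), and the flush bit is cleared only after bq_xmit_all() finishes. A rough userspace model of the read-side rule with liburcu (compile with -lurcu; the map and device types here are illustrative):

#include <stdio.h>
#include <urcu.h>

struct dev { int id; };
static struct dev *netdev_map[4];

static void flush_one(struct dev *d) { printf("flush dev %d\n", d->id); }

static void flush_all(void)
{
	rcu_read_lock();	/* the lock that was missing in the buggy path */
	for (int i = 0; i < 4; i++) {
		struct dev *d = rcu_dereference(netdev_map[i]);
		if (d)
			flush_one(d);	/* safe: no grace period can elapse here */
	}
	rcu_read_unlock();
}

int main(void)
{
	static struct dev d0 = { 0 };

	rcu_register_thread();
	rcu_assign_pointer(netdev_map[0], &d0);
	flush_all();
	rcu_unregister_thread();
	return 0;
}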
diff --git a/kernel/bpf/lpm_trie.c b/kernel/bpf/lpm_trie.c
index 4f3138e6ecb2..1a8b208f6c55 100644
--- a/kernel/bpf/lpm_trie.c
+++ b/kernel/bpf/lpm_trie.c
@@ -676,9 +676,14 @@ find_leftmost:
* have exact two children, so this function will never return NULL.
*/
for (node = search_root; node;) {
- if (!(node->flags & LPM_TREE_NODE_FLAG_IM))
+ if (node->flags & LPM_TREE_NODE_FLAG_IM) {
+ node = rcu_dereference(node->child[0]);
+ } else {
next_node = node;
- node = rcu_dereference(node->child[0]);
+ node = rcu_dereference(node->child[0]);
+ if (!node)
+ node = rcu_dereference(next_node->child[1]);
+ }
}
do_copy:
next_key->prefixlen = next_node->prefixlen;
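
The lpm_trie fix changes the descent above: intermediate (IM) nodes always have two children, but a non-intermediate node on the leftmost path may have only a right child, so the walk now falls back to child[1] when child[0] is NULL rather than stopping early. A standalone model of the corrected loop (plain pointers instead of RCU, illustrative names):

#include <stdio.h>
#include <stddef.h>

#define IM 1	/* stand-in for LPM_TREE_NODE_FLAG_IM */

struct node { int flags; int key; struct node *child[2]; };

static struct node *next_leftmost(struct node *search_root)
{
	struct node *node, *next = NULL;

	for (node = search_root; node; ) {
		if (node->flags & IM) {
			node = node->child[0];
		} else {
			next = node;
			node = node->child[0];
			if (!node)	/* the fix: try the right child too */
				node = next->child[1];
		}
	}
	return next;
}

int main(void)
{
	struct node leaf2 = { 0, 2, { NULL, NULL } };
	struct node leaf1 = { 0, 1, { NULL, &leaf2 } };	/* only a right child */
	struct node root  = { IM, 0, { &leaf1, &leaf2 } };

	printf("leftmost key: %d\n", next_leftmost(&root)->key);	/* 2 */
	return 0;
}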
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
index ede82382dd32..118e3a8fc764 100644
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -1342,6 +1342,8 @@ bpf_prog_load_check_attach_type(enum bpf_prog_type prog_type,
case BPF_CGROUP_INET6_CONNECT:
case BPF_CGROUP_UDP4_SENDMSG:
case BPF_CGROUP_UDP6_SENDMSG:
+ case BPF_CGROUP_UDP4_RECVMSG:
+ case BPF_CGROUP_UDP6_RECVMSG:
return 0;
default:
return -EINVAL;
@@ -1622,6 +1624,8 @@ static int bpf_prog_attach(const union bpf_attr *attr)
case BPF_CGROUP_INET6_CONNECT:
case BPF_CGROUP_UDP4_SENDMSG:
case BPF_CGROUP_UDP6_SENDMSG:
+ case BPF_CGROUP_UDP4_RECVMSG:
+ case BPF_CGROUP_UDP6_RECVMSG:
ptype = BPF_PROG_TYPE_CGROUP_SOCK_ADDR;
break;
case BPF_CGROUP_SOCK_OPS:
@@ -1698,6 +1702,8 @@ static int bpf_prog_detach(const union bpf_attr *attr)
case BPF_CGROUP_INET6_CONNECT:
case BPF_CGROUP_UDP4_SENDMSG:
case BPF_CGROUP_UDP6_SENDMSG:
+ case BPF_CGROUP_UDP4_RECVMSG:
+ case BPF_CGROUP_UDP6_RECVMSG:
ptype = BPF_PROG_TYPE_CGROUP_SOCK_ADDR;
break;
case BPF_CGROUP_SOCK_OPS:
@@ -1744,6 +1750,8 @@ static int bpf_prog_query(const union bpf_attr *attr,
case BPF_CGROUP_INET6_CONNECT:
case BPF_CGROUP_UDP4_SENDMSG:
case BPF_CGROUP_UDP6_SENDMSG:
+ case BPF_CGROUP_UDP4_RECVMSG:
+ case BPF_CGROUP_UDP6_RECVMSG:
case BPF_CGROUP_SOCK_OPS:
case BPF_CGROUP_DEVICE:
break;
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index acc2305ad895..1dff5f7323cc 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -4342,9 +4342,12 @@ static int check_return_code(struct bpf_verifier_env *env)
struct tnum range = tnum_range(0, 1);
switch (env->prog->type) {
+ case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
+ if (env->prog->expected_attach_type == BPF_CGROUP_UDP4_RECVMSG ||
+ env->prog->expected_attach_type == BPF_CGROUP_UDP6_RECVMSG)
+ range = tnum_range(1, 1);
case BPF_PROG_TYPE_CGROUP_SKB:
case BPF_PROG_TYPE_CGROUP_SOCK:
- case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
case BPF_PROG_TYPE_SOCK_OPS:
case BPF_PROG_TYPE_CGROUP_DEVICE:
break;
@@ -4360,16 +4363,17 @@ static int check_return_code(struct bpf_verifier_env *env)
}
if (!tnum_in(range, reg->var_off)) {
+ char tn_buf[48];
+
verbose(env, "At program exit the register R0 ");
if (!tnum_is_unknown(reg->var_off)) {
- char tn_buf[48];
-
tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
verbose(env, "has value %s", tn_buf);
} else {
verbose(env, "has unknown scalar value");
}
- verbose(env, " should have been 0 or 1\n");
+ tnum_strn(tn_buf, sizeof(tn_buf), range);
+ verbose(env, " should have been in %s\n", tn_buf);
return -EINVAL;
}
return 0;
@@ -5743,7 +5747,7 @@ static int convert_ctx_accesses(struct bpf_verifier_env *env)
insn->dst_reg,
shift);
insn_buf[cnt++] = BPF_ALU64_IMM(BPF_AND, insn->dst_reg,
- (1 << size * 8) - 1);
+ (1ULL << size * 8) - 1);
}
}
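
Two separate verifier fixes sit here: the recvmsg attach types must return exactly 1, expressed as tnum_range(1, 1) with an intentional fall-through into the shared cgroup cases, and the narrow-load mask now uses 1ULL because with a 32-bit constant `1 << size * 8` is undefined behavior for size == 4. A two-line demonstration of the mask issue:

#include <stdio.h>

int main(void)
{
	int size = 4;

	/* Old form: (1 << size * 8) - 1 shifts a 32-bit int by 32 bits,
	 * which is undefined behavior for size == 4 (do not do this). */
	unsigned long long mask = (1ULL << size * 8) - 1;	/* fixed form */

	printf("mask for size %d: %#llx\n", size, mask);	/* 0xffffffff */
	return 0;
}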
diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c
index 3e5d90076368..92575cb9b493 100644
--- a/kernel/cgroup/cpuset.c
+++ b/kernel/cgroup/cpuset.c
@@ -2432,10 +2432,23 @@ void cpuset_cpus_allowed(struct task_struct *tsk, struct cpumask *pmask)
raw_spin_unlock_irqrestore(&callback_lock, flags);
}
+/**
+ * cpuset_cpus_allowed_fallback - final fallback before complete catastrophe.
+ * @tsk: pointer to task_struct with which the scheduler is struggling
+ *
+ * Description: In the case that the scheduler cannot find an allowed cpu in
+ * tsk->cpus_allowed, we fall back to task_cs(tsk)->cpus_allowed. In legacy
+ * mode however, this value is the same as task_cs(tsk)->effective_cpus,
+ * which will not contain a sane cpumask during cases such as cpu hotplugging.
+ * This is the absolute last resort for the scheduler and it is only used if
+ * _every_ other avenue has been traveled.
+ **/
+
void cpuset_cpus_allowed_fallback(struct task_struct *tsk)
{
rcu_read_lock();
- do_set_cpus_allowed(tsk, task_cs(tsk)->effective_cpus);
+ do_set_cpus_allowed(tsk, is_in_v2_mode() ?
+ task_cs(tsk)->cpus_allowed : cpu_possible_mask);
rcu_read_unlock();
/*
diff --git a/kernel/cpu.c b/kernel/cpu.c
index 22a479fb720d..3857a0afdfbf 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -2356,6 +2356,9 @@ static int __init mitigations_parse_cmdline(char *arg)
cpu_mitigations = CPU_MITIGATIONS_AUTO;
else if (!strcmp(arg, "auto,nosmt"))
cpu_mitigations = CPU_MITIGATIONS_AUTO_NOSMT;
+ else
+ pr_crit("Unsupported mitigations=%s, system may still be vulnerable\n",
+ arg);
return 0;
}
diff --git a/kernel/cred.c b/kernel/cred.c
index ecf03657e71c..efd04b2ec84c 100644
--- a/kernel/cred.c
+++ b/kernel/cred.c
@@ -448,6 +448,15 @@ int commit_creds(struct cred *new)
if (task->mm)
set_dumpable(task->mm, suid_dumpable);
task->pdeath_signal = 0;
+ /*
+ * If a task drops privileges and becomes nondumpable,
+ * the dumpability change must become visible before
+ * the credential change; otherwise, a __ptrace_may_access()
+ * racing with this change may be able to attach to a task it
+ * shouldn't be able to attach to (as if the task had dropped
+ * privileges without becoming nondumpable).
+ * Pairs with a read barrier in __ptrace_may_access().
+ */
smp_wmb();
}
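
This smp_wmb() pairs with the smp_rmb() added to __ptrace_may_access() further down in this diff: the store clearing dumpability must be visible before the new credentials are. A userspace model of the pairing with C11 fences standing in for the kernel barriers (the names and the allow/deny rule are simplified):

#include <stdatomic.h>
#include <stdio.h>

static _Atomic int dumpable = 1;
static _Atomic int creds_privileged = 1;

static void drop_privs(void)	/* models commit_creds() */
{
	atomic_store_explicit(&dumpable, 0, memory_order_relaxed);
	atomic_thread_fence(memory_order_release);	/* smp_wmb() */
	atomic_store_explicit(&creds_privileged, 0, memory_order_relaxed);
}

static int may_access(void)	/* models __ptrace_may_access() */
{
	int priv = atomic_load_explicit(&creds_privileged, memory_order_relaxed);

	atomic_thread_fence(memory_order_acquire);	/* smp_rmb() */

	int dump = atomic_load_explicit(&dumpable, memory_order_relaxed);

	/* If we saw the new creds (priv == 0), the ordering guarantees we
	 * also see dump == 0, so the attach is refused. */
	return priv || dump;
}

int main(void)
{
	drop_privs();
	printf("access allowed: %d\n", may_access());	/* 0 */
	return 0;
}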
diff --git a/kernel/events/ring_buffer.c b/kernel/events/ring_buffer.c
index 99c7f199f2d4..12f351b253bb 100644
--- a/kernel/events/ring_buffer.c
+++ b/kernel/events/ring_buffer.c
@@ -49,14 +49,30 @@ static void perf_output_put_handle(struct perf_output_handle *handle)
unsigned long head;
again:
+ /*
+ * In order to avoid publishing a head value that goes backwards,
+ * we must ensure the load of @rb->head happens after we've
+ * incremented @rb->nest.
+ *
+ * Otherwise we can observe a @rb->head value before one published
+ * by an IRQ/NMI happening between the load and the increment.
+ */
+ barrier();
head = local_read(&rb->head);
/*
- * IRQ/NMI can happen here, which means we can miss a head update.
+ * IRQ/NMI can happen here and advance @rb->head, causing our
+ * load above to be stale.
*/
- if (!local_dec_and_test(&rb->nest))
+ /*
+ * If this isn't the outermost nesting, we don't have to update
+ * @rb->user_page->data_head.
+ */
+ if (local_read(&rb->nest) > 1) {
+ local_dec(&rb->nest);
goto out;
+ }
/*
* Since the mmap() consumer (userspace) can run on a different CPU:
@@ -85,12 +101,21 @@ again:
* See perf_output_begin().
*/
smp_wmb(); /* B, matches C */
- rb->user_page->data_head = head;
+ WRITE_ONCE(rb->user_page->data_head, head);
+
+ /*
+ * We must publish the head before decrementing the nest count,
+ * otherwise an IRQ/NMI can publish a more recent head value and our
+ * write will (temporarily) publish a stale value.
+ */
+ barrier();
+ local_set(&rb->nest, 0);
/*
- * Now check if we missed an update -- rely on previous implied
- * compiler barriers to force a re-read.
+ * Ensure we decrement @rb->nest before we validate the @rb->head.
+ * Otherwise we cannot be sure we caught the 'last' nested update.
*/
+ barrier();
if (unlikely(head != local_read(&rb->head))) {
local_inc(&rb->nest);
goto again;
@@ -465,7 +490,7 @@ void perf_aux_output_end(struct perf_output_handle *handle, unsigned long size)
handle->aux_flags);
}
- rb->user_page->aux_head = rb->aux_head;
+ WRITE_ONCE(rb->user_page->aux_head, rb->aux_head);
if (rb_need_aux_wakeup(rb))
wakeup = true;
@@ -497,7 +522,7 @@ int perf_aux_output_skip(struct perf_output_handle *handle, unsigned long size)
rb->aux_head += size;
- rb->user_page->aux_head = rb->aux_head;
+ WRITE_ONCE(rb->user_page->aux_head, rb->aux_head);
if (rb_need_aux_wakeup(rb)) {
perf_output_wakeup(handle);
handle->wakeup = rb->aux_wakeup + rb->aux_watermark;
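
The ring-buffer rewrite is all about ordering against IRQ/NMI writers on the same CPU, which is why compiler barriers suffice: read the head only after raising the nest count, publish it to the user page before dropping the count, then re-check the head to catch a nested update. A single-threaded userspace model of that protocol (an "IRQ" here would be any code running between two statements):

#include <stdio.h>

#define barrier() __asm__ volatile("" ::: "memory")

static volatile long rb_head, rb_nest, data_head;	/* data_head: user page */

static void output_end(void)	/* models perf_output_put_handle() */
{
	long head;
again:
	barrier();		/* read rb_head only after nest was raised */
	head = rb_head;

	if (rb_nest > 1) {	/* inner nesting: outermost writer publishes */
		rb_nest--;
		return;
	}

	data_head = head;	/* publish ... */
	barrier();
	rb_nest = 0;		/* ... before letting new outermost writers in */

	barrier();		/* re-read rb_head after dropping nest */
	if (head != rb_head) {	/* an "IRQ" advanced it meanwhile: retry */
		rb_nest = 1;
		goto again;
	}
}

int main(void)
{
	rb_nest = 1;		/* models perf_output_begin() */
	rb_head = 64;		/* bytes written */
	output_end();
	printf("published data_head: %ld\n", data_head);	/* 64 */
	return 0;
}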
diff --git a/kernel/futex.c b/kernel/futex.c
index fe90164aa6ec..688b6fcb79cb 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -240,7 +240,7 @@ struct futex_q {
struct plist_node list;
struct task_struct *task;
- spinlock_t *lock_ptr;
+ raw_spinlock_t *lock_ptr;
union futex_key key;
struct futex_pi_state *pi_state;
struct rt_mutex_waiter *rt_waiter;
@@ -261,7 +261,7 @@ static const struct futex_q futex_q_init = {
*/
struct futex_hash_bucket {
atomic_t waiters;
- spinlock_t lock;
+ raw_spinlock_t lock;
struct plist_head chain;
} ____cacheline_aligned_in_smp;
@@ -822,13 +822,13 @@ static void get_pi_state(struct futex_pi_state *pi_state)
* Drops a reference to the pi_state object and frees or caches it
* when the last reference is gone.
*/
-static void put_pi_state(struct futex_pi_state *pi_state)
+static struct futex_pi_state *__put_pi_state(struct futex_pi_state *pi_state)
{
if (!pi_state)
- return;
+ return NULL;
if (!atomic_dec_and_test(&pi_state->refcount))
- return;
+ return NULL;
/*
* If pi_state->owner is NULL, the owner is most probably dying
@@ -848,9 +848,7 @@ static void put_pi_state(struct futex_pi_state *pi_state)
raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);
}
- if (current->pi_state_cache) {
- kfree(pi_state);
- } else {
+ if (!current->pi_state_cache) {
/*
* pi_state->list is already empty.
* clear pi_state->owner.
@@ -859,6 +857,30 @@ static void put_pi_state(struct futex_pi_state *pi_state)
pi_state->owner = NULL;
atomic_set(&pi_state->refcount, 1);
current->pi_state_cache = pi_state;
+ pi_state = NULL;
+ }
+ return pi_state;
+}
+
+static void put_pi_state(struct futex_pi_state *pi_state)
+{
+ kfree(__put_pi_state(pi_state));
+}
+
+static void put_pi_state_atomic(struct futex_pi_state *pi_state,
+ struct list_head *to_free)
+{
+ if (__put_pi_state(pi_state))
+ list_add(&pi_state->list, to_free);
+}
+
+static void free_pi_state_list(struct list_head *to_free)
+{
+ struct futex_pi_state *p, *next;
+
+ list_for_each_entry_safe(p, next, to_free, list) {
+ list_del(&p->list);
+ kfree(p);
}
}
@@ -875,6 +897,7 @@ void exit_pi_state_list(struct task_struct *curr)
struct futex_pi_state *pi_state;
struct futex_hash_bucket *hb;
union futex_key key = FUTEX_KEY_INIT;
+ LIST_HEAD(to_free);
if (!futex_cmpxchg_enabled)
return;
@@ -908,7 +931,7 @@ void exit_pi_state_list(struct task_struct *curr)
}
raw_spin_unlock_irq(&curr->pi_lock);
- spin_lock(&hb->lock);
+ raw_spin_lock(&hb->lock);
raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock);
raw_spin_lock(&curr->pi_lock);
/*
@@ -918,10 +941,8 @@ void exit_pi_state_list(struct task_struct *curr)
if (head->next != next) {
/* retain curr->pi_lock for the loop invariant */
raw_spin_unlock(&pi_state->pi_mutex.wait_lock);
- raw_spin_unlock_irq(&curr->pi_lock);
- spin_unlock(&hb->lock);
- raw_spin_lock_irq(&curr->pi_lock);
- put_pi_state(pi_state);
+ raw_spin_unlock(&hb->lock);
+ put_pi_state_atomic(pi_state, &to_free);
continue;
}
@@ -932,7 +953,7 @@ void exit_pi_state_list(struct task_struct *curr)
raw_spin_unlock(&curr->pi_lock);
raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);
- spin_unlock(&hb->lock);
+ raw_spin_unlock(&hb->lock);
rt_mutex_futex_unlock(&pi_state->pi_mutex);
put_pi_state(pi_state);
@@ -940,6 +961,8 @@ void exit_pi_state_list(struct task_struct *curr)
raw_spin_lock_irq(&curr->pi_lock);
}
raw_spin_unlock_irq(&curr->pi_lock);
+
+ free_pi_state_list(&to_free);
}
#endif
@@ -1426,7 +1449,7 @@ static void __unqueue_futex(struct futex_q *q)
{
struct futex_hash_bucket *hb;
- if (WARN_ON_SMP(!q->lock_ptr || !spin_is_locked(q->lock_ptr))
+ if (WARN_ON_SMP(!q->lock_ptr || !raw_spin_is_locked(q->lock_ptr))
|| WARN_ON(plist_node_empty(&q->list)))
return;
@@ -1554,21 +1577,21 @@ static inline void
double_lock_hb(struct futex_hash_bucket *hb1, struct futex_hash_bucket *hb2)
{
if (hb1 <= hb2) {
- spin_lock(&hb1->lock);
+ raw_spin_lock(&hb1->lock);
if (hb1 < hb2)
- spin_lock_nested(&hb2->lock, SINGLE_DEPTH_NESTING);
+ raw_spin_lock_nested(&hb2->lock, SINGLE_DEPTH_NESTING);
} else { /* hb1 > hb2 */
- spin_lock(&hb2->lock);
- spin_lock_nested(&hb1->lock, SINGLE_DEPTH_NESTING);
+ raw_spin_lock(&hb2->lock);
+ raw_spin_lock_nested(&hb1->lock, SINGLE_DEPTH_NESTING);
}
}
static inline void
double_unlock_hb(struct futex_hash_bucket *hb1, struct futex_hash_bucket *hb2)
{
- spin_unlock(&hb1->lock);
+ raw_spin_unlock(&hb1->lock);
if (hb1 != hb2)
- spin_unlock(&hb2->lock);
+ raw_spin_unlock(&hb2->lock);
}
/*
@@ -1596,7 +1619,7 @@ futex_wake(u32 __user *uaddr, unsigned int flags, int nr_wake, u32 bitset)
if (!hb_waiters_pending(hb))
goto out_put_key;
- spin_lock(&hb->lock);
+ raw_spin_lock(&hb->lock);
plist_for_each_entry_safe(this, next, &hb->chain, list) {
if (match_futex (&this->key, &key)) {
@@ -1615,7 +1638,7 @@ futex_wake(u32 __user *uaddr, unsigned int flags, int nr_wake, u32 bitset)
}
}
- spin_unlock(&hb->lock);
+ raw_spin_unlock(&hb->lock);
wake_up_q(&wake_q);
out_put_key:
put_futex_key(&key);
@@ -1922,6 +1945,7 @@ static int futex_requeue(u32 __user *uaddr1, unsigned int flags,
struct futex_hash_bucket *hb1, *hb2;
struct futex_q *this, *next;
DEFINE_WAKE_Q(wake_q);
+ LIST_HEAD(to_free);
if (nr_wake < 0 || nr_requeue < 0)
return -EINVAL;
@@ -2149,16 +2173,6 @@ retry_private:
requeue_pi_wake_futex(this, &key2, hb2);
drop_count++;
continue;
- } else if (ret == -EAGAIN) {
- /*
- * Waiter was woken by timeout or
- * signal and has set pi_blocked_on to
- * PI_WAKEUP_INPROGRESS before we
- * tried to enqueue it on the rtmutex.
- */
- this->pi_state = NULL;
- put_pi_state(pi_state);
- continue;
} else if (ret) {
/*
* rt_mutex_start_proxy_lock() detected a
@@ -2169,7 +2183,7 @@ retry_private:
* object.
*/
this->pi_state = NULL;
- put_pi_state(pi_state);
+ put_pi_state_atomic(pi_state, &to_free);
/*
* We stop queueing more waiters and let user
* space deal with the mess.
@@ -2186,7 +2200,7 @@ retry_private:
* in futex_proxy_trylock_atomic() or in lookup_pi_state(). We
* need to drop it here again.
*/
- put_pi_state(pi_state);
+ put_pi_state_atomic(pi_state, &to_free);
out_unlock:
double_unlock_hb(hb1, hb2);
@@ -2207,6 +2221,7 @@ out_put_keys:
out_put_key1:
put_futex_key(&key1);
out:
+ free_pi_state_list(&to_free);
return ret ? ret : task_count;
}
@@ -2230,7 +2245,7 @@ static inline struct futex_hash_bucket *queue_lock(struct futex_q *q)
q->lock_ptr = &hb->lock;
- spin_lock(&hb->lock); /* implies smp_mb(); (A) */
+ raw_spin_lock(&hb->lock); /* implies smp_mb(); (A) */
return hb;
}
@@ -2238,7 +2253,7 @@ static inline void
queue_unlock(struct futex_hash_bucket *hb)
__releases(&hb->lock)
{
- spin_unlock(&hb->lock);
+ raw_spin_unlock(&hb->lock);
hb_waiters_dec(hb);
}
@@ -2277,7 +2292,7 @@ static inline void queue_me(struct futex_q *q, struct futex_hash_bucket *hb)
__releases(&hb->lock)
{
__queue_me(q, hb);
- spin_unlock(&hb->lock);
+ raw_spin_unlock(&hb->lock);
}
/**
@@ -2293,41 +2308,41 @@ static inline void queue_me(struct futex_q *q, struct futex_hash_bucket *hb)
*/
static int unqueue_me(struct futex_q *q)
{
- spinlock_t *lock_ptr;
+ raw_spinlock_t *lock_ptr;
int ret = 0;
/* In the common case we don't take the spinlock, which is nice. */
retry:
/*
- * q->lock_ptr can change between this read and the following spin_lock.
- * Use READ_ONCE to forbid the compiler from reloading q->lock_ptr and
- * optimizing lock_ptr out of the logic below.
+ * q->lock_ptr can change between this read and the following
+ * raw_spin_lock. Use READ_ONCE to forbid the compiler from reloading
+ * q->lock_ptr and optimizing lock_ptr out of the logic below.
*/
lock_ptr = READ_ONCE(q->lock_ptr);
if (lock_ptr != NULL) {
- spin_lock(lock_ptr);
+ raw_spin_lock(lock_ptr);
/*
* q->lock_ptr can change between reading it and
- * spin_lock(), causing us to take the wrong lock. This
+ * raw_spin_lock(), causing us to take the wrong lock. This
* corrects the race condition.
*
* Reasoning goes like this: if we have the wrong lock,
* q->lock_ptr must have changed (maybe several times)
- * between reading it and the spin_lock(). It can
- * change again after the spin_lock() but only if it was
- * already changed before the spin_lock(). It cannot,
+ * between reading it and the raw_spin_lock(). It can
+ * change again after the raw_spin_lock() but only if it was
+ * already changed before the raw_spin_lock(). It cannot,
* however, change back to the original value. Therefore
* we can detect whether we acquired the correct lock.
*/
if (unlikely(lock_ptr != q->lock_ptr)) {
- spin_unlock(lock_ptr);
+ raw_spin_unlock(lock_ptr);
goto retry;
}
__unqueue_futex(q);
BUG_ON(q->pi_state);
- spin_unlock(lock_ptr);
+ raw_spin_unlock(lock_ptr);
ret = 1;
}
@@ -2343,13 +2358,16 @@ retry:
static void unqueue_me_pi(struct futex_q *q)
__releases(q->lock_ptr)
{
+ struct futex_pi_state *ps;
+
__unqueue_futex(q);
BUG_ON(!q->pi_state);
- put_pi_state(q->pi_state);
+ ps = __put_pi_state(q->pi_state);
q->pi_state = NULL;
- spin_unlock(q->lock_ptr);
+ raw_spin_unlock(q->lock_ptr);
+ kfree(ps);
}
static int fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q,
@@ -2482,7 +2500,7 @@ retry:
*/
handle_err:
raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);
- spin_unlock(q->lock_ptr);
+ raw_spin_unlock(q->lock_ptr);
switch (err) {
case -EFAULT:
@@ -2500,7 +2518,7 @@ handle_err:
break;
}
- spin_lock(q->lock_ptr);
+ raw_spin_lock(q->lock_ptr);
raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock);
/*
@@ -2596,7 +2614,7 @@ static void futex_wait_queue_me(struct futex_hash_bucket *hb, struct futex_q *q,
/*
* The task state is guaranteed to be set before another task can
* wake it. set_current_state() is implemented using smp_store_mb() and
- * queue_me() calls spin_unlock() upon completion, both serializing
+ * queue_me() calls raw_spin_unlock() upon completion, both serializing
* access to the hash list and forcing another memory barrier.
*/
set_current_state(TASK_INTERRUPTIBLE);
@@ -2887,15 +2905,7 @@ retry_private:
* before __rt_mutex_start_proxy_lock() is done.
*/
raw_spin_lock_irq(&q.pi_state->pi_mutex.wait_lock);
- /*
- * the migrate_disable() here disables migration in the in_atomic() fast
- * path which is enabled again in the following spin_unlock(). We have
- * one migrate_disable() pending in the slow-path which is reversed
- * after the raw_spin_unlock_irq() where we leave the atomic context.
- */
- migrate_disable();
-
- spin_unlock(q.lock_ptr);
+ raw_spin_unlock(q.lock_ptr);
/*
* __rt_mutex_start_proxy_lock() unconditionally enqueues the @rt_waiter
* such that futex_unlock_pi() is guaranteed to observe the waiter when
@@ -2903,7 +2913,6 @@ retry_private:
*/
ret = __rt_mutex_start_proxy_lock(&q.pi_state->pi_mutex, &rt_waiter, current);
raw_spin_unlock_irq(&q.pi_state->pi_mutex.wait_lock);
- migrate_enable();
if (ret) {
if (ret == 1)
@@ -2917,7 +2926,7 @@ retry_private:
ret = rt_mutex_wait_proxy_lock(&q.pi_state->pi_mutex, to, &rt_waiter);
cleanup:
- spin_lock(q.lock_ptr);
+ raw_spin_lock(q.lock_ptr);
/*
* If we failed to acquire the lock (deadlock/signal/timeout), we must
* first acquire the hb->lock before removing the lock from the
@@ -3018,7 +3027,7 @@ retry:
return ret;
hb = hash_futex(&key);
- spin_lock(&hb->lock);
+ raw_spin_lock(&hb->lock);
/*
* Check waiters first. We do not trust user space values at
@@ -3052,21 +3061,11 @@ retry:
* rt_waiter. Also see the WARN in wake_futex_pi().
*/
raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock);
- /*
- * Magic trickery for now to make the RT migrate disable
- * logic happy. The following spin_unlock() happens with
- * interrupts disabled so the internal migrate_enable()
- * won't undo the migrate_disable() which was issued when
- * locking hb->lock.
- */
- migrate_disable();
- spin_unlock(&hb->lock);
+ raw_spin_unlock(&hb->lock);
/* drops pi_state->pi_mutex.wait_lock */
ret = wake_futex_pi(uaddr, uval, pi_state);
- migrate_enable();
-
put_pi_state(pi_state);
/*
@@ -3101,7 +3100,7 @@ retry:
* owner.
*/
if ((ret = cmpxchg_futex_value_locked(&curval, uaddr, uval, 0))) {
- spin_unlock(&hb->lock);
+ raw_spin_unlock(&hb->lock);
switch (ret) {
case -EFAULT:
goto pi_faulted;
@@ -3121,7 +3120,7 @@ retry:
ret = (curval == uval) ? 0 : -EAGAIN;
out_unlock:
- spin_unlock(&hb->lock);
+ raw_spin_unlock(&hb->lock);
out_putkey:
put_futex_key(&key);
return ret;
@@ -3237,7 +3236,7 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
struct hrtimer_sleeper timeout, *to = NULL;
struct futex_pi_state *pi_state = NULL;
struct rt_mutex_waiter rt_waiter;
- struct futex_hash_bucket *hb, *hb2;
+ struct futex_hash_bucket *hb;
union futex_key key2 = FUTEX_KEY_INIT;
struct futex_q q = futex_q_init;
int res, ret;
@@ -3295,55 +3294,20 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
/* Queue the futex_q, drop the hb lock, wait for wakeup. */
futex_wait_queue_me(hb, &q, to);
- /*
- * On RT we must avoid races with requeue and trying to block
- * on two mutexes (hb->lock and uaddr2's rtmutex) by
- * serializing access to pi_blocked_on with pi_lock.
- */
- raw_spin_lock_irq(&current->pi_lock);
- if (current->pi_blocked_on) {
- /*
- * We have been requeued or are in the process of
- * being requeued.
- */
- raw_spin_unlock_irq(&current->pi_lock);
- } else {
- /*
- * Setting pi_blocked_on to PI_WAKEUP_INPROGRESS
- * prevents a concurrent requeue from moving us to the
- * uaddr2 rtmutex. After that we can safely acquire
- * (and possibly block on) hb->lock.
- */
- current->pi_blocked_on = PI_WAKEUP_INPROGRESS;
- raw_spin_unlock_irq(&current->pi_lock);
-
- spin_lock(&hb->lock);
-
- /*
- * Clean up pi_blocked_on. We might leak it otherwise
- * when we succeeded with the hb->lock in the fast
- * path.
- */
- raw_spin_lock_irq(&current->pi_lock);
- current->pi_blocked_on = NULL;
- raw_spin_unlock_irq(&current->pi_lock);
-
- ret = handle_early_requeue_pi_wakeup(hb, &q, &key2, to);
- spin_unlock(&hb->lock);
- if (ret)
- goto out_put_keys;
- }
+ raw_spin_lock(&hb->lock);
+ ret = handle_early_requeue_pi_wakeup(hb, &q, &key2, to);
+ raw_spin_unlock(&hb->lock);
+ if (ret)
+ goto out_put_keys;
/*
- * In order to be here, we have either been requeued, are in
- * the process of being requeued, or requeue successfully
- * acquired uaddr2 on our behalf. If pi_blocked_on was
- * non-null above, we may be racing with a requeue. Do not
- * rely on q->lock_ptr to be hb2->lock until after blocking on
- * hb->lock or hb2->lock. The futex_requeue dropped our key1
- * reference and incremented our key2 reference count.
+ * In order for us to be here, we know our q.key == key2, and since
+ * we took the hb->lock above, we also know that futex_requeue() has
+ * completed and we no longer have to concern ourselves with a wakeup
+ * race with the atomic proxy lock acquisition by the requeue code. The
+ * futex_requeue dropped our key1 reference and incremented our key2
+ * reference count.
*/
- hb2 = hash_futex(&key2);
/* Check if the requeue code acquired the second futex for us. */
if (!q.rt_waiter) {
@@ -3352,8 +3316,9 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
* did a lock-steal - fix up the PI-state in that case.
*/
if (q.pi_state && (q.pi_state->owner != current)) {
- spin_lock(&hb2->lock);
- BUG_ON(&hb2->lock != q.lock_ptr);
+ struct futex_pi_state *ps_free;
+
+ raw_spin_lock(q.lock_ptr);
ret = fixup_pi_state_owner(uaddr2, &q, current);
if (ret && rt_mutex_owner(&q.pi_state->pi_mutex) == current) {
pi_state = q.pi_state;
@@ -3363,8 +3328,9 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
* Drop the reference to the pi state which
* the requeue_pi() code acquired for us.
*/
- put_pi_state(q.pi_state);
- spin_unlock(&hb2->lock);
+ ps_free = __put_pi_state(q.pi_state);
+ raw_spin_unlock(q.lock_ptr);
+ kfree(ps_free);
}
} else {
struct rt_mutex *pi_mutex;
@@ -3378,8 +3344,7 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
pi_mutex = &q.pi_state->pi_mutex;
ret = rt_mutex_wait_proxy_lock(pi_mutex, to, &rt_waiter);
- spin_lock(&hb2->lock);
- BUG_ON(&hb2->lock != q.lock_ptr);
+ raw_spin_lock(q.lock_ptr);
if (ret && !rt_mutex_cleanup_proxy_lock(pi_mutex, &rt_waiter))
ret = 0;
@@ -3816,7 +3781,7 @@ static int __init futex_init(void)
for (i = 0; i < futex_hashsize; i++) {
atomic_set(&futex_queues[i].waiters, 0);
plist_head_init(&futex_queues[i].chain);
- spin_lock_init(&futex_queues[i].lock);
+ raw_spin_lock_init(&futex_queues[i].lock);
}
return 0;
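
A recurring pattern in the futex conversion: with hb->lock now a raw (non-sleeping) spinlock, kfree() may no longer be called while it is held on RT, so put_pi_state_atomic() merely parks a dead pi_state on a caller-provided list and free_pi_state_list() reaps it after the unlock. A standalone model of that deferred-free idiom (a pthread spinlock stands in for the raw hb->lock):

#include <stdio.h>
#include <stdlib.h>
#include <pthread.h>

struct obj { struct obj *next; int id; };

static pthread_spinlock_t lock;		/* stand-in for the raw hb->lock */
static struct obj *hash_chain;

static void drain(void)
{
	struct obj *to_free = NULL, *o, *n;

	pthread_spin_lock(&lock);
	while ((o = hash_chain)) {	/* atomic section: unlink only */
		hash_chain = o->next;
		o->next = to_free;
		to_free = o;		/* models put_pi_state_atomic() */
	}
	pthread_spin_unlock(&lock);

	for (o = to_free; o; o = n) {	/* models free_pi_state_list() */
		n = o->next;
		printf("freeing obj %d\n", o->id);
		free(o);
	}
}

int main(void)
{
	pthread_spin_init(&lock, PTHREAD_PROCESS_PRIVATE);
	for (int i = 0; i < 3; i++) {
		struct obj *o = malloc(sizeof(*o));
		o->id = i;
		o->next = hash_chain;
		hash_chain = o;
	}
	drain();
	return 0;
}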
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 7f4041357d2f..b2736d7d863b 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -261,7 +261,7 @@ int irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask,
kref_get(&desc->affinity_notify->kref);
#ifdef CONFIG_PREEMPT_RT_BASE
- swork_queue(&desc->affinity_notify->swork);
+ kthread_schedule_work(&desc->affinity_notify->work);
#else
schedule_work(&desc->affinity_notify->work);
#endif
@@ -326,21 +326,11 @@ out:
}
#ifdef CONFIG_PREEMPT_RT_BASE
-static void init_helper_thread(void)
-{
- static int init_sworker_once;
-
- if (init_sworker_once)
- return;
- if (WARN_ON(swork_get()))
- return;
- init_sworker_once = 1;
-}
-static void irq_affinity_notify(struct swork_event *swork)
+static void irq_affinity_notify(struct kthread_work *work)
{
struct irq_affinity_notify *notify =
- container_of(swork, struct irq_affinity_notify, swork);
+ container_of(work, struct irq_affinity_notify, work);
_irq_affinity_notify(notify);
}
@@ -383,8 +373,7 @@ irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify)
notify->irq = irq;
kref_init(&notify->kref);
#ifdef CONFIG_PREEMPT_RT_BASE
- INIT_SWORK(&notify->swork, irq_affinity_notify);
- init_helper_thread();
+ kthread_init_work(&notify->work, irq_affinity_notify);
#else
INIT_WORK(&notify->work, irq_affinity_notify);
#endif
@@ -396,8 +385,9 @@ irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify)
raw_spin_unlock_irqrestore(&desc->lock, flags);
if (old_notify) {
-#ifndef CONFIG_PREEMPT_RT_BASE
- /* Need to address this for PREEMPT_RT */
+#ifdef CONFIG_PREEMPT_RT_BASE
+ kthread_cancel_work_sync(&notify->work);
+#else
cancel_work_sync(&old_notify->work);
#endif
kref_put(&old_notify->kref, old_notify->release);
diff --git a/kernel/kthread.c b/kernel/kthread.c
index 5641b55783a6..9db017761a1f 100644
--- a/kernel/kthread.c
+++ b/kernel/kthread.c
@@ -20,6 +20,7 @@
#include <linux/freezer.h>
#include <linux/ptrace.h>
#include <linux/uaccess.h>
+#include <linux/cgroup.h>
#include <trace/events/sched.h>
static DEFINE_SPINLOCK(kthread_create_lock);
@@ -1180,6 +1181,19 @@ void kthread_destroy_worker(struct kthread_worker *worker)
}
EXPORT_SYMBOL(kthread_destroy_worker);
+DEFINE_KTHREAD_WORKER(kthread_global_worker);
+EXPORT_SYMBOL(kthread_global_worker);
+
+__init void kthread_init_global_worker(void)
+{
+ kthread_global_worker.task = kthread_create(kthread_worker_fn,
+ &kthread_global_worker,
+ "kswork");
+ if (WARN_ON(IS_ERR(kthread_global_worker.task)))
+ return;
+ wake_up_process(kthread_global_worker.task);
+}
+
#ifdef CONFIG_BLK_CGROUP
/**
* kthread_associate_blkcg - associate blkcg to current kthread
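
kthread_init_global_worker() creates the single "kswork" worker thread that, presumably, the RT tree's kthread_schedule_work() wrapper queues onto (it replaces swork in the irq/manage.c hunk above). For reference, a kernel-style sketch of the underlying mainline kthread_work API, not a standalone program; the my_* names are hypothetical:

#include <linux/kthread.h>

static void my_notify_fn(struct kthread_work *work)
{
	/* runs in the worker thread's (sleepable) context */
}

static struct kthread_work my_work;

static void my_setup(struct kthread_worker *worker)
{
	kthread_init_work(&my_work, my_notify_fn);	/* bind the callback */
	kthread_queue_work(worker, &my_work);		/* hand it to the worker */
}

static void my_teardown(void)
{
	kthread_cancel_work_sync(&my_work);	/* waits if it is running */
}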
diff --git a/kernel/livepatch/core.c b/kernel/livepatch/core.c
index 5b77a7314e01..722c27c40e5b 100644
--- a/kernel/livepatch/core.c
+++ b/kernel/livepatch/core.c
@@ -30,6 +30,7 @@
#include <linux/elf.h>
#include <linux/moduleloader.h>
#include <linux/completion.h>
+#include <linux/memory.h>
#include <asm/cacheflush.h>
#include "core.h"
#include "patch.h"
@@ -708,16 +709,21 @@ static int klp_init_object_loaded(struct klp_patch *patch,
struct klp_func *func;
int ret;
+ mutex_lock(&text_mutex);
+
module_disable_ro(patch->mod);
ret = klp_write_object_relocations(patch->mod, obj);
if (ret) {
module_enable_ro(patch->mod, true);
+ mutex_unlock(&text_mutex);
return ret;
}
arch_klp_init_object_loaded(patch, obj);
module_enable_ro(patch->mod, true);
+ mutex_unlock(&text_mutex);
+
klp_for_each_func(obj, func) {
ret = klp_find_object_symbol(obj->name, func->old_name,
func->old_sympos,
diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c
index 2a9bf2443acc..44a33057a83a 100644
--- a/kernel/locking/rtmutex.c
+++ b/kernel/locking/rtmutex.c
@@ -142,12 +142,6 @@ static void fixup_rt_mutex_waiters(struct rt_mutex *lock)
WRITE_ONCE(*p, owner & ~RT_MUTEX_HAS_WAITERS);
}
-static int rt_mutex_real_waiter(struct rt_mutex_waiter *waiter)
-{
- return waiter && waiter != PI_WAKEUP_INPROGRESS &&
- waiter != PI_REQUEUE_INPROGRESS;
-}
-
/*
* We can speed up the acquire/release, if there's no debugging state to be
* set up.
@@ -421,8 +415,7 @@ int max_lock_depth = 1024;
static inline struct rt_mutex *task_blocked_on_lock(struct task_struct *p)
{
- return rt_mutex_real_waiter(p->pi_blocked_on) ?
- p->pi_blocked_on->lock : NULL;
+ return p->pi_blocked_on ? p->pi_blocked_on->lock : NULL;
}
/*
@@ -558,7 +551,7 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
* reached or the state of the chain has changed while we
* dropped the locks.
*/
- if (!rt_mutex_real_waiter(waiter))
+ if (!waiter)
goto out_unlock_pi;
/*
@@ -1328,22 +1321,6 @@ static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
return -EDEADLK;
raw_spin_lock(&task->pi_lock);
- /*
- * In the case of futex requeue PI, this will be a proxy
- * lock. The task will wake unaware that it is enqueueed on
- * this lock. Avoid blocking on two locks and corrupting
- * pi_blocked_on via the PI_WAKEUP_INPROGRESS
- * flag. futex_wait_requeue_pi() sets this when it wakes up
- * before requeue (due to a signal or timeout). Do not enqueue
- * the task if PI_WAKEUP_INPROGRESS is set.
- */
- if (task != current && task->pi_blocked_on == PI_WAKEUP_INPROGRESS) {
- raw_spin_unlock(&task->pi_lock);
- return -EAGAIN;
- }
-
- BUG_ON(rt_mutex_real_waiter(task->pi_blocked_on));
-
waiter->task = task;
waiter->lock = lock;
waiter->prio = task->prio;
@@ -1367,7 +1344,7 @@ static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
rt_mutex_enqueue_pi(owner, waiter);
rt_mutex_adjust_prio(owner);
- if (rt_mutex_real_waiter(owner->pi_blocked_on))
+ if (owner->pi_blocked_on)
chain_walk = 1;
} else if (rt_mutex_cond_detect_deadlock(waiter, chwalk)) {
chain_walk = 1;
@@ -1467,7 +1444,7 @@ static void remove_waiter(struct rt_mutex *lock,
{
bool is_top_waiter = (waiter == rt_mutex_top_waiter(lock));
struct task_struct *owner = rt_mutex_owner(lock);
- struct rt_mutex *next_lock = NULL;
+ struct rt_mutex *next_lock;
lockdep_assert_held(&lock->wait_lock);
@@ -1493,8 +1470,7 @@ static void remove_waiter(struct rt_mutex *lock,
rt_mutex_adjust_prio(owner);
/* Store the lock on which owner is blocked or NULL */
- if (rt_mutex_real_waiter(owner->pi_blocked_on))
- next_lock = task_blocked_on_lock(owner);
+ next_lock = task_blocked_on_lock(owner);
raw_spin_unlock(&owner->pi_lock);
@@ -1530,8 +1506,7 @@ void rt_mutex_adjust_pi(struct task_struct *task)
raw_spin_lock_irqsave(&task->pi_lock, flags);
waiter = task->pi_blocked_on;
- if (!rt_mutex_real_waiter(waiter) ||
- rt_mutex_waiter_equal(waiter, task_to_waiter(task))) {
+ if (!waiter || rt_mutex_waiter_equal(waiter, task_to_waiter(task))) {
raw_spin_unlock_irqrestore(&task->pi_lock, flags);
return;
}
@@ -2350,34 +2325,6 @@ int __rt_mutex_start_proxy_lock(struct rt_mutex *lock,
if (try_to_take_rt_mutex(lock, task, NULL))
return 1;
-#ifdef CONFIG_PREEMPT_RT_FULL
- /*
- * In PREEMPT_RT there's an added race.
- * If the task, that we are about to requeue, times out,
- * it can set the PI_WAKEUP_INPROGRESS. This tells the requeue
- * to skip this task. But right after the task sets
- * its pi_blocked_on to PI_WAKEUP_INPROGRESS it can then
- * block on the spin_lock(&hb->lock), which in RT is an rtmutex.
- * This will replace the PI_WAKEUP_INPROGRESS with the actual
- * lock that it blocks on. We *must not* place this task
- * on this proxy lock in that case.
- *
- * To prevent this race, we first take the task's pi_lock
- * and check if it has updated its pi_blocked_on. If it has,
- * we assume that it woke up and we return -EAGAIN.
- * Otherwise, we set the task's pi_blocked_on to
- * PI_REQUEUE_INPROGRESS, so that if the task is waking up
- * it will know that we are in the process of requeuing it.
- */
- raw_spin_lock(&task->pi_lock);
- if (task->pi_blocked_on) {
- raw_spin_unlock(&task->pi_lock);
- return -EAGAIN;
- }
- task->pi_blocked_on = PI_REQUEUE_INPROGRESS;
- raw_spin_unlock(&task->pi_lock);
-#endif
-
/* We enforce deadlock detection for futexes */
ret = task_blocks_on_rt_mutex(lock, waiter, task,
RT_MUTEX_FULL_CHAINWALK);
diff --git a/kernel/locking/rtmutex_common.h b/kernel/locking/rtmutex_common.h
index 546aaf058b9e..758dc43872e5 100644
--- a/kernel/locking/rtmutex_common.h
+++ b/kernel/locking/rtmutex_common.h
@@ -132,9 +132,6 @@ enum rtmutex_chainwalk {
/*
* PI-futex support (proxy locking functions, etc.):
*/
-#define PI_WAKEUP_INPROGRESS ((struct rt_mutex_waiter *) 1)
-#define PI_REQUEUE_INPROGRESS ((struct rt_mutex_waiter *) 2)
-
extern struct task_struct *rt_mutex_next_owner(struct rt_mutex *lock);
extern void rt_mutex_init_proxy_locked(struct rt_mutex *lock,
struct task_struct *proxy_owner);
diff --git a/kernel/ptrace.c b/kernel/ptrace.c
index 536a917ac6c0..ace2839323de 100644
--- a/kernel/ptrace.c
+++ b/kernel/ptrace.c
@@ -78,9 +78,7 @@ void __ptrace_link(struct task_struct *child, struct task_struct *new_parent,
*/
static void ptrace_link(struct task_struct *child, struct task_struct *new_parent)
{
- rcu_read_lock();
- __ptrace_link(child, new_parent, __task_cred(new_parent));
- rcu_read_unlock();
+ __ptrace_link(child, new_parent, current_cred());
}
/**
@@ -330,6 +328,16 @@ static int __ptrace_may_access(struct task_struct *task, unsigned int mode)
return -EPERM;
ok:
rcu_read_unlock();
+ /*
+ * If a task drops privileges and becomes nondumpable (through a syscall
+ * like setresuid()) while we are trying to access it, we must ensure
+ * that the dumpability is read after the credentials; otherwise,
+ * we may be able to attach to a task that we shouldn't be able to
+ * attach to (as if the task had dropped privileges without becoming
+ * nondumpable).
+ * Pairs with a write barrier in commit_creds().
+ */
+ smp_rmb();
mm = task->mm;
if (mm &&
((get_dumpable(mm) != SUID_DUMP_USER) &&
@@ -711,6 +719,10 @@ static int ptrace_peek_siginfo(struct task_struct *child,
if (arg.nr < 0)
return -EINVAL;
+ /* Ensure arg.off fits in an unsigned long */
+ if (arg.off > ULONG_MAX)
+ return 0;
+
if (arg.flags & PTRACE_PEEKSIGINFO_SHARED)
pending = &child->signal->shared_pending;
else
@@ -718,18 +730,20 @@ static int ptrace_peek_siginfo(struct task_struct *child,
for (i = 0; i < arg.nr; ) {
siginfo_t info;
- s32 off = arg.off + i;
+ unsigned long off = arg.off + i;
+ bool found = false;
spin_lock_irq(&child->sighand->siglock);
list_for_each_entry(q, &pending->list, list) {
if (!off--) {
+ found = true;
copy_siginfo(&info, &q->info);
break;
}
}
spin_unlock_irq(&child->sighand->siglock);
- if (off >= 0) /* beyond the end of the list */
+ if (!found) /* beyond the end of the list */
break;
#ifdef CONFIG_COMPAT
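
The peeksiginfo fix has two parts: arg.off is a 64-bit value, so the old `s32 off = arg.off + i` could truncate to a negative number and fool the `off >= 0` end-of-list test, hence the widened type, the explicit found flag, and the ULONG_MAX guard for 32-bit kernels. A standalone demonstration of the truncation (result of the narrowing cast is implementation-defined; shown for typical two's-complement targets):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t arg_off = 0x80000000ull;	/* fits easily in a u64 */

	int32_t off_old = (int32_t)arg_off;		/* old code: truncates */
	unsigned long off_new = (unsigned long)arg_off;	/* fixed width */

	printf("old s32 off: %d\n", off_old);	/* -2147483648: looks 'found' */
	printf("new off:     %lu\n", off_new);
	return 0;
}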
diff --git a/kernel/rcu/update.c b/kernel/rcu/update.c
index 16d8dba23329..ed75addd3ccd 100644
--- a/kernel/rcu/update.c
+++ b/kernel/rcu/update.c
@@ -69,7 +69,9 @@ module_param(rcu_expedited, int, 0);
extern int rcu_normal; /* from sysctl */
module_param(rcu_normal, int, 0);
static int rcu_normal_after_boot = IS_ENABLED(CONFIG_PREEMPT_RT_FULL);
+#ifndef CONFIG_PREEMPT_RT_FULL
module_param(rcu_normal_after_boot, int, 0);
+#endif
#endif /* #ifndef CONFIG_TINY_RCU */
#ifdef CONFIG_DEBUG_LOCK_ALLOC
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 91a9b2556fb0..1b2503b87473 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -558,14 +558,11 @@ void resched_cpu(int cpu)
*/
int get_nohz_timer_target(void)
{
- int i, cpu;
+ int i, cpu = smp_processor_id();
struct sched_domain *sd;
- preempt_disable_rt();
- cpu = smp_processor_id();
-
if (!idle_cpu(cpu) && housekeeping_cpu(cpu, HK_FLAG_TIMER))
- goto preempt_en_rt;
+ return cpu;
rcu_read_lock();
for_each_domain(cpu, sd) {
@@ -584,8 +581,6 @@ int get_nohz_timer_target(void)
cpu = housekeeping_any_cpu(HK_FLAG_TIMER);
unlock:
rcu_read_unlock();
-preempt_en_rt:
- preempt_enable_rt();
return cpu;
}
diff --git a/kernel/sys.c b/kernel/sys.c
index 123bd73046ec..096932a45046 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -1919,7 +1919,7 @@ static int validate_prctl_map(struct prctl_mm_map *prctl_map)
((unsigned long)prctl_map->__m1 __op \
(unsigned long)prctl_map->__m2) ? 0 : -EINVAL
error = __prctl_check_order(start_code, <, end_code);
- error |= __prctl_check_order(start_data, <, end_data);
+ error |= __prctl_check_order(start_data,<=, end_data);
error |= __prctl_check_order(start_brk, <=, brk);
error |= __prctl_check_order(arg_start, <=, arg_end);
error |= __prctl_check_order(env_start, <=, env_end);
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 9a85c7ae7362..f8576509c7be 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -2791,8 +2791,10 @@ static int __do_proc_doulongvec_minmax(void *data, struct ctl_table *table, int
if (neg)
continue;
val = convmul * val / convdiv;
- if ((min && val < *min) || (max && val > *max))
- continue;
+ if ((min && val < *min) || (max && val > *max)) {
+ err = -EINVAL;
+ break;
+ }
*i = val;
} else {
val = convdiv * (*i) / convmul;
diff --git a/kernel/time/alarmtimer.c b/kernel/time/alarmtimer.c
index 966708e8ce14..efa1e433974b 100644
--- a/kernel/time/alarmtimer.c
+++ b/kernel/time/alarmtimer.c
@@ -436,7 +436,7 @@ int alarm_cancel(struct alarm *alarm)
int ret = alarm_try_to_cancel(alarm);
if (ret >= 0)
return ret;
- hrtimer_wait_for_timer(&alarm->timer);
+ hrtimer_grab_expiry_lock(&alarm->timer);
}
}
EXPORT_SYMBOL_GPL(alarm_cancel);
diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c
index e1040b80362c..4534e7871c8c 100644
--- a/kernel/time/hrtimer.c
+++ b/kernel/time/hrtimer.c
@@ -963,33 +963,16 @@ u64 hrtimer_forward(struct hrtimer *timer, ktime_t now, ktime_t interval)
}
EXPORT_SYMBOL_GPL(hrtimer_forward);
-#ifdef CONFIG_PREEMPT_RT_BASE
-# define wake_up_timer_waiters(b) wake_up(&(b)->wait)
-
-/**
- * hrtimer_wait_for_timer - Wait for a running timer
- *
- * @timer: timer to wait for
- *
- * The function waits in case the timers callback function is
- * currently executed on the waitqueue of the timer base. The
- * waitqueue is woken up after the timer callback function has
- * finished execution.
- */
-void hrtimer_wait_for_timer(const struct hrtimer *timer)
+void hrtimer_grab_expiry_lock(const struct hrtimer *timer)
{
struct hrtimer_clock_base *base = timer->base;
- if (base && base->cpu_base &&
- base->index >= HRTIMER_BASE_MONOTONIC_SOFT)
- wait_event(base->cpu_base->wait,
- !(hrtimer_callback_running(timer)));
+ if (base && base->cpu_base) {
+ spin_lock(&base->cpu_base->softirq_expiry_lock);
+ spin_unlock(&base->cpu_base->softirq_expiry_lock);
+ }
}
-#else
-# define wake_up_timer_waiters(b) do { } while (0)
-#endif
-
/*
* enqueue_hrtimer - internal function to (re)start a timer
*
@@ -1224,7 +1207,7 @@ int hrtimer_cancel(struct hrtimer *timer)
if (ret >= 0)
return ret;
- hrtimer_wait_for_timer(timer);
+ hrtimer_grab_expiry_lock(timer);
}
}
EXPORT_SYMBOL_GPL(hrtimer_cancel);
@@ -1528,6 +1511,7 @@ static __latent_entropy void hrtimer_run_softirq(struct softirq_action *h)
unsigned long flags;
ktime_t now;
+ spin_lock(&cpu_base->softirq_expiry_lock);
raw_spin_lock_irqsave(&cpu_base->lock, flags);
now = hrtimer_update_base(cpu_base);
@@ -1537,7 +1521,7 @@ static __latent_entropy void hrtimer_run_softirq(struct softirq_action *h)
hrtimer_update_softirq_timer(cpu_base, true);
raw_spin_unlock_irqrestore(&cpu_base->lock, flags);
- wake_up_timer_waiters(cpu_base);
+ spin_unlock(&cpu_base->softirq_expiry_lock);
}
#ifdef CONFIG_HIGH_RES_TIMERS
@@ -1947,9 +1931,7 @@ int hrtimers_prepare_cpu(unsigned int cpu)
cpu_base->softirq_next_timer = NULL;
cpu_base->expires_next = KTIME_MAX;
cpu_base->softirq_expires_next = KTIME_MAX;
-#ifdef CONFIG_PREEMPT_RT_BASE
- init_waitqueue_head(&cpu_base->wait);
-#endif
+ spin_lock_init(&cpu_base->softirq_expiry_lock);
return 0;
}
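
hrtimer_grab_expiry_lock() replaces the RT waitqueue with a lock-as-wait idiom: the softirq holds softirq_expiry_lock across the whole expiry batch, so a canceller that takes and immediately releases the same lock knows the running callback has finished. A toy pthread model of the idiom (thread start-up ordering is ignored; it only illustrates the shape):

#include <stdio.h>
#include <pthread.h>

static pthread_mutex_t expiry_lock = PTHREAD_MUTEX_INITIALIZER;

static void *expiry_path(void *arg)	/* models hrtimer_run_softirq() */
{
	pthread_mutex_lock(&expiry_lock);
	puts("callback running");	/* timer callbacks execute here */
	pthread_mutex_unlock(&expiry_lock);
	return NULL;
}

static void grab_expiry_lock(void)	/* models hrtimer_grab_expiry_lock() */
{
	pthread_mutex_lock(&expiry_lock);	/* blocks while callbacks run */
	pthread_mutex_unlock(&expiry_lock);
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, expiry_path, NULL);
	grab_expiry_lock();	/* once this returns, no callback we raced
				 * with is still executing */
	puts("cancel may proceed");
	pthread_join(t, NULL);
	return 0;
}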
diff --git a/kernel/time/itimer.c b/kernel/time/itimer.c
index 55b0e58368bf..a5ff222df4c7 100644
--- a/kernel/time/itimer.c
+++ b/kernel/time/itimer.c
@@ -215,7 +215,7 @@ again:
/* We are sharing ->siglock with it_real_fn() */
if (hrtimer_try_to_cancel(timer) < 0) {
spin_unlock_irq(&tsk->sighand->siglock);
- hrtimer_wait_for_timer(&tsk->signal->real_timer);
+ hrtimer_grab_expiry_lock(timer);
goto again;
}
expires = timeval_to_ktime(value->it_value);
diff --git a/kernel/time/ntp.c b/kernel/time/ntp.c
index c5e0cba3b39c..6b23cd584295 100644
--- a/kernel/time/ntp.c
+++ b/kernel/time/ntp.c
@@ -698,7 +698,7 @@ static inline void process_adjtimex_modes(const struct timex *txc, s32 *time_tai
time_constant = max(time_constant, 0l);
}
- if (txc->modes & ADJ_TAI && txc->constant > 0)
+ if (txc->modes & ADJ_TAI && txc->constant >= 0)
*time_tai = txc->constant;
if (txc->modes & ADJ_OFFSET)
diff --git a/kernel/time/posix-cpu-timers.c b/kernel/time/posix-cpu-timers.c
index baeeaef3b721..59ceedbb03f0 100644
--- a/kernel/time/posix-cpu-timers.c
+++ b/kernel/time/posix-cpu-timers.c
@@ -789,6 +789,7 @@ check_timers_list(struct list_head *timers,
return t->expires;
t->firing = 1;
+ t->firing_cpu = smp_processor_id();
list_move_tail(&t->entry, firing);
}
@@ -1134,6 +1135,20 @@ static inline int fastpath_timer_check(struct task_struct *tsk)
return 0;
}
+static DEFINE_PER_CPU(spinlock_t, cpu_timer_expiry_lock) = __SPIN_LOCK_UNLOCKED(cpu_timer_expiry_lock);
+
+void cpu_timers_grab_expiry_lock(struct k_itimer *timer)
+{
+ int cpu = timer->it.cpu.firing_cpu;
+
+ if (cpu >= 0) {
+ spinlock_t *expiry_lock = per_cpu_ptr(&cpu_timer_expiry_lock, cpu);
+
+ spin_lock_irq(expiry_lock);
+ spin_unlock_irq(expiry_lock);
+ }
+}
+
/*
* This is called from the timer interrupt handler. The irq handler has
* already updated our counts. We need to check if any timers fire now.
@@ -1144,6 +1159,7 @@ static void __run_posix_cpu_timers(struct task_struct *tsk)
LIST_HEAD(firing);
struct k_itimer *timer, *next;
unsigned long flags;
+ spinlock_t *expiry_lock;
/*
* The fast path checks that there are no expired thread or thread
@@ -1152,6 +1168,9 @@ static void __run_posix_cpu_timers(struct task_struct *tsk)
if (!fastpath_timer_check(tsk))
return;
+ expiry_lock = this_cpu_ptr(&cpu_timer_expiry_lock);
+ spin_lock(expiry_lock);
+
if (!lock_task_sighand(tsk, &flags))
return;
/*
@@ -1186,6 +1205,7 @@ static void __run_posix_cpu_timers(struct task_struct *tsk)
list_del_init(&timer->it.cpu.entry);
cpu_firing = timer->it.cpu.firing;
timer->it.cpu.firing = 0;
+ timer->it.cpu.firing_cpu = -1;
/*
* The firing flag is -1 if we collided with a reset
* of the timer, which already reported this
@@ -1195,6 +1215,7 @@ static void __run_posix_cpu_timers(struct task_struct *tsk)
cpu_timer_fire(timer);
spin_unlock(&timer->it_lock);
}
+ spin_unlock(expiry_lock);
}
#ifdef CONFIG_PREEMPT_RT_BASE
@@ -1460,6 +1481,8 @@ static int do_cpu_nanosleep(const clockid_t which_clock, int flags,
spin_unlock_irq(&timer.it_lock);
while (error == TIMER_RETRY) {
+
+ cpu_timers_grab_expiry_lock(&timer);
/*
* We need to handle case when timer was or is in the
* middle of firing. In other cases we already freed
diff --git a/kernel/time/posix-timers.c b/kernel/time/posix-timers.c
index a5ec421e3437..c7e97d421590 100644
--- a/kernel/time/posix-timers.c
+++ b/kernel/time/posix-timers.c
@@ -821,25 +821,20 @@ static void common_hrtimer_arm(struct k_itimer *timr, ktime_t expires,
hrtimer_start_expires(timer, HRTIMER_MODE_ABS);
}
-/*
- * Protected by RCU!
- */
-static void timer_wait_for_callback(const struct k_clock *kc, struct k_itimer *timr)
+static int common_hrtimer_try_to_cancel(struct k_itimer *timr)
{
-#ifdef CONFIG_PREEMPT_RT_FULL
- if (kc->timer_arm == common_hrtimer_arm)
- hrtimer_wait_for_timer(&timr->it.real.timer);
- else if (kc == &alarm_clock)
- hrtimer_wait_for_timer(&timr->it.alarm.alarmtimer.timer);
- else
- /* FIXME: Whacky hack for posix-cpu-timers */
- schedule_timeout(1);
-#endif
+ return hrtimer_try_to_cancel(&timr->it.real.timer);
}
-static int common_hrtimer_try_to_cancel(struct k_itimer *timr)
+static void timer_wait_for_callback(const struct k_clock *kc, struct k_itimer *timer)
{
- return hrtimer_try_to_cancel(&timr->it.real.timer);
+ if (kc->timer_arm == common_hrtimer_arm)
+ hrtimer_grab_expiry_lock(&timer->it.real.timer);
+ else if (kc == &alarm_clock)
+ hrtimer_grab_expiry_lock(&timer->it.alarm.alarmtimer.timer);
+ else
+ /* posix-cpu-timers */
+ cpu_timers_grab_expiry_lock(timer);
}
/* Set a POSIX.1b interval timer. */
@@ -901,21 +896,21 @@ retry:
if (!timr)
return -EINVAL;
- rcu_read_lock();
kc = timr->kclock;
if (WARN_ON_ONCE(!kc || !kc->timer_set))
error = -EINVAL;
else
error = kc->timer_set(timr, flags, new_spec64, old_spec64);
- unlock_timer(timr, flag);
if (error == TIMER_RETRY) {
+ rcu_read_lock();
+ unlock_timer(timr, flag);
timer_wait_for_callback(kc, timr);
- old_spec64 = NULL; // We already got the old time...
rcu_read_unlock();
+ old_spec64 = NULL; // We already got the old time...
goto retry;
}
- rcu_read_unlock();
+ unlock_timer(timr, flag);
return error;
}
@@ -977,13 +972,21 @@ int common_timer_del(struct k_itimer *timer)
return 0;
}
-static inline int timer_delete_hook(struct k_itimer *timer)
+static int timer_delete_hook(struct k_itimer *timer)
{
const struct k_clock *kc = timer->kclock;
+ int ret;
if (WARN_ON_ONCE(!kc || !kc->timer_del))
return -EINVAL;
- return kc->timer_del(timer);
+ ret = kc->timer_del(timer);
+ if (ret == TIMER_RETRY) {
+ rcu_read_lock();
+ spin_unlock_irq(&timer->it_lock);
+ timer_wait_for_callback(kc, timer);
+ rcu_read_unlock();
+ }
+ return ret;
}
/* Delete a POSIX.1b interval timer. */
@@ -997,15 +1000,8 @@ retry_delete:
if (!timer)
return -EINVAL;
- rcu_read_lock();
- if (timer_delete_hook(timer) == TIMER_RETRY) {
- unlock_timer(timer, flags);
- timer_wait_for_callback(clockid_to_kclock(timer->it_clock),
- timer);
- rcu_read_unlock();
+ if (timer_delete_hook(timer) == TIMER_RETRY)
goto retry_delete;
- }
- rcu_read_unlock();
spin_lock(&current->sighand->siglock);
list_del(&timer->list);
@@ -1031,20 +1027,9 @@ static void itimer_delete(struct k_itimer *timer)
retry_delete:
spin_lock_irqsave(&timer->it_lock, flags);
- /* On RT we can race with a deletion */
- if (!timer->it_signal) {
- unlock_timer(timer, flags);
- return;
- }
-
- if (timer_delete_hook(timer) == TIMER_RETRY) {
- rcu_read_lock();
- unlock_timer(timer, flags);
- timer_wait_for_callback(clockid_to_kclock(timer->it_clock),
- timer);
- rcu_read_unlock();
+ if (timer_delete_hook(timer) == TIMER_RETRY)
goto retry_delete;
- }
+
list_del(&timer->list);
/*
* This keeps any tasks waiting on the spin lock from thinking
diff --git a/kernel/time/posix-timers.h b/kernel/time/posix-timers.h
index ddb21145211a..725bd230a8db 100644
--- a/kernel/time/posix-timers.h
+++ b/kernel/time/posix-timers.h
@@ -32,6 +32,8 @@ extern const struct k_clock clock_process;
extern const struct k_clock clock_thread;
extern const struct k_clock alarm_clock;
+extern void cpu_timers_grab_expiry_lock(struct k_itimer *timer);
+
int posix_timer_event(struct k_itimer *timr, int si_private);
void common_timer_get(struct k_itimer *timr, struct itimerspec64 *cur_setting);
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index 5e584cdebd24..0517bc42c6b6 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -812,17 +812,18 @@ ktime_t ktime_get_coarse_with_offset(enum tk_offsets offs)
struct timekeeper *tk = &tk_core.timekeeper;
unsigned int seq;
ktime_t base, *offset = offsets[offs];
+ u64 nsecs;
WARN_ON(timekeeping_suspended);
do {
seq = read_seqcount_begin(&tk_core.seq);
base = ktime_add(tk->tkr_mono.base, *offset);
+ nsecs = tk->tkr_mono.xtime_nsec >> tk->tkr_mono.shift;
} while (read_seqcount_retry(&tk_core.seq, seq));
- return base;
-
+ return base + nsecs;
}
EXPORT_SYMBOL_GPL(ktime_get_coarse_with_offset);
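
The timekeeping fix illustrates the seqcount reader rule: every input to the result (here tkr_mono.xtime_nsec) must be sampled inside the retry loop and only combined once the sequence count confirms a consistent snapshot. A userspace model of that read pattern (the kernel's read_seqcount_begin/retry helpers embed the required barriers; this toy approximates them with acquire loads):

#include <stdio.h>
#include <stdatomic.h>

static _Atomic unsigned seq;	/* even: stable, odd: write in progress */
static _Atomic long base, nsec;

static long read_coarse(void)	/* models ktime_get_coarse_with_offset() */
{
	unsigned s;
	long b, n;

	do {
		s = atomic_load_explicit(&seq, memory_order_acquire);
		b = atomic_load_explicit(&base, memory_order_acquire);
		/* the fix: sample nsec inside the loop too */
		n = atomic_load_explicit(&nsec, memory_order_acquire);
	} while ((s & 1) ||
		 atomic_load_explicit(&seq, memory_order_acquire) != s);

	return b + n;	/* combine only after a consistent snapshot */
}

int main(void)
{
	atomic_store(&base, 1000);
	atomic_store(&nsec, 42);
	printf("coarse time: %ld\n", read_coarse());	/* 1042 */
	return 0;
}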
diff --git a/kernel/time/timer.c b/kernel/time/timer.c
index 781483c76b17..d6289d8df06b 100644
--- a/kernel/time/timer.c
+++ b/kernel/time/timer.c
@@ -44,7 +44,6 @@
#include <linux/sched/debug.h>
#include <linux/slab.h>
#include <linux/compat.h>
-#include <linux/swait.h>
#include <linux/uaccess.h>
#include <asm/unistd.h>
@@ -198,9 +197,7 @@ EXPORT_SYMBOL(jiffies_64);
struct timer_base {
raw_spinlock_t lock;
struct timer_list *running_timer;
-#ifdef CONFIG_PREEMPT_RT_FULL
- struct swait_queue_head wait_for_running_timer;
-#endif
+ spinlock_t expiry_lock;
unsigned long clk;
unsigned long next_expiry;
unsigned int cpu;
@@ -1189,33 +1186,6 @@ void add_timer_on(struct timer_list *timer, int cpu)
}
EXPORT_SYMBOL_GPL(add_timer_on);
-#ifdef CONFIG_PREEMPT_RT_FULL
-/*
- * Wait for a running timer
- */
-static void wait_for_running_timer(struct timer_list *timer)
-{
- struct timer_base *base;
- u32 tf = timer->flags;
-
- if (tf & TIMER_MIGRATING)
- return;
-
- base = get_timer_base(tf);
- swait_event_exclusive(base->wait_for_running_timer,
- base->running_timer != timer);
-}
-
-# define wakeup_timer_waiters(b) swake_up_all(&(b)->wait_for_running_timer)
-#else
-static inline void wait_for_running_timer(struct timer_list *timer)
-{
- cpu_relax();
-}
-
-# define wakeup_timer_waiters(b) do { } while (0)
-#endif
-
/**
* del_timer - deactivate a timer.
* @timer: the timer to be deactivated
@@ -1245,14 +1215,8 @@ int del_timer(struct timer_list *timer)
}
EXPORT_SYMBOL(del_timer);
-/**
- * try_to_del_timer_sync - Try to deactivate a timer
- * @timer: timer to delete
- *
- * This function tries to deactivate a timer. Upon successful (ret >= 0)
- * exit the timer is not queued and the handler is not running on any CPU.
- */
-int try_to_del_timer_sync(struct timer_list *timer)
+static int __try_to_del_timer_sync(struct timer_list *timer,
+ struct timer_base **basep)
{
struct timer_base *base;
unsigned long flags;
@@ -1260,7 +1224,7 @@ int try_to_del_timer_sync(struct timer_list *timer)
debug_assert_init(timer);
- base = lock_timer_base(timer, &flags);
+ *basep = base = lock_timer_base(timer, &flags);
if (base->running_timer != timer)
ret = detach_if_pending(timer, base, true);
@@ -1269,9 +1233,42 @@ int try_to_del_timer_sync(struct timer_list *timer)
return ret;
}
+
+/**
+ * try_to_del_timer_sync - Try to deactivate a timer
+ * @timer: timer to delete
+ *
+ * This function tries to deactivate a timer. Upon successful (ret >= 0)
+ * exit the timer is not queued and the handler is not running on any CPU.
+ */
+int try_to_del_timer_sync(struct timer_list *timer)
+{
+ struct timer_base *base;
+
+ return __try_to_del_timer_sync(timer, &base);
+}
EXPORT_SYMBOL(try_to_del_timer_sync);
#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT_FULL)
+static int __del_timer_sync(struct timer_list *timer)
+{
+ struct timer_base *base;
+ int ret;
+
+ for (;;) {
+ ret = __try_to_del_timer_sync(timer, &base);
+ if (ret >= 0)
+ return ret;
+
+ /*
+ * When accessing the lock, timers of base are no longer expired
+ * and so timer is no longer running.
+ */
+ spin_lock(&base->expiry_lock);
+ spin_unlock(&base->expiry_lock);
+ }
+}
+
/**
* del_timer_sync - deactivate a timer and wait for the handler to finish.
* @timer: the timer to be deactivated
@@ -1327,12 +1324,8 @@ int del_timer_sync(struct timer_list *timer)
* could lead to deadlock.
*/
WARN_ON(in_irq() && !(timer->flags & TIMER_IRQSAFE));
- for (;;) {
- int ret = try_to_del_timer_sync(timer);
- if (ret >= 0)
- return ret;
- wait_for_running_timer(timer);
- }
+
+ return __del_timer_sync(timer);
}
EXPORT_SYMBOL(del_timer_sync);
#endif
@@ -1397,11 +1390,15 @@ static void expire_timers(struct timer_base *base, struct hlist_head *head)
raw_spin_unlock(&base->lock);
call_timer_fn(timer, fn);
base->running_timer = NULL;
+ spin_unlock(&base->expiry_lock);
+ spin_lock(&base->expiry_lock);
raw_spin_lock(&base->lock);
} else {
raw_spin_unlock_irq(&base->lock);
call_timer_fn(timer, fn);
base->running_timer = NULL;
+ spin_unlock(&base->expiry_lock);
+ spin_lock(&base->expiry_lock);
raw_spin_lock_irq(&base->lock);
}
}
@@ -1696,6 +1693,7 @@ static inline void __run_timers(struct timer_base *base)
if (!time_after_eq(jiffies, base->clk))
return;
+ spin_lock(&base->expiry_lock);
raw_spin_lock_irq(&base->lock);
/*
@@ -1723,7 +1721,7 @@ static inline void __run_timers(struct timer_base *base)
expire_timers(base, heads + levels);
}
raw_spin_unlock_irq(&base->lock);
- wakeup_timer_waiters(base);
+ spin_unlock(&base->expiry_lock);
}
/*
@@ -1970,9 +1968,7 @@ static void __init init_timer_cpu(int cpu)
base->cpu = cpu;
raw_spin_lock_init(&base->lock);
base->clk = jiffies;
-#ifdef CONFIG_PREEMPT_RT_FULL
- init_swait_queue_head(&base->wait_for_running_timer);
-#endif
+ spin_lock_init(&base->expiry_lock);
}
}
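
Editor's note: the expiry_lock handshake added above replaces the RT-only
swait machinery with plain lock/unlock pairs. Below is a compilable
user-space analogue of the pattern, using pthread mutexes in place of the
kernel spinlocks; all names (struct base, run_timers_sketch, and so on)
are illustrative, not the kernel's.

#include <pthread.h>
#include <stdbool.h>

struct base {
	pthread_mutex_t lock;        /* stands in for base->lock */
	pthread_mutex_t expiry_lock; /* held across the expiry run */
	void *running_timer;
};

/* Expiry side: hold expiry_lock while callbacks run, and drop/retake
 * it after each callback so a canceling thread can get in between. */
static void run_timers_sketch(struct base *b, void *timer, void (*fn)(void))
{
	pthread_mutex_lock(&b->expiry_lock);
	pthread_mutex_lock(&b->lock);
	b->running_timer = timer;
	pthread_mutex_unlock(&b->lock);

	fn();                        /* the timer callback runs unlocked */

	b->running_timer = NULL;
	pthread_mutex_unlock(&b->expiry_lock);
	pthread_mutex_lock(&b->expiry_lock);  /* window for waiters */
	pthread_mutex_unlock(&b->expiry_lock);
}

/* Cancel side: if the callback is running, block on expiry_lock rather
 * than busy-wait, then retry -- the same shape as __del_timer_sync(). */
static bool del_timer_sync_sketch(struct base *b, void *timer)
{
	for (;;) {
		pthread_mutex_lock(&b->lock);
		bool running = (b->running_timer == timer);
		pthread_mutex_unlock(&b->lock);
		if (!running)
			return true;
		pthread_mutex_lock(&b->expiry_lock);
		pthread_mutex_unlock(&b->expiry_lock);
	}
}

The design point is that blocking on a lock the expiry path already holds
gives priority inheritance for free on PREEMPT_RT, which a wait queue
cannot provide.
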
diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
index 6c28d519447d..83c4e76f513a 100644
--- a/kernel/trace/bpf_trace.c
+++ b/kernel/trace/bpf_trace.c
@@ -365,8 +365,6 @@ static const struct bpf_func_proto bpf_perf_event_read_value_proto = {
.arg4_type = ARG_CONST_SIZE,
};
-static DEFINE_PER_CPU(struct perf_sample_data, bpf_trace_sd);
-
static __always_inline u64
__bpf_perf_event_output(struct pt_regs *regs, struct bpf_map *map,
u64 flags, struct perf_sample_data *sd)
@@ -398,24 +396,50 @@ __bpf_perf_event_output(struct pt_regs *regs, struct bpf_map *map,
return 0;
}
+/*
+ * Support executing tracepoints in normal, irq, and nmi context that each
+ * call bpf_perf_event_output: one perf_sample_data slot per nesting level.
+ */
+struct bpf_trace_sample_data {
+ struct perf_sample_data sds[3];
+};
+
+static DEFINE_PER_CPU(struct bpf_trace_sample_data, bpf_trace_sds);
+static DEFINE_PER_CPU(int, bpf_trace_nest_level);
BPF_CALL_5(bpf_perf_event_output, struct pt_regs *, regs, struct bpf_map *, map,
u64, flags, void *, data, u64, size)
{
- struct perf_sample_data *sd = this_cpu_ptr(&bpf_trace_sd);
+ struct bpf_trace_sample_data *sds = this_cpu_ptr(&bpf_trace_sds);
+ int nest_level = this_cpu_inc_return(bpf_trace_nest_level);
struct perf_raw_record raw = {
.frag = {
.size = size,
.data = data,
},
};
+ struct perf_sample_data *sd;
+ int err;
- if (unlikely(flags & ~(BPF_F_INDEX_MASK)))
- return -EINVAL;
+ if (WARN_ON_ONCE(nest_level > ARRAY_SIZE(sds->sds))) {
+ err = -EBUSY;
+ goto out;
+ }
+
+ sd = &sds->sds[nest_level - 1];
+
+ if (unlikely(flags & ~(BPF_F_INDEX_MASK))) {
+ err = -EINVAL;
+ goto out;
+ }
perf_sample_data_init(sd, 0, 0);
sd->raw = &raw;
- return __bpf_perf_event_output(regs, map, flags, sd);
+ err = __bpf_perf_event_output(regs, map, flags, sd);
+
+out:
+ this_cpu_dec(bpf_trace_nest_level);
+ return err;
}
static const struct bpf_func_proto bpf_perf_event_output_proto = {
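
The nesting scheme above budgets one buffer per context that can preempt
the others (normal, irq, nmi). A self-contained sketch of the same idea
follows, with thread-local variables standing in for per-CPU data; note
that the kernel's this_cpu_inc_return() is interrupt-safe on its CPU,
while the plain increment here is only an analogue, and all names
(MAX_NEST, emit_sample) are hypothetical.

#include <errno.h>
#include <stdio.h>

#define MAX_NEST 3                   /* normal, irq, and nmi context */

struct sample { int payload; };

/* Thread-local stand-ins for the per-CPU slot array and nest counter. */
static _Thread_local struct sample slots[MAX_NEST];
static _Thread_local int nest_level;

static int emit_sample(int payload)
{
	int err = 0;

	/* Claim the slot matching the current nesting depth. */
	if (++nest_level > MAX_NEST) {
		err = -EBUSY;        /* nested deeper than budgeted */
		goto out;
	}
	slots[nest_level - 1].payload = payload;
	printf("level %d: payload %d\n", nest_level,
	       slots[nest_level - 1].payload);
out:
	nest_level--;
	return err;
}

int main(void)
{
	return emit_sample(42) ? 1 : 0;
}
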
@@ -772,16 +796,48 @@ pe_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
/*
* bpf_raw_tp_regs are separate from bpf_pt_regs used from skb/xdp
* to avoid potential recursive reuse issue when/if tracepoints are added
- * inside bpf_*_event_output, bpf_get_stackid and/or bpf_get_stack
+ * inside bpf_*_event_output, bpf_get_stackid and/or bpf_get_stack.
+ *
+ * Since raw tracepoints run despite bpf_prog_active, support concurrent usage
+ * in normal, irq, and nmi context.
*/
-static DEFINE_PER_CPU(struct pt_regs, bpf_raw_tp_regs);
+struct bpf_raw_tp_regs {
+ struct pt_regs regs[3];
+};
+static DEFINE_PER_CPU(struct bpf_raw_tp_regs, bpf_raw_tp_regs);
+static DEFINE_PER_CPU(int, bpf_raw_tp_nest_level);
+static struct pt_regs *get_bpf_raw_tp_regs(void)
+{
+ struct bpf_raw_tp_regs *tp_regs = this_cpu_ptr(&bpf_raw_tp_regs);
+ int nest_level = this_cpu_inc_return(bpf_raw_tp_nest_level);
+
+ if (WARN_ON_ONCE(nest_level > ARRAY_SIZE(tp_regs->regs))) {
+ this_cpu_dec(bpf_raw_tp_nest_level);
+ return ERR_PTR(-EBUSY);
+ }
+
+ return &tp_regs->regs[nest_level - 1];
+}
+
+static void put_bpf_raw_tp_regs(void)
+{
+ this_cpu_dec(bpf_raw_tp_nest_level);
+}
+
BPF_CALL_5(bpf_perf_event_output_raw_tp, struct bpf_raw_tracepoint_args *, args,
struct bpf_map *, map, u64, flags, void *, data, u64, size)
{
- struct pt_regs *regs = this_cpu_ptr(&bpf_raw_tp_regs);
+ struct pt_regs *regs = get_bpf_raw_tp_regs();
+ int ret;
+
+ if (IS_ERR(regs))
+ return PTR_ERR(regs);
perf_fetch_caller_regs(regs);
- return ____bpf_perf_event_output(regs, map, flags, data, size);
+ ret = ____bpf_perf_event_output(regs, map, flags, data, size);
+
+ put_bpf_raw_tp_regs();
+ return ret;
}
static const struct bpf_func_proto bpf_perf_event_output_proto_raw_tp = {
@@ -798,12 +854,18 @@ static const struct bpf_func_proto bpf_perf_event_output_proto_raw_tp = {
BPF_CALL_3(bpf_get_stackid_raw_tp, struct bpf_raw_tracepoint_args *, args,
struct bpf_map *, map, u64, flags)
{
- struct pt_regs *regs = this_cpu_ptr(&bpf_raw_tp_regs);
+ struct pt_regs *regs = get_bpf_raw_tp_regs();
+ int ret;
+
+ if (IS_ERR(regs))
+ return PTR_ERR(regs);
perf_fetch_caller_regs(regs);
/* similar to bpf_perf_event_output_tp, but pt_regs fetched differently */
- return bpf_get_stackid((unsigned long) regs, (unsigned long) map,
- flags, 0, 0);
+ ret = bpf_get_stackid((unsigned long) regs, (unsigned long) map,
+ flags, 0, 0);
+ put_bpf_raw_tp_regs();
+ return ret;
}
static const struct bpf_func_proto bpf_get_stackid_proto_raw_tp = {
@@ -818,11 +880,17 @@ static const struct bpf_func_proto bpf_get_stackid_proto_raw_tp = {
BPF_CALL_4(bpf_get_stack_raw_tp, struct bpf_raw_tracepoint_args *, args,
void *, buf, u32, size, u64, flags)
{
- struct pt_regs *regs = this_cpu_ptr(&bpf_raw_tp_regs);
+ struct pt_regs *regs = get_bpf_raw_tp_regs();
+ int ret;
+
+ if (IS_ERR(regs))
+ return PTR_ERR(regs);
perf_fetch_caller_regs(regs);
- return bpf_get_stack((unsigned long) regs, (unsigned long) buf,
- (unsigned long) size, flags, 0);
+ ret = bpf_get_stack((unsigned long) regs, (unsigned long) buf,
+ (unsigned long) size, flags, 0);
+ put_bpf_raw_tp_regs();
+ return ret;
}
static const struct bpf_func_proto bpf_get_stack_proto_raw_tp = {
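
The get/put pair threaded through the three helpers above follows the
kernel's ERR_PTR convention: the acquire function returns either the
resource or an encoded errno, never NULL, and undoes its claim on
failure. A minimal user-space rendition of that acquire/release shape,
with simplified ERR_PTR helpers and hypothetical get_regs/put_regs names:

#include <errno.h>

/* Simplified user-space versions of the kernel's ERR_PTR helpers. */
static inline void *ERR_PTR(long error) { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-4095;
}

struct regs { int dummy; };
static struct regs the_regs;
static int in_use;

/* Acquire: undo the claim on failure, as get_bpf_raw_tp_regs() does. */
static struct regs *get_regs(void)
{
	if (++in_use > 1) {
		in_use--;
		return ERR_PTR(-EBUSY);
	}
	return &the_regs;
}

static void put_regs(void)
{
	in_use--;
}

int main(void)
{
	struct regs *r = get_regs();

	if (IS_ERR(r))
		return (int)-PTR_ERR(r);
	r->dummy = 1;                /* ... use the buffer ... */
	put_regs();
	return 0;
}
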
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 1688782f3dfb..118ecce14386 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -2952,14 +2952,13 @@ static int ftrace_update_code(struct module *mod, struct ftrace_page *new_pgs)
p = &pg->records[i];
p->flags = rec_flags;
-#ifndef CC_USING_NOP_MCOUNT
/*
* Do the initial record conversion from mcount jump
* to the NOP instructions.
*/
- if (!ftrace_code_disable(mod, p))
+ if (!__is_defined(CC_USING_NOP_MCOUNT) &&
+ !ftrace_code_disable(mod, p))
break;
-#endif
update_cnt++;
}
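
Replacing the #ifndef block with __is_defined() keeps the
ftrace_code_disable() call visible to the compiler in every
configuration, so it is type-checked even when compiled out. A
stand-alone illustration of the idiom, with the macro machinery
simplified from include/linux/kconfig.h and a hypothetical FEATURE_X
knob:

#include <stdio.h>

/* Yields 1 when the macro is defined to 1, otherwise 0 -- as a plain
 * C expression rather than a preprocessor conditional. */
#define __ARG_PLACEHOLDER_1 0,
#define __take_second_arg(__ignored, val, ...) val
#define ____is_defined(arg1_or_junk) __take_second_arg(arg1_or_junk 1, 0)
#define ___is_defined(val) ____is_defined(__ARG_PLACEHOLDER_##val)
#define __is_defined(x) ___is_defined(x)

#define FEATURE_X 1                  /* comment out to flip the result */

int main(void)
{
	/* Both branches stay visible to the compiler, unlike #ifdef,
	 * so the disabled one is type-checked before dead-code
	 * elimination removes it. */
	if (!__is_defined(FEATURE_X))
		printf("fallback path\n");
	printf("FEATURE_X enabled: %d\n", __is_defined(FEATURE_X));
	return 0;
}
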
@@ -4208,10 +4207,13 @@ void free_ftrace_func_mapper(struct ftrace_func_mapper *mapper,
struct ftrace_func_entry *entry;
struct ftrace_func_map *map;
struct hlist_head *hhd;
- int size = 1 << mapper->hash.size_bits;
- int i;
+ int size, i;
+
+ if (!mapper)
+ return;
if (free_func && mapper->hash.count) {
+ size = 1 << mapper->hash.size_bits;
for (i = 0; i < size; i++) {
hhd = &mapper->hash.buckets[i];
hlist_for_each_entry(entry, hhd, hlist) {
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 2ce8428d0f45..f82da65109eb 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -6480,11 +6480,13 @@ tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
break;
}
#endif
- if (!tr->allocated_snapshot) {
+ if (tr->allocated_snapshot)
+ ret = resize_buffer_duplicate_size(&tr->max_buffer,
+ &tr->trace_buffer, iter->cpu_file);
+ else
ret = tracing_alloc_snapshot_instance(tr);
- if (ret < 0)
- break;
- }
+ if (ret < 0)
+ break;
local_irq_disable();
/* Now, we're going to swap */
if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
@@ -8360,12 +8362,8 @@ void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
cnt++;
- /* reset all but tr, trace, and overruns */
- memset(&iter.seq, 0,
- sizeof(struct trace_iterator) -
- offsetof(struct trace_iterator, seq));
+ trace_iterator_reset(&iter);
iter.iter_flags |= TRACE_FILE_LAT_FMT;
- iter.pos = -1;
if (trace_find_next_entry_inc(&iter) != NULL) {
int ret;
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index 65afd0c04622..6eba8c96f4bc 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -1897,4 +1897,22 @@ static inline void tracer_hardirqs_off(unsigned long a0, unsigned long a1) { }
extern struct trace_iterator *tracepoint_print_iter;
+/*
+ * Reset the state of the trace_iterator so that it can read consumed data.
+ * Normally, the trace_iterator reads data without consuming it, and must
+ * therefore retain its state between reads.
+ */
+static __always_inline void trace_iterator_reset(struct trace_iterator *iter)
+{
+ const size_t offset = offsetof(struct trace_iterator, seq);
+
+ /*
+ * Keep gcc from complaining about overwriting more than just one
+ * member in the structure.
+ */
+ memset((char *)iter + offset, 0, sizeof(struct trace_iterator) - offset);
+
+ iter->pos = -1;
+}
+
#endif /* _LINUX_KERNEL_TRACE_H */
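
trace_iterator_reset() relies on the offsetof()-based partial-memset
idiom: zero the tail of a structure while preserving its leading
members. A small sketch of the pattern with a hypothetical struct iter
(not the real trace_iterator layout):

#include <stddef.h>
#include <stdio.h>
#include <string.h>

/* Hypothetical struct: members before 'seq' must survive a reset. */
struct iter {
	void *tr;                    /* preserved */
	void *trace;                 /* preserved */
	unsigned long overruns;      /* preserved */
	char seq[64];                /* transient state starts here */
	long pos;
};

/* Zero everything from 'seq' onward, then restore the sentinel. */
static void iter_reset(struct iter *it)
{
	const size_t off = offsetof(struct iter, seq);

	memset((char *)it + off, 0, sizeof(*it) - off);
	it->pos = -1;
}

int main(void)
{
	struct iter it = { .tr = &it, .pos = 123 };

	iter_reset(&it);
	printf("tr kept: %d, pos now: %ld\n", it.tr == &it, it.pos);
	return 0;
}

Keeping the idiom behind one inline helper, as the hunk above does, also
means ftrace_dump() and the kdb dumper below cannot drift apart again.
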
diff --git a/kernel/trace/trace_branch.c b/kernel/trace/trace_branch.c
index 3ea65cdff30d..4ad967453b6f 100644
--- a/kernel/trace/trace_branch.c
+++ b/kernel/trace/trace_branch.c
@@ -205,8 +205,6 @@ void trace_likely_condition(struct ftrace_likely_data *f, int val, int expect)
void ftrace_likely_update(struct ftrace_likely_data *f, int val,
int expect, int is_constant)
{
- unsigned long flags = user_access_save();
-
/* A constant is always correct */
if (is_constant) {
f->constant++;
@@ -225,8 +223,6 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val,
f->data.correct++;
else
f->data.incorrect++;
-
- user_access_restore(flags);
}
EXPORT_SYMBOL(ftrace_likely_update);
diff --git a/kernel/trace/trace_events_hist.c b/kernel/trace/trace_events_hist.c
index 11853e90b649..3f34cfb66a85 100644
--- a/kernel/trace/trace_events_hist.c
+++ b/kernel/trace/trace_events_hist.c
@@ -1632,6 +1632,9 @@ static u64 hist_field_var_ref(struct hist_field *hist_field,
struct hist_elt_data *elt_data;
u64 var_val = 0;
+ if (WARN_ON_ONCE(!elt))
+ return var_val;
+
elt_data = elt->private_data;
var_val = elt_data->var_ref_vals[hist_field->var_ref_idx];
diff --git a/kernel/trace/trace_kdb.c b/kernel/trace/trace_kdb.c
index 810d78a8d14c..2905a3dd94c1 100644
--- a/kernel/trace/trace_kdb.c
+++ b/kernel/trace/trace_kdb.c
@@ -41,12 +41,8 @@ static void ftrace_dump_buf(int skip_lines, long cpu_file)
kdb_printf("Dumping ftrace buffer:\n");
- /* reset all but tr, trace, and overruns */
- memset(&iter.seq, 0,
- sizeof(struct trace_iterator) -
- offsetof(struct trace_iterator, seq));
+ trace_iterator_reset(&iter);
iter.iter_flags |= TRACE_FILE_LAT_FMT;
- iter.pos = -1;
if (cpu_file == RING_BUFFER_ALL_CPUS) {
for_each_tracing_cpu(cpu) {