From c5ed5145591774bd9a2960ba4ca45a02fc70aad1 Mon Sep 17 00:00:00 2001
From: Peter Zijlstra
Date: Mon, 17 Jan 2011 13:45:37 +0100
Subject: perf: Fix contexted inheritance

Linus reported that the RCU lockdep annotation bits triggered for this
rcu_dereference() because we're not holding rcu_read_lock().

Going over the code I cannot convince myself it's correct:

 - holding a ref on the parent_ctx doesn't avoid it being uncloned
   concurrently (as the comment says), so we can race with a free.

 - holding parent_ctx->mutex doesn't avoid the above free from taking
   place either; it would at best avoid parent_ctx itself from being
   freed.

I.e. the warning is correct. To fix the bug, serialize against the
unclone_ctx() call by extending the reach of the parent_ctx->lock.

Reported-by: Linus Torvalds
Signed-off-by: Peter Zijlstra
Cc: Paul Mackerras
Cc: Paul E. McKenney
LKML-Reference:
Signed-off-by: Ingo Molnar
---
 kernel/perf_event.c | 11 +++++------
 1 file changed, 5 insertions(+), 6 deletions(-)

(limited to 'kernel')

diff --git a/kernel/perf_event.c b/kernel/perf_event.c
index b782b7a79f00..76be4c7bf08e 100644
--- a/kernel/perf_event.c
+++ b/kernel/perf_event.c
@@ -6494,7 +6494,6 @@ int perf_event_init_context(struct task_struct *child, int ctxn)
 
 	raw_spin_lock_irqsave(&parent_ctx->lock, flags);
 	parent_ctx->rotate_disable = 0;
-	raw_spin_unlock_irqrestore(&parent_ctx->lock, flags);
 
 	child_ctx = child->perf_event_ctxp[ctxn];
 
@@ -6502,12 +6501,11 @@ int perf_event_init_context(struct task_struct *child, int ctxn)
 		/*
 		 * Mark the child context as a clone of the parent
 		 * context, or of whatever the parent is a clone of.
-		 * Note that if the parent is a clone, it could get
-		 * uncloned at any point, but that doesn't matter
-		 * because the list of events and the generation
-		 * count can't have changed since we took the mutex.
+		 *
+		 * Note that if the parent is a clone, the holding of
+		 * parent_ctx->lock avoids it from being uncloned.
 		 */
-		cloned_ctx = rcu_dereference(parent_ctx->parent_ctx);
+		cloned_ctx = parent_ctx->parent_ctx;
 		if (cloned_ctx) {
 			child_ctx->parent_ctx = cloned_ctx;
 			child_ctx->parent_gen = parent_ctx->parent_gen;
@@ -6518,6 +6516,7 @@ int perf_event_init_context(struct task_struct *child, int ctxn)
 		get_ctx(child_ctx->parent_ctx);
 	}
 
+	raw_spin_unlock_irqrestore(&parent_ctx->lock, flags);
 	mutex_unlock(&parent_ctx->mutex);
 	perf_unpin_context(parent_ctx);
 
--
cgit v1.2.3
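The essence of the patch above: the load of parent_ctx->parent_ctx and the
get_ctx() on it must happen inside the same parent_ctx->lock section that
unclone_ctx() runs under, so the clone pointer cannot be cleared and its
target freed between the load and the reference grab. The following is a
minimal user-space model of that locking discipline, not kernel code: the
ctx structure, unclone() and inherit() are illustrative stand-ins for
perf_event_context, unclone_ctx() and perf_event_init_context(), and
pthread spinlocks stand in for raw_spin_lock_irqsave().

/* Minimal model of the widened lock section -- illustrative only. */
#include <pthread.h>
#include <stdio.h>

struct ctx {
	pthread_spinlock_t lock;	/* models parent_ctx->lock */
	struct ctx *parent_ctx;		/* the pointer unclone clears */
	int refcount;			/* models get_ctx()/put_ctx() */
};

/* Models unclone_ctx(); the caller holds ctx->lock. */
static void unclone(struct ctx *ctx)
{
	if (ctx->parent_ctx) {
		ctx->parent_ctx->refcount--;	/* may free it in the kernel */
		ctx->parent_ctx = NULL;
	}
}

/*
 * Models the fixed inheritance path: the read of parent_ctx->parent_ctx
 * and the reference grab happen under the same lock unclone() takes, so
 * nothing can be freed between the two steps.
 */
static struct ctx *inherit(struct ctx *parent_ctx)
{
	struct ctx *cloned_ctx;

	pthread_spin_lock(&parent_ctx->lock);
	cloned_ctx = parent_ctx->parent_ctx;	/* plain load, lock held */
	if (cloned_ctx)
		cloned_ctx->refcount++;		/* models get_ctx() */
	pthread_spin_unlock(&parent_ctx->lock);

	return cloned_ctx;
}

int main(void)
{
	struct ctx grandparent = { .parent_ctx = NULL, .refcount = 1 };
	struct ctx parent = { .parent_ctx = &grandparent, .refcount = 1 };
	struct ctx *cloned;

	pthread_spin_init(&grandparent.lock, PTHREAD_PROCESS_PRIVATE);
	pthread_spin_init(&parent.lock, PTHREAD_PROCESS_PRIVATE);
	grandparent.refcount++;		/* ref held via parent.parent_ctx */

	cloned = inherit(&parent);	/* safe against a concurrent unclone */
	printf("cloned=%p refcount=%d\n",
	       (void *)cloned, cloned ? cloned->refcount : 0);

	pthread_spin_lock(&parent.lock);
	unclone(&parent);		/* the writer the old code raced with */
	pthread_spin_unlock(&parent.lock);
	return 0;
}

The sketch is single-threaded for brevity; what matters is the ordering it
encodes. Because inherit() and unclone() take the same lock, the plain load
in inherit() can never observe a pointer whose target is being torn down,
which is why the patch can drop the rcu_dereference().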
From 22a4ec729017ba613337a28f306f94ba5023fe2e Mon Sep 17 00:00:00 2001
From: Oleg Nesterov
Date: Tue, 18 Jan 2011 17:10:08 +0100
Subject: perf: Find_get_context: fix the per-cpu-counter check

If task == NULL, find_get_context() should always check that cpu
is correct.

Afaics, the bug was introduced by 38a81da2 "perf events: Clean up
pid passing", but even before that commit "&& cpu != -1" was not
exactly right: -ESRCH from find_task_by_vpid() is not accurate.

Signed-off-by: Oleg Nesterov
Acked-by: Peter Zijlstra
Cc: Alan Stern
Cc: Arnaldo Carvalho de Melo
Cc: Frederic Weisbecker
Cc: Paul Mackerras
Cc: Prasad
Cc: Roland McGrath
Cc: gregkh@suse.de
Cc: stable@kernel.org
LKML-Reference: <20110118161008.GB693@redhat.com>
Signed-off-by: Ingo Molnar
---
 kernel/perf_event.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'kernel')

diff --git a/kernel/perf_event.c b/kernel/perf_event.c
index 76be4c7bf08e..a962b1962eee 100644
--- a/kernel/perf_event.c
+++ b/kernel/perf_event.c
@@ -2228,7 +2228,7 @@ find_get_context(struct pmu *pmu, struct task_struct *task, int cpu)
 	unsigned long flags;
 	int ctxn, err;
 
-	if (!task && cpu != -1) {
+	if (!task) {
 		/* Must be root to operate on a CPU event: */
 		if (perf_paranoid_cpu() && !capable(CAP_SYS_ADMIN))
 			return ERR_PTR(-EACCES);
--
cgit v1.2.3

From 66832eb4baaaa9abe4c993ddf9113a79e39b9915 Mon Sep 17 00:00:00 2001
From: Oleg Nesterov
Date: Tue, 18 Jan 2011 17:10:32 +0100
Subject: perf: Validate cpu early in perf_event_alloc()

Starting from perf_event_alloc()->perf_init_event(), the kernel
assumes that event->cpu is either -1 or a valid CPU number.

Change perf_event_alloc() to validate this argument early. This also
means we can remove the similar check in find_get_context().

Signed-off-by: Oleg Nesterov
Acked-by: Peter Zijlstra
Cc: Alan Stern
Cc: Arnaldo Carvalho de Melo
Cc: Frederic Weisbecker
Cc: Paul Mackerras
Cc: Prasad
Cc: Roland McGrath
Cc: gregkh@suse.de
Cc: stable@kernel.org
LKML-Reference: <20110118161032.GC693@redhat.com>
Signed-off-by: Ingo Molnar
---
 kernel/perf_event.c | 10 ++++++----
 1 file changed, 6 insertions(+), 4 deletions(-)

(limited to 'kernel')

diff --git a/kernel/perf_event.c b/kernel/perf_event.c
index a962b1962eee..67d9bd70b746 100644
--- a/kernel/perf_event.c
+++ b/kernel/perf_event.c
@@ -2233,9 +2233,6 @@ find_get_context(struct pmu *pmu, struct task_struct *task, int cpu)
 		if (perf_paranoid_cpu() && !capable(CAP_SYS_ADMIN))
 			return ERR_PTR(-EACCES);
 
-		if (cpu < 0 || cpu >= nr_cpumask_bits)
-			return ERR_PTR(-EINVAL);
-
 		/*
 		 * We could be clever and allow to attach a event to an
 		 * offline CPU and activate it when the CPU comes up, but
@@ -5541,6 +5538,11 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
 	struct hw_perf_event *hwc;
 	long err;
 
+	if ((unsigned)cpu >= nr_cpu_ids) {
+		if (!task || cpu != -1)
+			return ERR_PTR(-EINVAL);
+	}
+
 	event = kzalloc(sizeof(*event), GFP_KERNEL);
 	if (!event)
 		return ERR_PTR(-ENOMEM);
@@ -5589,7 +5591,7 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
 
 	if (!overflow_handler && parent_event)
 		overflow_handler = parent_event->overflow_handler;
-
+
 	event->overflow_handler = overflow_handler;
 
 	if (attr->disabled)
--
cgit v1.2.3
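Why the single "(unsigned)cpu >= nr_cpu_ids" comparison added above is
enough may not be obvious at first glance. The stand-alone sketch below
reproduces just that check; nr_cpu_ids is stubbed with an arbitrary value
and the task pointer is reduced to a flag, so everything around the
two-line test is assumed scaffolding rather than kernel API.

/* Stand-alone illustration of the new range check -- illustrative only. */
#include <stdbool.h>
#include <stdio.h>

static unsigned int nr_cpu_ids = 8;	/* stand-in for the kernel variable */

/* Mirrors the check added to perf_event_alloc(): true means "accepted". */
static bool cpu_arg_valid(bool has_task, int cpu)
{
	if ((unsigned)cpu >= nr_cpu_ids) {
		/*
		 * A negative cpu wraps to a huge unsigned value, so this one
		 * branch catches both cpu < 0 and cpu >= nr_cpu_ids. The only
		 * out-of-range value still allowed is cpu == -1, and only for
		 * a per-task event.
		 */
		if (!has_task || cpu != -1)
			return false;
	}
	return true;
}

int main(void)
{
	struct { bool has_task; int cpu; } tests[] = {
		{ false,  3 },	/* CPU event on a valid CPU    -> ok      */
		{ false, -1 },	/* CPU event needs a real CPU  -> -EINVAL */
		{ true,  -1 },	/* task event, any CPU         -> ok      */
		{ true,  42 },	/* task event on a bogus CPU   -> -EINVAL */
		{ false, 99 },	/* CPU event on a bogus CPU    -> -EINVAL */
	};

	for (unsigned i = 0; i < sizeof(tests) / sizeof(tests[0]); i++)
		printf("task=%d cpu=%3d -> %s\n", tests[i].has_task,
		       tests[i].cpu,
		       cpu_arg_valid(tests[i].has_task, tests[i].cpu) ?
		       "ok" : "-EINVAL");
	return 0;
}

The unsigned cast is the whole trick: -1 wraps to UINT_MAX, so one
comparison rejects both negative and too-large values, and the inner
"!task || cpu != -1" test then re-admits exactly the cpu == -1 case that
per-task events rely on.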