1
0
Fork 0

perf: Robustify event->owner usage and SMP ordering

Use smp_store_release() to clear event->owner and
lockless_dereference() to observe it. Further use READ_ONCE() for all
lockless reads.

This changes perf_remove_from_owner() to leave event->owner cleared.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: David Ahern <dsahern@gmail.com>
Cc: Jiri Olsa <jolsa@redhat.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Stephane Eranian <eranian@google.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Vince Weaver <vincent.weaver@maine.edu>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
hifive-unleashed-5.1
Peter Zijlstra 2016-01-26 12:30:14 +01:00 committed by Ingo Molnar
parent 6e801e0169
commit f47c02c0c8
1 changed files with 10 additions and 10 deletions

View File

@@ -152,7 +152,7 @@ static void perf_ctx_unlock(struct perf_cpu_context *cpuctx,
static bool is_kernel_event(struct perf_event *event) static bool is_kernel_event(struct perf_event *event)
{ {
return event->owner == TASK_TOMBSTONE; return READ_ONCE(event->owner) == TASK_TOMBSTONE;
} }
/* /*
@@ -1651,7 +1651,7 @@ out:
*/ */
static bool is_orphaned_event(struct perf_event *event) static bool is_orphaned_event(struct perf_event *event)
{ {
return event && !is_kernel_event(event) && !event->owner; return event && !is_kernel_event(event) && !READ_ONCE(event->owner);
} }
/* /*
@@ -3733,14 +3733,13 @@ static void perf_remove_from_owner(struct perf_event *event)
struct task_struct *owner; struct task_struct *owner;
rcu_read_lock(); rcu_read_lock();
owner = ACCESS_ONCE(event->owner);
/* /*
* Matches the smp_wmb() in perf_event_exit_task(). If we observe * Matches the smp_store_release() in perf_event_exit_task(). If we
* !owner it means the list deletion is complete and we can indeed * observe !owner it means the list deletion is complete and we can
* free this event, otherwise we need to serialize on * indeed free this event, otherwise we need to serialize on
* owner->perf_event_mutex. * owner->perf_event_mutex.
*/ */
smp_read_barrier_depends(); owner = lockless_dereference(event->owner);
if (owner) { if (owner) {
/* /*
* Since delayed_put_task_struct() also drops the last * Since delayed_put_task_struct() also drops the last
@@ -3768,8 +3767,10 @@ static void perf_remove_from_owner(struct perf_event *event)
* ensured they're done, and we can proceed with freeing the * ensured they're done, and we can proceed with freeing the
* event. * event.
*/ */
if (event->owner) if (event->owner) {
list_del_init(&event->owner_entry); list_del_init(&event->owner_entry);
smp_store_release(&event->owner, NULL);
}
mutex_unlock(&owner->perf_event_mutex); mutex_unlock(&owner->perf_event_mutex);
put_task_struct(owner); put_task_struct(owner);
} }
@@ -8829,8 +8830,7 @@ void perf_event_exit_task(struct task_struct *child)
* the owner, closes a race against perf_release() where * the owner, closes a race against perf_release() where
* we need to serialize on the owner->perf_event_mutex. * we need to serialize on the owner->perf_event_mutex.
*/ */
smp_wmb(); smp_store_release(&event->owner, NULL);
event->owner = NULL;
} }
mutex_unlock(&child->perf_event_mutex); mutex_unlock(&child->perf_event_mutex);