perf: Fix perf mmap bugs

Vince reported a problem found by his perf-specific trinity
fuzzer.

Al noticed 2 problems with perf's mmap():

 - it has issues against fork() since we use vma->vm_mm for accounting.
 - it has an rb refcount leak on double mmap().

We fix the fork() issues by setting VM_DONTCOPY on the mapping; I
don't think there's code out there that relies on a perf mmap()
surviving fork() -- we never heard about the weird accounting
problems/crashes that would have caused. If it ever does need to
work, the previously proposed VM_PINNED could make it work.
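
For illustration, a minimal userspace sketch of what VM_DONTCOPY
means here. This is not part of the patch; the event type, buffer
size and the absent error handling are all assumptions, and it needs
a kernel with this fix plus a permissive perf_event_paranoid:

#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <sys/mman.h>
#include <sys/wait.h>
#include <string.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

int main(void)
{
	struct perf_event_attr attr;
	size_t len = sysconf(_SC_PAGESIZE) * (1 + 8); /* header + 2^3 data pages */

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_SOFTWARE;
	attr.config = PERF_COUNT_SW_CPU_CLOCK;

	int fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
	if (fd < 0) { perror("perf_event_open"); exit(1); }

	void *buf = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (buf == MAP_FAILED) { perror("mmap"); exit(1); }

	if (fork() == 0) {
		/* With VM_DONTCOPY the mapping is absent in the child,
		 * so this dereference is expected to die with SIGSEGV. */
		printf("child read %d\n", *(volatile int *)buf);
		_exit(0);
	}

	int status;
	wait(&status);
	printf("child %s\n", WIFSIGNALED(status) ?
	       "faulted, mapping not inherited" : "inherited the mapping");
	return 0;
}

With VM_DONTCOPY the mapping simply doesn't exist in the child, so
there is no inherited mapping whose pages could be charged against
the wrong mm.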

Aside from the rb reference leak spotted by Al, Vince's example
prog was indeed doing a double mmap() through the use of
perf_event_set_output().
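
A hedged reconstruction of that shape (not Vince's actual program;
names, sizes and the missing error handling are assumptions): two
events, the second redirected into the first's ring buffer with
PERF_EVENT_IOC_SET_OUTPUT, then both mmap()ed -- one buffer, two
mappings:

#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <string.h>
#include <unistd.h>

static int perf_open(struct perf_event_attr *attr)
{
	return syscall(__NR_perf_event_open, attr, 0, -1, -1, 0);
}

int main(void)
{
	struct perf_event_attr attr;
	size_t len = sysconf(_SC_PAGESIZE) * (1 + 8); /* buffer sizes must match */

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_SOFTWARE;
	attr.config = PERF_COUNT_SW_TASK_CLOCK;

	int fd1 = perf_open(&attr);
	int fd2 = perf_open(&attr);

	void *m1 = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd1, 0);
	ioctl(fd2, PERF_EVENT_IOC_SET_OUTPUT, fd1); /* fd2 writes into fd1's rb */
	void *m2 = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd2, 0);

	/* One ring_buffer, two mappings: tearing both down must drop the
	 * buffer -- and its locked-memory charge -- exactly once. */
	munmap(m2, len);
	munmap(m1, len);
	close(fd2);
	close(fd1);
	return 0;
}

The refcount leak came from the mmap()-against-existing-rb path in
perf_mmap(): the atomic_inc() removed below took a reference that the
single ring_buffer_put() in perf_mmap_close() never balanced.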

This exposes another problem: since we can now have two events
sharing one buffer, the accounting gets screwy, because we account
per event. Fix this by making the buffer responsible for its own
accounting.
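
The hunks below move mmap_locked/mmap_user from the event into the
ring_buffer and make ring_buffer_put() report when the last reference
went away. As a toy model of that invariant (plain userspace C, all
names made up, not kernel code): charge once when the buffer is
created, and have only the final put unaccount, no matter how many
events map the buffer:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct buf {
	atomic_int refcount;
	long locked;            /* pages charged when the buffer was created */
};

static long locked_vm;          /* stand-in for user->locked_vm */

static struct buf *buf_get(struct buf *b)
{
	atomic_fetch_add(&b->refcount, 1);
	return b;
}

static bool buf_put(struct buf *b)
{
	if (atomic_fetch_sub(&b->refcount, 1) != 1)
		return false;           /* others still hold it: no unaccount */
	locked_vm -= b->locked;         /* accounting follows the buffer... */
	free(b);                        /* ...and is undone exactly once */
	return true;
}

int main(void)
{
	struct buf *b = calloc(1, sizeof(*b));
	atomic_init(&b->refcount, 1);
	b->locked = 8;
	locked_vm += b->locked;         /* charged once, at creation */

	buf_get(b);                     /* a second event maps the same buffer */
	printf("first put: %d, locked_vm=%ld\n", buf_put(b), locked_vm);
	printf("final put: %d, locked_vm=%ld\n", buf_put(b), locked_vm);
	return 0;
}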

Reported-by: Vince Weaver <vincent.weaver@maine.edu>
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Cc: Al Viro <viro@zeniv.linux.org.uk>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Arnaldo Carvalho de Melo <acme@ghostprotocols.net>
Link: http://lkml.kernel.org/r/20130528085548.GA12193@twins.programming.kicks-ass.net
Signed-off-by: Ingo Molnar <mingo@kernel.org>
commit 26cb63ad11, parent 7b959fc582
Peter Zijlstra, 2013-05-28 10:55:48 +02:00 (committed by Ingo Molnar)
3 changed files with 24 additions and 19 deletions

include/linux/perf_event.h

@@ -389,8 +389,7 @@ struct perf_event {
 	/* mmap bits */
 	struct mutex			mmap_mutex;
 	atomic_t			mmap_count;
-	int				mmap_locked;
-	struct user_struct		*mmap_user;
+
 	struct ring_buffer		*rb;
 	struct list_head		rb_entry;
 

kernel/events/core.c

@@ -2917,7 +2917,7 @@ static void free_event_rcu(struct rcu_head *head)
 	kfree(event);
 }
 
-static void ring_buffer_put(struct ring_buffer *rb);
+static bool ring_buffer_put(struct ring_buffer *rb);
 
 static void free_event(struct perf_event *event)
 {
@@ -3582,13 +3582,13 @@ static struct ring_buffer *ring_buffer_get(struct perf_event *event)
 	return rb;
 }
 
-static void ring_buffer_put(struct ring_buffer *rb)
+static bool ring_buffer_put(struct ring_buffer *rb)
 {
 	struct perf_event *event, *n;
 	unsigned long flags;
 
 	if (!atomic_dec_and_test(&rb->refcount))
-		return;
+		return false;
 
 	spin_lock_irqsave(&rb->event_lock, flags);
 	list_for_each_entry_safe(event, n, &rb->event_list, rb_entry) {
@@ -3598,6 +3598,7 @@ static void ring_buffer_put(struct ring_buffer *rb)
 	spin_unlock_irqrestore(&rb->event_lock, flags);
 
 	call_rcu(&rb->rcu_head, rb_free_rcu);
+	return true;
 }
 
 static void perf_mmap_open(struct vm_area_struct *vma)
@@ -3612,18 +3613,20 @@ static void perf_mmap_close(struct vm_area_struct *vma)
 	struct perf_event *event = vma->vm_file->private_data;
 
 	if (atomic_dec_and_mutex_lock(&event->mmap_count, &event->mmap_mutex)) {
-		unsigned long size = perf_data_size(event->rb);
-		struct user_struct *user = event->mmap_user;
 		struct ring_buffer *rb = event->rb;
+		struct user_struct *mmap_user = rb->mmap_user;
+		int mmap_locked = rb->mmap_locked;
+		unsigned long size = perf_data_size(rb);
 
-		atomic_long_sub((size >> PAGE_SHIFT) + 1, &user->locked_vm);
-		vma->vm_mm->pinned_vm -= event->mmap_locked;
 		rcu_assign_pointer(event->rb, NULL);
 		ring_buffer_detach(event, rb);
 		mutex_unlock(&event->mmap_mutex);
 
-		ring_buffer_put(rb);
-		free_uid(user);
+		if (ring_buffer_put(rb)) {
+			atomic_long_sub((size >> PAGE_SHIFT) + 1, &mmap_user->locked_vm);
+			vma->vm_mm->pinned_vm -= mmap_locked;
+			free_uid(mmap_user);
+		}
 	}
 }
 
@ -3676,9 +3679,7 @@ static int perf_mmap(struct file *file, struct vm_area_struct *vma)
WARN_ON_ONCE(event->ctx->parent_ctx); WARN_ON_ONCE(event->ctx->parent_ctx);
mutex_lock(&event->mmap_mutex); mutex_lock(&event->mmap_mutex);
if (event->rb) { if (event->rb) {
if (event->rb->nr_pages == nr_pages) if (event->rb->nr_pages != nr_pages)
atomic_inc(&event->rb->refcount);
else
ret = -EINVAL; ret = -EINVAL;
goto unlock; goto unlock;
} }
@@ -3720,12 +3721,14 @@ static int perf_mmap(struct file *file, struct vm_area_struct *vma)
 		ret = -ENOMEM;
 		goto unlock;
 	}
-	rcu_assign_pointer(event->rb, rb);
+
+	rb->mmap_locked = extra;
+	rb->mmap_user = get_current_user();
 
 	atomic_long_add(user_extra, &user->locked_vm);
-	event->mmap_locked = extra;
-	event->mmap_user = get_current_user();
-	vma->vm_mm->pinned_vm += event->mmap_locked;
+	vma->vm_mm->pinned_vm += extra;
+
+	rcu_assign_pointer(event->rb, rb);
 
 	perf_event_update_userpage(event);
 
@@ -3734,7 +3737,7 @@ unlock:
 	atomic_inc(&event->mmap_count);
 	mutex_unlock(&event->mmap_mutex);
 
-	vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
+	vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND | VM_DONTDUMP;
 	vma->vm_ops = &perf_mmap_vmops;
 
 	return ret;

kernel/events/internal.h

@@ -31,6 +31,9 @@ struct ring_buffer {
 	spinlock_t			event_lock;
 	struct list_head		event_list;
 
+	int				mmap_locked;
+	struct user_struct		*mmap_user;
+
 	struct perf_event_mmap_page	*user_page;
 	void				*data_pages[0];
 };