drm: Drop dev->event_lock spinlock around faulting copy_to_user()

In

commit cdd1cf799b
Author: Chris Wilson <chris@chris-wilson.co.uk>
Date:   Thu Dec 4 21:03:25 2014 +0000

    drm: Make drm_read() more robust against multithreaded races

I fixed the races by serialising use of the event, extending the
dev->event_lock critical section around it. However, as Thomas pointed
out, the copy_to_user() may fault (even the __copy_to_user_inatomic()
variant used here), and calling into the driver backend with the
spinlock held is bad news. Therefore we have to drop the spinlock
before the copy, but that exposes us to the old race whereby a second
reader could see an out-of-order event: the first reader may claim the
first event but fail to copy it back to userspace, and so, on returning
it to the event list, it would land behind the event currently being
copied by the second reader. The approach taken here, as sketched
below, is to unlink the event from the list under the lock before
copying, and on a fault to put it back at the head of the list so that
it stays ahead of the remaining events.

Reported-by: Thomas Hellstrom <thellstrom@vmware.com>
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Thomas Hellstrom <thellstrom@vmware.com>
Cc: Takashi Iwai <tiwai@suse.de>
Cc: Daniel Vetter <daniel.vetter@ffwll.ch>
Link: http://patchwork.freedesktop.org/patch/msgid/1448462343-2072-1-git-send-email-chris@chris-wilson.co.uk
Reviewed-by: Thomas Hellstrom <thellstrom@vmware.com>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
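
For illustration only (this is not part of the patch): a minimal,
self-contained userspace sketch of the pattern the patch adopts,
collapsed to a single event per call. The names fake_event,
fake_copy_to_user and read_one_event are hypothetical stand-ins for
drm_pending_event, copy_to_user() and drm_read(), and a pthread mutex
stands in for dev->event_lock.

#include <pthread.h>
#include <stddef.h>
#include <stdio.h>
#include <string.h>
#include <sys/types.h>

struct fake_event {
        struct fake_event *next;        /* stand-in for the list_head link */
        size_t length;
        char payload[32];
};

static pthread_mutex_t event_lock = PTHREAD_MUTEX_INITIALIZER;
static struct fake_event *event_list;   /* head is the oldest pending event */

/* Stand-in for copy_to_user(): a nonzero return simulates a fault. */
static int fake_copy_to_user(char *dst, const void *src, size_t len)
{
        if (dst == NULL)
                return 1;               /* pretend the user page faulted */
        memcpy(dst, src, len);
        return 0;
}

static ssize_t read_one_event(char *buffer, size_t count)
{
        struct fake_event *e;

        /* Claim the event by unlinking it while holding the lock... */
        pthread_mutex_lock(&event_lock);
        e = event_list;
        if (e)
                event_list = e->next;
        pthread_mutex_unlock(&event_lock);

        /* ...then copy with the lock dropped, since the copy may fault. */
        if (e == NULL)
                return 0;

        if (e->length > count ||
            fake_copy_to_user(buffer, e->payload, e->length)) {
                /* Put the event back at the head, as the patch does with
                 * list_add(), so it stays ahead of later-queued events. */
                pthread_mutex_lock(&event_lock);
                e->next = event_list;
                event_list = e;
                pthread_mutex_unlock(&event_lock);
                return -1;              /* the kernel returns -EFAULT here */
        }

        return (ssize_t)e->length;      /* the kernel calls e->destroy(e) */
}

int main(void)
{
        struct fake_event ev = { .next = NULL, .length = 5,
                                 .payload = "hello" };
        char out[32];

        event_list = &ev;

        /* A faulting copy puts the event back on the list... */
        printf("fault: ret=%zd requeued=%d\n",
               read_one_event(NULL, sizeof(out)), event_list == &ev);

        /* ...while a successful copy consumes it. */
        printf("ok:    ret=%zd empty=%d\n",
               read_one_event(out, sizeof(out)), event_list == NULL);
        return 0;
}

The essential property is that an event is never on the list while a
copy of it is in flight, so two readers can never pick up the same
event; and a failed event is re-inserted at the head rather than the
tail, so the next read sees it before anything queued behind it.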
Branch: hifive-unleashed-5.1
Chris Wilson, 2015-11-25 14:39:02 +00:00; committed by Daniel Vetter
parent 87069f4493
commit 83eb64c85b
1 changed file with 26 additions and 18 deletions

drivers/gpu/drm/drm_fops.c

@@ -488,9 +488,19 @@ ssize_t drm_read(struct file *filp, char __user *buffer,
         if (!access_ok(VERIFY_WRITE, buffer, count))
                 return -EFAULT;
 
-        spin_lock_irq(&dev->event_lock);
         for (;;) {
-                if (list_empty(&file_priv->event_list)) {
+                struct drm_pending_event *e = NULL;
+
+                spin_lock_irq(&dev->event_lock);
+                if (!list_empty(&file_priv->event_list)) {
+                        e = list_first_entry(&file_priv->event_list,
+                                        struct drm_pending_event, link);
+                        file_priv->event_space += e->event->length;
+                        list_del(&e->link);
+                }
+                spin_unlock_irq(&dev->event_lock);
+
+                if (e == NULL) {
                         if (ret)
                                 break;
 
@@ -499,36 +509,34 @@ ssize_t drm_read(struct file *filp, char __user *buffer,
                                 break;
                         }
 
-                        spin_unlock_irq(&dev->event_lock);
                         ret = wait_event_interruptible(file_priv->event_wait,
                                                        !list_empty(&file_priv->event_list));
-                        spin_lock_irq(&dev->event_lock);
                         if (ret < 0)
                                 break;
 
                         ret = 0;
                 } else {
-                        struct drm_pending_event *e;
+                        unsigned length = e->event->length;
 
-                        e = list_first_entry(&file_priv->event_list,
-                                             struct drm_pending_event, link);
-                        if (e->event->length + ret > count)
-                                break;
-
-                        if (__copy_to_user_inatomic(buffer + ret,
-                                                    e->event, e->event->length)) {
-                                if (ret == 0)
-                                        ret = -EFAULT;
+                        if (length > count - ret) {
+put_back_event:
+                                spin_lock_irq(&dev->event_lock);
+                                file_priv->event_space -= length;
+                                list_add(&e->link, &file_priv->event_list);
+                                spin_unlock_irq(&dev->event_lock);
                                 break;
                         }
 
-                        file_priv->event_space += e->event->length;
-                        ret += e->event->length;
-                        list_del(&e->link);
+                        if (copy_to_user(buffer + ret, e->event, length)) {
+                                if (ret == 0)
+                                        ret = -EFAULT;
+                                goto put_back_event;
+                        }
+
+                        ret += length;
                         e->destroy(e);
                 }
         }
-        spin_unlock_irq(&dev->event_lock);
 
         return ret;
 }