
splice: use get_user_pages_fast

Use get_user_pages_fast in splice.  This reverts some mmap_sem batching
there; however, the biggest problem with mmap_sem tends to be hold times
blocking out other threads rather than cacheline bouncing.  Further, on
architectures that implement get_user_pages_fast without locks, mmap_sem
can be avoided completely anyway.
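
For context, a minimal before/after sketch of the page-pinning convention
this patch switches to (illustrative only: pin_user_iov_old/new are made-up
names, and the call signatures assume the 2008-era kernel APIs):

#include <linux/mm.h>
#include <linux/sched.h>

/* Before: the caller takes mmap_sem for read around get_user_pages(). */
static int pin_user_iov_old(unsigned long base, int npages,
                            struct page **pages)
{
        int ret;

        down_read(&current->mm->mmap_sem);
        ret = get_user_pages(current, current->mm, base, npages,
                             0 /* write */, 0 /* force */, pages, NULL);
        up_read(&current->mm->mmap_sem);
        return ret;
}

/* After: get_user_pages_fast() does its own locking; architectures with a
 * lockless fast path never take mmap_sem on the common path. */
static int pin_user_iov_new(unsigned long base, int npages,
                            struct page **pages)
{
        return get_user_pages_fast(base, npages, 0 /* write */, pages);
}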

Signed-off-by: Nick Piggin <npiggin@suse.de>
Cc: Dave Kleikamp <shaggy@austin.ibm.com>
Cc: Andy Whitcroft <apw@shadowen.org>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Andi Kleen <andi@firstfloor.org>
Cc: Badari Pulavarty <pbadari@us.ibm.com>
Cc: Zach Brown <zach.brown@oracle.com>
Cc: Jens Axboe <jens.axboe@oracle.com>
Reviewed-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Nick Piggin 2008-07-25 19:45:26 -07:00 committed by Linus Torvalds
parent f5dd33c494
commit bc40d73c95
1 changed file with 3 additions and 38 deletions


@@ -1160,36 +1160,6 @@ static long do_splice(struct file *in, loff_t __user *off_in,
 	return -EINVAL;
 }
 
-/*
- * Do a copy-from-user while holding the mmap_semaphore for reading, in a
- * manner safe from deadlocking with simultaneous mmap() (grabbing mmap_sem
- * for writing) and page faulting on the user memory pointed to by src.
- * This assumes that we will very rarely hit the partial != 0 path, or this
- * will not be a win.
- */
-static int copy_from_user_mmap_sem(void *dst, const void __user *src, size_t n)
-{
-	int partial;
-
-	if (!access_ok(VERIFY_READ, src, n))
-		return -EFAULT;
-
-	pagefault_disable();
-	partial = __copy_from_user_inatomic(dst, src, n);
-	pagefault_enable();
-
-	/*
-	 * Didn't copy everything, drop the mmap_sem and do a faulting copy
-	 */
-	if (unlikely(partial)) {
-		up_read(&current->mm->mmap_sem);
-		partial = copy_from_user(dst, src, n);
-		down_read(&current->mm->mmap_sem);
-	}
-
-	return partial;
-}
-
 /*
  * Map an iov into an array of pages and offset/length tupples. With the
  * partial_page structure, we can map several non-contiguous ranges into
@@ -1203,8 +1173,6 @@ static int get_iovec_page_array(const struct iovec __user *iov,
 {
 	int buffers = 0, error = 0;
 
-	down_read(&current->mm->mmap_sem);
-
 	while (nr_vecs) {
 		unsigned long off, npages;
 		struct iovec entry;
@@ -1213,7 +1181,7 @@ static int get_iovec_page_array(const struct iovec __user *iov,
 		int i;
 
 		error = -EFAULT;
-		if (copy_from_user_mmap_sem(&entry, iov, sizeof(entry)))
+		if (copy_from_user(&entry, iov, sizeof(entry)))
 			break;
 
 		base = entry.iov_base;
@@ -1247,9 +1215,8 @@ static int get_iovec_page_array(const struct iovec __user *iov,
 		if (npages > PIPE_BUFFERS - buffers)
 			npages = PIPE_BUFFERS - buffers;
 
-		error = get_user_pages(current, current->mm,
-				       (unsigned long) base, npages, 0, 0,
-				       &pages[buffers], NULL);
+		error = get_user_pages_fast((unsigned long)base, npages,
+					0, &pages[buffers]);
 
 		if (unlikely(error <= 0))
 			break;
@@ -1288,8 +1255,6 @@ static int get_iovec_page_array(const struct iovec __user *iov,
 		iov++;
 	}
 
-	up_read(&current->mm->mmap_sem);
-
 	if (buffers)
 		return buffers;
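
Why the deleted copy_from_user_mmap_sem() helper existed, and why it can go:
copy_from_user() may fault, and the page-fault handler takes mmap_sem for
read; doing that while already holding mmap_sem for read, with a writer
queued in between, deadlocks. A hedged sketch of the interleaving
(illustrative only; the thread labels and ordering are assumptions, not
part of the patch):

/*
 * Assumed interleaving under writer-fair rwsem queueing:
 *
 *   A (splice): down_read(&mm->mmap_sem)
 *   B (mmap):   down_write(&mm->mmap_sem)   -- queues behind A
 *   A:          copy_from_user() faults
 *               -> fault handler: down_read(&mm->mmap_sem)
 *               -> blocks behind queued writer B => A waits on B waits on A
 *
 * The helper dodged this by copying with page faults disabled and, only on
 * a partial copy, dropping mmap_sem before the faulting retry.  With
 * get_user_pages_fast() the caller no longer holds mmap_sem at all, so a
 * plain copy_from_user() is safe and the helper is unnecessary.
 */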