From 77000bc43da17d5d6bc4ebfaf44d52d43bb69492 Mon Sep 17 00:00:00 2001
From: Christoph Hellwig <hch@lst.de>
Date: Mon, 4 Feb 2019 16:31:04 +0100
Subject: [PATCH 1/2] uio: remove the unused iov_for_each macro

Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
---
 .clang-format       | 1 -
 include/linux/uio.h | 8 --------
 2 files changed, 9 deletions(-)

diff --git a/.clang-format b/.clang-format
index e6080f5834a3..c144d9c24d5d 100644
--- a/.clang-format
+++ b/.clang-format
@@ -259,7 +259,6 @@ ForEachMacros:
   - 'idr_for_each_entry_ul'
   - 'inet_bind_bucket_for_each'
   - 'inet_lhash2_for_each_icsk_rcu'
-  - 'iov_for_each'
   - 'key_for_each'
   - 'key_for_each_safe'
   - 'klp_for_each_func'
diff --git a/include/linux/uio.h b/include/linux/uio.h
index ecf584f6b82d..87477e1640f9 100644
--- a/include/linux/uio.h
+++ b/include/linux/uio.h
@@ -110,14 +110,6 @@ static inline struct iovec iov_iter_iovec(const struct iov_iter *iter)
 	};
 }
 
-#define iov_for_each(iov, iter, start)				\
-	if (iov_iter_type(start) == ITER_IOVEC ||		\
-	    iov_iter_type(start) == ITER_KVEC)			\
-		for (iter = (start);				\
-		     (iter).count &&				\
-		     ((iov = iov_iter_iovec(&(iter))), 1);	\
-		     iov_iter_advance(&(iter), (iov).iov_len))
-
 size_t iov_iter_copy_from_user_atomic(struct page *page,
 		struct iov_iter *i, unsigned long offset, size_t bytes);
 void iov_iter_advance(struct iov_iter *i, size_t bytes);

From 6daef95b8c914866a46247232a048447fff97279 Mon Sep 17 00:00:00 2001
From: Eric Dumazet <edumazet@google.com>
Date: Tue, 26 Feb 2019 10:42:39 -0800
Subject: [PATCH 2/2] iov_iter: optimize page_copy_sane()

Avoid cache line miss dereferencing struct page if we can.

page_copy_sane() mostly deals with order-0 pages.

Extra cache line miss is visible on TCP recvmsg() calls dealing
with GRO packets (typically 45 page frags are attached to one skb).

Bringing the 45 struct pages into cpu cache while copying the data
is not free, since the freeing of the skb (and associated
page frags put_page()) can happen after cache lines have been evicted.

Signed-off-by: Eric Dumazet <edumazet@google.com>
Cc: Al Viro <viro@zeniv.linux.org.uk>
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
---
 lib/iov_iter.c | 17 +++++++++++++++--
 1 file changed, 15 insertions(+), 2 deletions(-)

diff --git a/lib/iov_iter.c b/lib/iov_iter.c
index be4bd627caf0..ea36dc355da1 100644
--- a/lib/iov_iter.c
+++ b/lib/iov_iter.c
@@ -861,8 +861,21 @@ EXPORT_SYMBOL(_copy_from_iter_full_nocache);
 
 static inline bool page_copy_sane(struct page *page, size_t offset, size_t n)
 {
-	struct page *head = compound_head(page);
-	size_t v = n + offset + page_address(page) - page_address(head);
+	struct page *head;
+	size_t v = n + offset;
+
+	/*
+	 * The general case needs to access the page order in order
+	 * to compute the page size.
+	 * However, we mostly deal with order-0 pages and thus can
+	 * avoid a possible cache line miss for requests that fit all
+	 * page orders.
+	 */
+	if (n <= v && v <= PAGE_SIZE)
+		return true;
+
+	head = compound_head(page);
+	v += (page - head) << PAGE_SHIFT;
 
 	if (likely(n <= v && v <= (PAGE_SIZE << compound_order(head))))
 		return true;
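
Side note: below is a minimal user-space sketch of the fast path the second
patch adds, for illustration only. PAGE_SIZE is hard-coded to 4096 here as
an assumption, and fast_path_sane() is a hypothetical stand-in for the
order-0 shortcut in page_copy_sane(); the real kernel code falls back to
compound_head() when this check fails rather than returning false.

	#include <stdbool.h>
	#include <stddef.h>
	#include <stdio.h>

	#define PAGE_SIZE 4096	/* assumption: order-0 page size */

	/*
	 * Model of the shortcut: an order-0 copy is sane iff
	 * [offset, offset + n) fits inside a single page. The
	 * "n <= v" test rejects the case where n + offset wrapped
	 * around, so struct page is never touched on this path.
	 */
	static bool fast_path_sane(size_t offset, size_t n)
	{
		size_t v = n + offset;

		return n <= v && v <= PAGE_SIZE;
	}

	int main(void)
	{
		printf("%d\n", fast_path_sane(100, 200));	/* 1: fits in a page */
		printf("%d\n", fast_path_sane(4000, 200));	/* 0: crosses a page boundary */
		printf("%d\n", fast_path_sane(1, (size_t)-1));	/* 0: n + offset overflowed */
		return 0;
	}

The "n <= v" test is the interesting design choice: it detects size_t
overflow of n + offset without needing a wider type, because an unsigned
sum that wraps is necessarily smaller than either operand.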