
mm: make pagevec_lookup() update index

Make pagevec_lookup() (and the underlying find_get_pages()) update the
index to the next page at which iteration should continue.  Most callers
want this, and pagevec_lookup_tag() already behaves this way.

Link: http://lkml.kernel.org/r/20170726114704.7626-3-jack@suse.cz
Signed-off-by: Jan Kara <jack@suse.cz>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Jan Kara 2017-09-06 16:21:18 -07:00 committed by Linus Torvalds
parent 26b433d0da
commit d72dc8a25a
11 changed files with 32 additions and 37 deletions
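
For illustration, a minimal sketch of the caller pattern this change enables (a hypothetical caller: mapping, index, end and pvec stand in for a caller's own variables, mirroring the hunks below):

	/* Before: every caller advanced the index by hand. */
	pagevec_init(&pvec, 0);
	while (index <= end &&
	       pagevec_lookup(&pvec, mapping, index, PAGEVEC_SIZE)) {
		/* ... process pvec.pages[0] .. pvec.pages[pagevec_count(&pvec) - 1] ... */
		index = pvec.pages[pagevec_count(&pvec) - 1]->index + 1;
		pagevec_release(&pvec);
	}

	/* After: pagevec_lookup() stores the index of the next page back through &index. */
	pagevec_init(&pvec, 0);
	while (index <= end &&
	       pagevec_lookup(&pvec, mapping, &index, PAGEVEC_SIZE)) {
		/* ... process pvec.pages[0] .. pvec.pages[pagevec_count(&pvec) - 1] ... */
		pagevec_release(&pvec);
	}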

fs/buffer.c

@@ -1633,13 +1633,12 @@ void clean_bdev_aliases(struct block_device *bdev, sector_t block, sector_t len)
 	end = (block + len - 1) >> (PAGE_SHIFT - bd_inode->i_blkbits);
 	pagevec_init(&pvec, 0);
-	while (index <= end && pagevec_lookup(&pvec, bd_mapping, index,
+	while (index <= end && pagevec_lookup(&pvec, bd_mapping, &index,
 			min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1)) {
 		for (i = 0; i < pagevec_count(&pvec); i++) {
 			struct page *page = pvec.pages[i];
 
-			index = page->index;
-			if (index > end)
+			if (page->index > end)
 				break;
 			if (!page_has_buffers(page))
 				continue;
@@ -1670,7 +1669,6 @@ unlock_page:
 		}
 		pagevec_release(&pvec);
 		cond_resched();
-		index++;
 	}
 }
 EXPORT_SYMBOL(clean_bdev_aliases);
@@ -3552,7 +3550,8 @@ page_cache_seek_hole_data(struct inode *inode, loff_t offset, loff_t length,
 		unsigned want, nr_pages, i;
 
 		want = min_t(unsigned, end - index, PAGEVEC_SIZE);
-		nr_pages = pagevec_lookup(&pvec, inode->i_mapping, index, want);
+		nr_pages = pagevec_lookup(&pvec, inode->i_mapping, &index,
+					  want);
 		if (nr_pages == 0)
 			break;
@@ -3594,7 +3593,6 @@ page_cache_seek_hole_data(struct inode *inode, loff_t offset, loff_t length,
 		if (nr_pages < want)
 			break;
-		index = pvec.pages[i - 1]->index + 1;
 		pagevec_release(&pvec);
 	} while (index < end);

fs/ext4/file.c

@@ -468,7 +468,7 @@ static int ext4_find_unwritten_pgoff(struct inode *inode,
 		unsigned long nr_pages;
 
 		num = min_t(pgoff_t, end - index, PAGEVEC_SIZE - 1) + 1;
-		nr_pages = pagevec_lookup(&pvec, inode->i_mapping, index,
+		nr_pages = pagevec_lookup(&pvec, inode->i_mapping, &index,
 					  (pgoff_t)num);
 		if (nr_pages == 0)
 			break;
@@ -536,8 +536,6 @@ next:
 		/* The no. of pages is less than our desired, we are done. */
 		if (nr_pages < num)
 			break;
-
-		index = pvec.pages[i - 1]->index + 1;
 		pagevec_release(&pvec);
 	} while (index <= end);

fs/ext4/inode.c

@@ -1720,7 +1720,7 @@ static void mpage_release_unused_pages(struct mpage_da_data *mpd,
 	pagevec_init(&pvec, 0);
 	while (index <= end) {
-		nr_pages = pagevec_lookup(&pvec, mapping, index, PAGEVEC_SIZE);
+		nr_pages = pagevec_lookup(&pvec, mapping, &index, PAGEVEC_SIZE);
 		if (nr_pages == 0)
 			break;
 		for (i = 0; i < nr_pages; i++) {
@@ -1737,7 +1737,6 @@ static void mpage_release_unused_pages(struct mpage_da_data *mpd,
 			}
 			unlock_page(page);
 		}
-		index = pvec.pages[nr_pages - 1]->index + 1;
 		pagevec_release(&pvec);
 	}
 }
@@ -2348,7 +2347,7 @@ static int mpage_map_and_submit_buffers(struct mpage_da_data *mpd)
 	pagevec_init(&pvec, 0);
 	while (start <= end) {
-		nr_pages = pagevec_lookup(&pvec, inode->i_mapping, start,
+		nr_pages = pagevec_lookup(&pvec, inode->i_mapping, &start,
 					  PAGEVEC_SIZE);
 		if (nr_pages == 0)
 			break;
@@ -2357,8 +2356,6 @@ static int mpage_map_and_submit_buffers(struct mpage_da_data *mpd)
 			if (page->index > end)
 				break;
-			/* Up to 'end' pages must be contiguous */
-			BUG_ON(page->index != start);
 			bh = head = page_buffers(page);
 			do {
 				if (lblk < mpd->map.m_lblk)
@@ -2403,7 +2400,6 @@ static int mpage_map_and_submit_buffers(struct mpage_da_data *mpd)
 				pagevec_release(&pvec);
 				return err;
 			}
-			start++;
 		}
 		pagevec_release(&pvec);
 	}

fs/fscache/page.c

@@ -1178,11 +1178,10 @@ void __fscache_uncache_all_inode_pages(struct fscache_cookie *cookie,
 	pagevec_init(&pvec, 0);
 	next = 0;
 	do {
-		if (!pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE))
+		if (!pagevec_lookup(&pvec, mapping, &next, PAGEVEC_SIZE))
 			break;
 		for (i = 0; i < pagevec_count(&pvec); i++) {
 			struct page *page = pvec.pages[i];
-			next = page->index;
 			if (PageFsCache(page)) {
 				__fscache_wait_on_page_write(cookie, page);
 				__fscache_uncache_page(cookie, page);
@@ -1190,7 +1189,7 @@ void __fscache_uncache_all_inode_pages(struct fscache_cookie *cookie,
 		}
 		pagevec_release(&pvec);
 		cond_resched();
-	} while (++next);
+	} while (next);
 	_leave("");
 }

fs/hugetlbfs/inode.c

@@ -401,7 +401,7 @@ static void remove_inode_hugepages(struct inode *inode, loff_t lstart,
 	const pgoff_t end = lend >> huge_page_shift(h);
 	struct vm_area_struct pseudo_vma;
 	struct pagevec pvec;
-	pgoff_t next;
+	pgoff_t next, index;
 	int i, freed = 0;
 	long lookup_nr = PAGEVEC_SIZE;
 	bool truncate_op = (lend == LLONG_MAX);
@@ -420,7 +420,7 @@ static void remove_inode_hugepages(struct inode *inode, loff_t lstart,
 		/*
 		 * When no more pages are found, we are done.
 		 */
-		if (!pagevec_lookup(&pvec, mapping, next, lookup_nr))
+		if (!pagevec_lookup(&pvec, mapping, &next, lookup_nr))
 			break;
 		for (i = 0; i < pagevec_count(&pvec); ++i) {
@@ -432,13 +432,13 @@ static void remove_inode_hugepages(struct inode *inode, loff_t lstart,
 			 * only possible in the punch hole case as end is
 			 * max page offset in the truncate case.
 			 */
-			next = page->index;
-			if (next >= end)
+			index = page->index;
+			if (index >= end)
 				break;
 			hash = hugetlb_fault_mutex_hash(h, current->mm,
 							&pseudo_vma,
-							mapping, next, 0);
+							mapping, index, 0);
 			mutex_lock(&hugetlb_fault_mutex_table[hash]);
 			/*
@@ -455,8 +455,8 @@ static void remove_inode_hugepages(struct inode *inode, loff_t lstart,
 			i_mmap_lock_write(mapping);
 			hugetlb_vmdelete_list(&mapping->i_mmap,
-				next * pages_per_huge_page(h),
-				(next + 1) * pages_per_huge_page(h));
+				index * pages_per_huge_page(h),
+				(index + 1) * pages_per_huge_page(h));
 			i_mmap_unlock_write(mapping);
 		}
@@ -475,14 +475,13 @@ static void remove_inode_hugepages(struct inode *inode, loff_t lstart,
 			freed++;
 			if (!truncate_op) {
 				if (unlikely(hugetlb_unreserve_pages(inode,
-							next, next + 1, 1)))
+							index, index + 1, 1)))
 					hugetlb_fix_reserve_counts(inode);
 			}
 			unlock_page(page);
 			mutex_unlock(&hugetlb_fault_mutex_table[hash]);
 		}
-		++next;
 		huge_pagevec_release(&pvec);
 		cond_resched();
 	}

fs/nilfs2/page.c

@@ -312,10 +312,9 @@ void nilfs_copy_back_pages(struct address_space *dmap,
 	pagevec_init(&pvec, 0);
 repeat:
-	n = pagevec_lookup(&pvec, smap, index, PAGEVEC_SIZE);
+	n = pagevec_lookup(&pvec, smap, &index, PAGEVEC_SIZE);
 	if (!n)
 		return;
-	index = pvec.pages[n - 1]->index + 1;
 	for (i = 0; i < pagevec_count(&pvec); i++) {
 		struct page *page = pvec.pages[i], *dpage;

fs/ramfs/file-nommu.c

@@ -228,7 +228,7 @@ static unsigned long ramfs_nommu_get_unmapped_area(struct file *file,
 	if (!pages)
 		goto out_free;
-	nr = find_get_pages(inode->i_mapping, pgoff, lpages, pages);
+	nr = find_get_pages(inode->i_mapping, &pgoff, lpages, pages);
 	if (nr != lpages)
 		goto out_free_pages; /* leave if some pages were missing */

include/linux/pagemap.h

@@ -353,7 +353,7 @@ struct page *find_lock_entry(struct address_space *mapping, pgoff_t offset);
 unsigned find_get_entries(struct address_space *mapping, pgoff_t start,
 			  unsigned int nr_entries, struct page **entries,
 			  pgoff_t *indices);
-unsigned find_get_pages(struct address_space *mapping, pgoff_t start,
+unsigned find_get_pages(struct address_space *mapping, pgoff_t *start,
 			unsigned int nr_pages, struct page **pages);
 unsigned find_get_pages_contig(struct address_space *mapping, pgoff_t start,
 			       unsigned int nr_pages, struct page **pages);

include/linux/pagevec.h

@@ -28,7 +28,7 @@ unsigned pagevec_lookup_entries(struct pagevec *pvec,
 				pgoff_t *indices);
 void pagevec_remove_exceptionals(struct pagevec *pvec);
 unsigned pagevec_lookup(struct pagevec *pvec, struct address_space *mapping,
-		pgoff_t start, unsigned nr_pages);
+		pgoff_t *start, unsigned nr_pages);
 unsigned pagevec_lookup_tag(struct pagevec *pvec,
 		struct address_space *mapping, pgoff_t *index, int tag,
 		unsigned nr_pages);

mm/filemap.c

@@ -403,7 +403,7 @@ bool filemap_range_has_page(struct address_space *mapping,
 		return false;
 	pagevec_init(&pvec, 0);
-	if (!pagevec_lookup(&pvec, mapping, index, 1))
+	if (!pagevec_lookup(&pvec, mapping, &index, 1))
 		return false;
 	ret = (pvec.pages[0]->index <= end);
 	pagevec_release(&pvec);
@@ -1569,10 +1569,11 @@ export:
  *
  * The search returns a group of mapping-contiguous pages with ascending
  * indexes. There may be holes in the indices due to not-present pages.
+ * We also update @start to index the next page for the traversal.
  *
  * find_get_pages() returns the number of pages which were found.
  */
-unsigned find_get_pages(struct address_space *mapping, pgoff_t start,
+unsigned find_get_pages(struct address_space *mapping, pgoff_t *start,
 			unsigned int nr_pages, struct page **pages)
 {
 	struct radix_tree_iter iter;
@@ -1583,7 +1584,7 @@ unsigned find_get_pages(struct address_space *mapping, pgoff_t start,
 		return 0;
 	rcu_read_lock();
-	radix_tree_for_each_slot(slot, &mapping->page_tree, &iter, start) {
+	radix_tree_for_each_slot(slot, &mapping->page_tree, &iter, *start) {
 		struct page *head, *page;
 repeat:
 		page = radix_tree_deref_slot(slot);
@@ -1625,6 +1626,10 @@ repeat:
 	}
 	rcu_read_unlock();
+
+	if (ret)
+		*start = pages[ret - 1]->index + 1;
+
 	return ret;
 }
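
A minimal sketch, assuming a hypothetical caller with its own mapping, of driving the updated find_get_pages() directly: *start is only advanced when at least one page was found, so the loop terminates on a zero return rather than on manual index arithmetic, and each returned page still carries a reference the caller must drop:

	pgoff_t index = 0;
	struct page *pages[PAGEVEC_SIZE];
	unsigned int i, nr;

	while ((nr = find_get_pages(mapping, &index, PAGEVEC_SIZE, pages))) {
		for (i = 0; i < nr; i++) {
			/* ... inspect pages[i] ... */
			put_page(pages[i]);	/* drop the reference taken by the lookup */
		}
	}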

mm/swap.c

@@ -957,12 +957,13 @@ void pagevec_remove_exceptionals(struct pagevec *pvec)
  * reference against the pages in @pvec.
  *
  * The search returns a group of mapping-contiguous pages with ascending
- * indexes. There may be holes in the indices due to not-present pages.
+ * indexes. There may be holes in the indices due to not-present pages. We
+ * also update @start to index the next page for the traversal.
  *
  * pagevec_lookup() returns the number of pages which were found.
  */
 unsigned pagevec_lookup(struct pagevec *pvec, struct address_space *mapping,
-		pgoff_t start, unsigned nr_pages)
+		pgoff_t *start, unsigned nr_pages)
 {
 	pvec->nr = find_get_pages(mapping, start, nr_pages, pvec->pages);
 	return pagevec_count(pvec);