
page cache: Convert find_get_entries_tag to XArray

Slightly shorter and simpler code.

Signed-off-by: Matthew Wilcox <willy@infradead.org>
Author: Matthew Wilcox
Date:   2018-05-16 23:56:04 -04:00
parent  a6906972fe
commit  c1901cd33c
2 changed files with 25 additions and 31 deletions

include/linux/pagemap.h

@@ -373,7 +373,7 @@ static inline unsigned find_get_pages_tag(struct address_space *mapping,
 			nr_pages, pages);
 }
 unsigned find_get_entries_tag(struct address_space *mapping, pgoff_t start,
-			int tag, unsigned int nr_entries,
+			xa_mark_t tag, unsigned int nr_entries,
 			struct page **entries, pgoff_t *indices);
 
 struct page *grab_cache_page_write_begin(struct address_space *mapping,
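
The header change is just the type of the tag argument: the radix tree identified tags with a plain int, while the XArray gives marks their own xa_mark_t (a __bitwise type, so sparse can flag a mark mixed up with an ordinary integer). As a rough illustration of the mark API, here is a hypothetical sketch that is not part of this commit; sketch_xa and sketch_mark_entry() are made-up names, and XA_MARK_0 stands in for a real page-cache tag:

#include <linux/xarray.h>
#include <linux/printk.h>

static DEFINE_XARRAY(sketch_xa);

static void sketch_mark_entry(void *entry)
{
	/* Store an entry at index 5; a mark can only be set on an
	 * index that actually holds an entry. */
	xa_store(&sketch_xa, 5, entry, GFP_KERNEL);
	xa_set_mark(&sketch_xa, 5, XA_MARK_0);

	/* Marks are queried per index ... */
	if (xa_get_mark(&sketch_xa, 5, XA_MARK_0))
		pr_info("index 5 carries XA_MARK_0\n");

	/* ... and cleared the same way. */
	xa_clear_mark(&sketch_xa, 5, XA_MARK_0);
}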

mm/filemap.c

@@ -1866,57 +1866,51 @@ EXPORT_SYMBOL(find_get_pages_range_tag);
  * @tag.
  */
 unsigned find_get_entries_tag(struct address_space *mapping, pgoff_t start,
-			int tag, unsigned int nr_entries,
+			xa_mark_t tag, unsigned int nr_entries,
 			struct page **entries, pgoff_t *indices)
 {
-	void **slot;
+	XA_STATE(xas, &mapping->i_pages, start);
+	struct page *page;
 	unsigned int ret = 0;
-	struct radix_tree_iter iter;
 
 	if (!nr_entries)
 		return 0;
 
 	rcu_read_lock();
-	radix_tree_for_each_tagged(slot, &mapping->i_pages, &iter, start, tag) {
-		struct page *head, *page;
-repeat:
-		page = radix_tree_deref_slot(slot);
-		if (unlikely(!page))
+	xas_for_each_marked(&xas, page, ULONG_MAX, tag) {
+		struct page *head;
+
+		if (xas_retry(&xas, page))
 			continue;
-		if (radix_tree_exception(page)) {
-			if (radix_tree_deref_retry(page)) {
-				slot = radix_tree_iter_retry(&iter);
-				continue;
-			}
-
-			/*
-			 * A shadow entry of a recently evicted page, a swap
-			 * entry from shmem/tmpfs or a DAX entry. Return it
-			 * without attempting to raise page count.
-			 */
+		/*
+		 * A shadow entry of a recently evicted page, a swap
+		 * entry from shmem/tmpfs or a DAX entry. Return it
+		 * without attempting to raise page count.
+		 */
+		if (xa_is_value(page))
 			goto export;
-		}
 
 		head = compound_head(page);
 		if (!page_cache_get_speculative(head))
-			goto repeat;
+			goto retry;
 
 		/* The page was split under us? */
-		if (compound_head(page) != head) {
-			put_page(head);
-			goto repeat;
-		}
+		if (compound_head(page) != head)
+			goto put_page;
 
 		/* Has the page moved? */
-		if (unlikely(page != *slot)) {
-			put_page(head);
-			goto repeat;
-		}
+		if (unlikely(page != xas_reload(&xas)))
+			goto put_page;
 export:
-		indices[ret] = iter.index;
+		indices[ret] = xas.xa_index;
 		entries[ret] = page;
 		if (++ret == nr_entries)
 			break;
+		continue;
+put_page:
+		put_page(head);
+retry:
+		xas_reset(&xas);
 	}
 	rcu_read_unlock();
 	return ret;
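
The conversion replaces the open-coded repeat: loop with XArray iteration state: xas_for_each_marked() walks only marked entries, xas_retry() skips the internal retry entries the walk can return, xas_reload() re-reads the slot to confirm the page did not move while the speculative reference was taken, and xas_reset() restarts the current index when a check fails. A minimal stand-alone sketch of that pattern, hypothetical code that is not from this commit (sketch_walk_marked() is a made-up name, and the reload check is shown bare, without the page reference it guards in the real function):

#include <linux/xarray.h>
#include <linux/rcupdate.h>
#include <linux/printk.h>

static void sketch_walk_marked(struct xarray *xa, xa_mark_t mark)
{
	XA_STATE(xas, xa, 0);	/* iteration cursor, starting at index 0 */
	void *entry;

	rcu_read_lock();
	xas_for_each_marked(&xas, entry, ULONG_MAX, mark) {
		/* Skip internal retry entries. */
		if (xas_retry(&xas, entry))
			continue;
		/* Re-read the slot; if the entry moved, redo this index. */
		if (entry != xas_reload(&xas)) {
			xas_reset(&xas);
			continue;
		}
		pr_info("marked entry at index %lu\n", xas.xa_index);
	}
	rcu_read_unlock();
}

Note how the real function keeps its failure paths at the bottom: each failed check jumps forward to the shared put_page:/retry: labels instead of repeating put_page() and a backwards goto, which is where most of the "slightly shorter" comes from.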