1
0
Fork 0

mm: Convert truncate to XArray

This is essentially xa_cmpxchg() with the locking handled above us,
and it doesn't have to handle replacing a NULL entry.

Signed-off-by: Matthew Wilcox <willy@infradead.org>
hifive-unleashed-5.1
Matthew Wilcox 2017-11-25 22:52:46 -05:00
parent a97e7904c0
commit 69b6c1319b
1 changed file with 6 additions and 9 deletions

View File

@@ -33,15 +33,12 @@
 static inline void __clear_shadow_entry(struct address_space *mapping,
 				pgoff_t index, void *entry)
 {
-	struct radix_tree_node *node;
-	void **slot;
+	XA_STATE(xas, &mapping->i_pages, index);
 
-	if (!__radix_tree_lookup(&mapping->i_pages, index, &node, &slot))
+	xas_set_update(&xas, workingset_update_node);
+	if (xas_load(&xas) != entry)
 		return;
-	if (*slot != entry)
-		return;
-	__radix_tree_replace(&mapping->i_pages, node, slot, NULL,
-			workingset_update_node);
+	xas_store(&xas, NULL);
 	mapping->nrexceptional--;
 }
@@ -738,10 +735,10 @@ int invalidate_inode_pages2_range(struct address_space *mapping,
 		index++;
 	}
 	/*
-	 * For DAX we invalidate page tables after invalidating radix tree.  We
+	 * For DAX we invalidate page tables after invalidating page cache.  We
 	 * could invalidate page tables while invalidating each entry however
 	 * that would be expensive.  And doing range unmapping before doesn't
-	 * work as we have no cheap way to find whether radix tree entry didn't
+	 * work as we have no cheap way to find whether page cache entry didn't
 	 * get remapped later.
 	 */
 	if (dax_mapping(mapping)) {