mm/xip: share the i_mmap_rwsem

__xip_unmap() removes the xip sparse page from the cache and tears down
its pte mappings without altering the interval tree, so the i_mmap_rwsem
can be shared (taken for read) when searching for the ptes to unmap.
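
For reference, the shared/exclusive helpers are thin wrappers around the
rwsem; a minimal sketch of the read side as defined in include/linux/fs.h
after the i_mmap_mutex -> i_mmap_rwsem conversion (shown for illustration,
not part of this patch):

static inline void i_mmap_lock_read(struct address_space *mapping)
{
	/* shared: multiple walkers may traverse the interval tree at once */
	down_read(&mapping->i_mmap_rwsem);
}

static inline void i_mmap_unlock_read(struct address_space *mapping)
{
	up_read(&mapping->i_mmap_rwsem);
}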

Additionally, tidy up the function a bit, making the per-vma variables
local to the interval tree walk loop.

Signed-off-by: Davidlohr Bueso <dbueso@suse.de>
Acked-by: "Kirill A. Shutemov" <kirill@shutemov.name>
Acked-by: Hugh Dickins <hughd@google.com>
Cc: Oleg Nesterov <oleg@redhat.com>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Rik van Riel <riel@redhat.com>
Cc: Srikar Dronamraju <srikar@linux.vnet.ibm.com>
Acked-by: Mel Gorman <mgorman@suse.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
commit 874bfcaf79 (parent 4a23717a23)
Davidlohr Bueso, 2014-12-12 16:54:33 -08:00; committed by Linus Torvalds

diff --git a/mm/filemap_xip.c b/mm/filemap_xip.c
--- a/mm/filemap_xip.c
+++ b/mm/filemap_xip.c
@@ -155,22 +155,14 @@ xip_file_read(struct file *filp, char __user *buf, size_t len, loff_t *ppos)
 EXPORT_SYMBOL_GPL(xip_file_read);
 
 /*
- * __xip_unmap is invoked from xip_unmap and
- * xip_write
+ * __xip_unmap is invoked from xip_unmap and xip_write
  *
  * This function walks all vmas of the address_space and unmaps the
  * __xip_sparse_page when found at pgoff.
  */
-static void
-__xip_unmap (struct address_space * mapping,
-		     unsigned long  pgoff)
+static void __xip_unmap(struct address_space * mapping, unsigned long pgoff)
 {
 	struct vm_area_struct *vma;
-	struct mm_struct *mm;
-	unsigned long address;
-	pte_t *pte;
-	pte_t pteval;
-	spinlock_t *ptl;
 	struct page *page;
 	unsigned count;
 	int locked = 0;
@@ -182,11 +174,14 @@ __xip_unmap (struct address_space * mapping,
 		return;
 
 retry:
-	i_mmap_lock_write(mapping);
+	i_mmap_lock_read(mapping);
 	vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) {
-		mm = vma->vm_mm;
-		address = vma->vm_start +
+		pte_t *pte, pteval;
+		spinlock_t *ptl;
+		struct mm_struct *mm = vma->vm_mm;
+		unsigned long address = vma->vm_start +
 			((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
+
 		BUG_ON(address < vma->vm_start || address >= vma->vm_end);
 		pte = page_check_address(page, mm, address, &ptl, 1);
 		if (pte) {
@@ -202,7 +197,7 @@ retry:
 			page_cache_release(page);
 		}
 	}
-	i_mmap_unlock_write(mapping);
+	i_mmap_unlock_read(mapping);
 
 	if (locked) {
 		mutex_unlock(&xip_sparse_mutex);
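
For completeness, a sketch of how the read-locked walk sits inside the
function's existing seqcount retry scheme (simplified from
mm/filemap_xip.c; the xip_sparse_seq/xip_sparse_mutex tail is outside
the hunks above and reconstructed here for illustration):

	count = read_seqcount_begin(&xip_sparse_seq);

	page = __xip_sparse_page;
	if (!page)
		return;

retry:
	i_mmap_lock_read(mapping);
	/* interval tree walk: unmap __xip_sparse_page from each vma */
	i_mmap_unlock_read(mapping);

	if (locked) {
		mutex_unlock(&xip_sparse_mutex);
	} else if (read_seqcount_retry(&xip_sparse_seq, count)) {
		/* lost a race with a writer: serialize on the mutex and retry */
		mutex_lock(&xip_sparse_mutex);
		locked = 1;
		goto retry;
	}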