
dax: Allow dax_iomap_fault() to return pfn

For synchronous page faults, dax_iomap_fault() will need to return the PFN,
which will then need to be inserted into the page tables after fsync()
completes. Add the necessary parameter to dax_iomap_fault().
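
A minimal sketch of the calling convention after this change (the local pfn
and the my_iomap_ops name are placeholders for a filesystem's own state, not
part of this patch):

	pfn_t pfn;
	int ret;

	/* Callers that do not need the PFN, like ext2/ext4/xfs below, pass NULL. */
	ret = dax_iomap_fault(vmf, PE_SIZE_PTE, NULL, &my_iomap_ops);

	/*
	 * Callers implementing synchronous faults pass a pfn_t pointer so that
	 * follow-up patches can hand back the PFN to be inserted into the page
	 * tables once fsync() completes.
	 */
	ret = dax_iomap_fault(vmf, PE_SIZE_PTE, &pfn, &my_iomap_ops);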

Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Ross Zwisler <ross.zwisler@linux.intel.com>
Signed-off-by: Jan Kara <jack@suse.cz>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
Jan Kara 2017-11-01 16:36:39 +01:00 committed by Dan Williams
parent cec04e8c82
commit 9a0dd42251
5 changed files with 12 additions and 11 deletions

fs/dax.c

@@ -1079,7 +1079,7 @@ static int dax_fault_return(int error)
return VM_FAULT_SIGBUS;
}
-static int dax_iomap_pte_fault(struct vm_fault *vmf,
+static int dax_iomap_pte_fault(struct vm_fault *vmf, pfn_t *pfnp,
const struct iomap_ops *ops)
{
struct vm_area_struct *vma = vmf->vma;
@@ -1280,7 +1280,7 @@ fallback:
return VM_FAULT_FALLBACK;
}
-static int dax_iomap_pmd_fault(struct vm_fault *vmf,
+static int dax_iomap_pmd_fault(struct vm_fault *vmf, pfn_t *pfnp,
const struct iomap_ops *ops)
{
struct vm_area_struct *vma = vmf->vma;
@@ -1425,7 +1425,7 @@ out:
return result;
}
#else
-static int dax_iomap_pmd_fault(struct vm_fault *vmf,
+static int dax_iomap_pmd_fault(struct vm_fault *vmf, pfn_t *pfnp,
const struct iomap_ops *ops)
{
return VM_FAULT_FALLBACK;
@@ -1436,6 +1436,7 @@ static int dax_iomap_pmd_fault(struct vm_fault *vmf,
* dax_iomap_fault - handle a page fault on a DAX file
* @vmf: The description of the fault
* @pe_size: Size of the page to fault in
+* @pfnp: PFN to insert for synchronous faults if fsync is required
* @ops: Iomap ops passed from the file system
*
* When a page fault occurs, filesystems may call this helper in
@@ -1444,13 +1445,13 @@ static int dax_iomap_pmd_fault(struct vm_fault *vmf,
* successfully.
*/
int dax_iomap_fault(struct vm_fault *vmf, enum page_entry_size pe_size,
-const struct iomap_ops *ops)
+pfn_t *pfnp, const struct iomap_ops *ops)
{
switch (pe_size) {
case PE_SIZE_PTE:
-return dax_iomap_pte_fault(vmf, ops);
+return dax_iomap_pte_fault(vmf, pfnp, ops);
case PE_SIZE_PMD:
-return dax_iomap_pmd_fault(vmf, ops);
+return dax_iomap_pmd_fault(vmf, pfnp, ops);
default:
return VM_FAULT_FALLBACK;
}

fs/ext2/file.c

@@ -99,7 +99,7 @@ static int ext2_dax_fault(struct vm_fault *vmf)
}
down_read(&ei->dax_sem);
-ret = dax_iomap_fault(vmf, PE_SIZE_PTE, &ext2_iomap_ops);
+ret = dax_iomap_fault(vmf, PE_SIZE_PTE, NULL, &ext2_iomap_ops);
up_read(&ei->dax_sem);
if (vmf->flags & FAULT_FLAG_WRITE)

fs/ext4/file.c

@@ -306,7 +306,7 @@ static int ext4_dax_huge_fault(struct vm_fault *vmf,
down_read(&EXT4_I(inode)->i_mmap_sem);
}
if (!IS_ERR(handle))
-result = dax_iomap_fault(vmf, pe_size, &ext4_iomap_ops);
+result = dax_iomap_fault(vmf, pe_size, NULL, &ext4_iomap_ops);
else
result = VM_FAULT_SIGBUS;
if (write) {

fs/xfs/xfs_file.c

@@ -1040,7 +1040,7 @@ __xfs_filemap_fault(
xfs_ilock(XFS_I(inode), XFS_MMAPLOCK_SHARED);
if (IS_DAX(inode)) {
-ret = dax_iomap_fault(vmf, pe_size, &xfs_iomap_ops);
+ret = dax_iomap_fault(vmf, pe_size, NULL, &xfs_iomap_ops);
} else {
if (write_fault)
ret = iomap_page_mkwrite(vmf, &xfs_iomap_ops);
@@ -1111,7 +1111,7 @@ xfs_filemap_pfn_mkwrite(
if (vmf->pgoff >= size)
ret = VM_FAULT_SIGBUS;
else if (IS_DAX(inode))
-ret = dax_iomap_fault(vmf, PE_SIZE_PTE, &xfs_iomap_ops);
+ret = dax_iomap_fault(vmf, PE_SIZE_PTE, NULL, &xfs_iomap_ops);
xfs_iunlock(ip, XFS_MMAPLOCK_SHARED);
sb_end_pagefault(inode->i_sb);
return ret;

include/linux/dax.h

@@ -95,7 +95,7 @@ bool dax_write_cache_enabled(struct dax_device *dax_dev);
ssize_t dax_iomap_rw(struct kiocb *iocb, struct iov_iter *iter,
const struct iomap_ops *ops);
int dax_iomap_fault(struct vm_fault *vmf, enum page_entry_size pe_size,
-const struct iomap_ops *ops);
+pfn_t *pfnp, const struct iomap_ops *ops);
int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index);
int dax_invalidate_mapping_entry_sync(struct address_space *mapping,
pgoff_t index);
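
For context, a rough sketch of how the new pfnp argument is expected to be
consumed, modeled on the follow-up patches in this series; VM_FAULT_NEEDDSYNC
and dax_finish_sync_fault() come from those later patches, and my_iomap_ops is
a placeholder, so none of this is part of this commit:

	pfn_t pfn;
	int ret;

	ret = dax_iomap_fault(vmf, pe_size, &pfn, &my_iomap_ops);
	/*
	 * Synchronous fault (later patches): dax_finish_sync_fault() syncs the
	 * faulted range and then inserts the saved PFN into the page tables.
	 */
	if (ret & VM_FAULT_NEEDDSYNC)
		ret = dax_finish_sync_fault(vmf, pe_size, pfn);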