
mm: change invalidatepage prototype to accept length

Currently there is no way to truncate a partial page where the end
truncation point is not at the end of the page. This is because it was not
needed so far, and the existing functionality was enough for the file system
truncate operation to work properly. However, more file systems now support
the punch hole feature, and they can benefit from mm supporting truncation
of a page only up to a certain point.

Specifically, with this functionality truncate_inode_pages_range() can
be changed so that it supports truncating a partial page at the end of the
range (currently it will BUG_ON() if 'end' is not at the end of the
page).

This commit changes the invalidatepage() address space operation
prototype to accept the range to be invalidated, and updates all of its
instances accordingly.

We also change block_invalidatepage() in the same way and actually make
use of the new length argument to implement range invalidation.

Actual file system implementations will follow, except for the file systems
where the changes are really simple and should not change the behaviour in
any way. An implementation of truncate_page_range(), which will be able to
accept page-unaligned ranges, will follow as well.

Signed-off-by: Lukas Czerner <lczerner@redhat.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Hugh Dickins <hughd@google.com>
Lukas Czerner 2013-05-21 23:17:23 -04:00 committed by Theodore Ts'o
parent c7788792a5
commit d47992f86b
30 changed files with 116 additions and 69 deletions
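
For illustration only (an editor's sketch, not part of this commit): a minimal kernel-style example of how a hypothetical filesystem callback adapts to the new prototype. The name foofs_invalidatepage and its helper are assumptions; only the signatures and the PAGE_CACHE_SIZE check mirror what the commit introduces.

#include <linux/pagemap.h>	/* PAGE_CACHE_SIZE in kernels of this era */

/*
 * Old prototype (before this commit):
 *   void (*invalidatepage)(struct page *, unsigned long offset);
 *
 * New prototype (after this commit): start offset and length of the range.
 */
static void foofs_invalidatepage(struct page *page, unsigned int offset,
				 unsigned int length)
{
	/* Drop per-page private state only when the whole page goes away. */
	if (offset == 0 && length == PAGE_CACHE_SIZE)
		foofs_release_private_state(page);	/* hypothetical helper */
}

The same pattern appears in the v9fs, AFS, CIFS, NFS and UBIFS hunks below, which previously checked offset == 0 alone.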


@ -189,7 +189,7 @@ prototypes:
loff_t pos, unsigned len, unsigned copied,
struct page *page, void *fsdata);
sector_t (*bmap)(struct address_space *, sector_t);
int (*invalidatepage) (struct page *, unsigned long);
void (*invalidatepage) (struct page *, unsigned int, unsigned int);
int (*releasepage) (struct page *, int);
void (*freepage)(struct page *);
int (*direct_IO)(int, struct kiocb *, const struct iovec *iov,
@ -310,8 +310,8 @@ filesystems and by the swapper. The latter will eventually go away. Please,
keep it that way and don't breed new callers.
->invalidatepage() is called when the filesystem must attempt to drop
some or all of the buffers from the page when it is being truncated. It
returns zero on success. If ->invalidatepage is zero, the kernel uses
block_invalidatepage() instead.
->releasepage() is called when the kernel is about to try to drop the
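
A hedged aside (not in the commit): the fallback described above means a simple buffer_head-backed filesystem can leave ->invalidatepage unset and still get range-aware invalidation, because do_invalidatepage() (see the mm/truncate.c hunk at the end of this commit) substitutes block_invalidatepage(). The filesystem name and callbacks below are assumptions.

static const struct address_space_operations foofs_aops = {
	.readpage	= foofs_readpage,	/* hypothetical */
	.writepage	= foofs_writepage,	/* hypothetical */
	/*
	 * .invalidatepage left NULL on purpose: do_invalidatepage() then
	 * falls back to block_invalidatepage(), which after this commit
	 * honours the (offset, length) range.
	 */
};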


@ -549,7 +549,7 @@ struct address_space_operations
-------------------------------
This describes how the VFS can manipulate mapping of a file to page cache in
your filesystem. As of kernel 2.6.22, the following members are defined:
your filesystem. The following members are defined:
struct address_space_operations {
int (*writepage)(struct page *page, struct writeback_control *wbc);
@ -566,7 +566,7 @@ struct address_space_operations {
loff_t pos, unsigned len, unsigned copied,
struct page *page, void *fsdata);
sector_t (*bmap)(struct address_space *, sector_t);
int (*invalidatepage) (struct page *, unsigned long);
void (*invalidatepage) (struct page *, unsigned int, unsigned int);
int (*releasepage) (struct page *, int);
void (*freepage)(struct page *);
ssize_t (*direct_IO)(int, struct kiocb *, const struct iovec *iov,
@ -685,14 +685,14 @@ struct address_space_operations {
invalidatepage: If a page has PagePrivate set, then invalidatepage
will be called when part or all of the page is to be removed
from the address space. This generally corresponds to either a
truncation or a complete invalidation of the address space
(in the latter case 'offset' will always be 0).
Any private data associated with the page should be updated
to reflect this truncation. If offset is 0, then
the private data should be released, because the page
must be able to be completely discarded. This may be done by
calling the ->releasepage function, but in this case the
release MUST succeed.
truncation, punch hole or a complete invalidation of the address
space (in the latter case 'offset' will always be 0 and 'length'
will be PAGE_CACHE_SIZE). Any private data associated with the page
should be updated to reflect this truncation. If offset is 0 and
length is PAGE_CACHE_SIZE, then the private data should be released,
because the page must be able to be completely discarded. This may
be done by calling the ->releasepage function, but in this case the
release MUST succeed.
releasepage: releasepage is called on PagePrivate pages to indicate
that the page should be freed if possible. ->releasepage
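
To make the (offset, length) contract documented above concrete, here is a self-contained userspace sketch (an editor's illustration with an assumed 4096-byte PAGE_CACHE_SIZE, not code from the commit) showing how an arbitrary byte range, such as a punched hole, splits into per-page invalidation calls; only a page covered from offset 0 for the full PAGE_CACHE_SIZE may have its private data released.

#include <stdio.h>

#define PAGE_CACHE_SIZE 4096ULL		/* assumed page size for the example */

/* Print the per-page (offset, length) pairs needed to invalidate [start, end). */
static void show_range(unsigned long long start, unsigned long long end)
{
	unsigned long long pos = start;

	while (pos < end) {
		unsigned long long page_index = pos / PAGE_CACHE_SIZE;
		unsigned long long page_end = (page_index + 1) * PAGE_CACHE_SIZE;
		unsigned int offset = (unsigned int)(pos % PAGE_CACHE_SIZE);
		unsigned int length = (unsigned int)((end < page_end ? end : page_end) - pos);

		printf("page %llu: offset=%u length=%u%s\n", page_index, offset, length,
		       (offset == 0 && length == PAGE_CACHE_SIZE) ?
		       " (full page: private data may be released)" : " (partial page)");
		pos += length;
	}
}

int main(void)
{
	show_range(3000, 9000);	/* hypothetical hole: partial, full, partial page */
	return 0;
}

Run on the range [3000, 9000) it prints one leading partial page, one full page and one trailing partial page, which is exactly the case the commit message says truncate_inode_pages_range() will later be taught to handle.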


@ -148,13 +148,14 @@ static int v9fs_release_page(struct page *page, gfp_t gfp)
* @offset: offset in the page
*/
static void v9fs_invalidate_page(struct page *page, unsigned long offset)
static void v9fs_invalidate_page(struct page *page, unsigned int offset,
unsigned int length)
{
/*
* If called with zero offset, we should release
* the private state assocated with the page
*/
if (offset == 0)
if (offset == 0 && length == PAGE_CACHE_SIZE)
v9fs_fscache_invalidate_page(page);
}


@ -19,7 +19,8 @@
#include "internal.h"
static int afs_readpage(struct file *file, struct page *page);
static void afs_invalidatepage(struct page *page, unsigned long offset);
static void afs_invalidatepage(struct page *page, unsigned int offset,
unsigned int length);
static int afs_releasepage(struct page *page, gfp_t gfp_flags);
static int afs_launder_page(struct page *page);
@ -310,16 +311,17 @@ static int afs_launder_page(struct page *page)
* - release a page and clean up its private data if offset is 0 (indicating
* the entire page)
*/
static void afs_invalidatepage(struct page *page, unsigned long offset)
static void afs_invalidatepage(struct page *page, unsigned int offset,
unsigned int length)
{
struct afs_writeback *wb = (struct afs_writeback *) page_private(page);
_enter("{%lu},%lu", page->index, offset);
_enter("{%lu},%u,%u", page->index, offset, length);
BUG_ON(!PageLocked(page));
/* we clean up only if the entire page is being invalidated */
if (offset == 0) {
if (offset == 0 && length == PAGE_CACHE_SIZE) {
#ifdef CONFIG_AFS_FSCACHE
if (PageFsCache(page)) {
struct afs_vnode *vnode = AFS_FS_I(page->mapping->host);


@ -1013,7 +1013,8 @@ static int btree_releasepage(struct page *page, gfp_t gfp_flags)
return try_release_extent_buffer(page);
}
static void btree_invalidatepage(struct page *page, unsigned long offset)
static void btree_invalidatepage(struct page *page, unsigned int offset,
unsigned int length)
{
struct extent_io_tree *tree;
tree = &BTRFS_I(page->mapping->host)->io_tree;


@ -2957,7 +2957,7 @@ static int __extent_writepage(struct page *page, struct writeback_control *wbc,
pg_offset = i_size & (PAGE_CACHE_SIZE - 1);
if (page->index > end_index ||
(page->index == end_index && !pg_offset)) {
page->mapping->a_ops->invalidatepage(page, 0);
page->mapping->a_ops->invalidatepage(page, 0, PAGE_CACHE_SIZE);
unlock_page(page);
return 0;
}


@ -7510,7 +7510,8 @@ static int btrfs_releasepage(struct page *page, gfp_t gfp_flags)
return __btrfs_releasepage(page, gfp_flags & GFP_NOFS);
}
static void btrfs_invalidatepage(struct page *page, unsigned long offset)
static void btrfs_invalidatepage(struct page *page, unsigned int offset,
unsigned int length)
{
struct inode *inode = page->mapping->host;
struct extent_io_tree *tree;


@ -1454,7 +1454,8 @@ static void discard_buffer(struct buffer_head * bh)
* block_invalidatepage - invalidate part or all of a buffer-backed page
*
* @page: the page which is affected
* @offset: the index of the truncation point
* @offset: start of the range to invalidate
* @length: length of the range to invalidate
*
* block_invalidatepage() is called when all or part of the page has become
* invalidated by a truncate operation.
@ -1465,21 +1466,34 @@ static void discard_buffer(struct buffer_head * bh)
* point. Because the caller is about to free (and possibly reuse) those
* blocks on-disk.
*/
void block_invalidatepage(struct page *page, unsigned long offset)
void block_invalidatepage(struct page *page, unsigned int offset,
unsigned int length)
{
struct buffer_head *head, *bh, *next;
unsigned int curr_off = 0;
unsigned int stop = length + offset;
BUG_ON(!PageLocked(page));
if (!page_has_buffers(page))
goto out;
/*
* Check for overflow
*/
BUG_ON(stop > PAGE_CACHE_SIZE || stop < length);
head = page_buffers(page);
bh = head;
do {
unsigned int next_off = curr_off + bh->b_size;
next = bh->b_this_page;
/*
* Are we still fully in range ?
*/
if (next_off > stop)
goto out;
/*
* is this block fully invalidated?
*/
@ -1501,6 +1515,7 @@ out:
}
EXPORT_SYMBOL(block_invalidatepage);
/*
* We attach and possibly dirty the buffers atomically wrt
* __set_page_dirty_buffers() via private_lock. try_to_free_buffers
@ -2841,7 +2856,7 @@ int block_write_full_page_endio(struct page *page, get_block_t *get_block,
* they may have been added in ext3_writepage(). Make them
* freeable here, so the page does not leak.
*/
do_invalidatepage(page, 0);
do_invalidatepage(page, 0, PAGE_CACHE_SIZE);
unlock_page(page);
return 0; /* don't care */
}
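
The heart of this commit is the new walk above in block_invalidatepage(): a buffer is discarded only if it starts at or after 'offset' and ends at or before 'stop' (offset + length). Below is a self-contained userspace simulation of that walk, an editor's sketch with assumed 1024-byte buffers in a 4096-byte page; discard_buffer() is replaced by a printout.

#include <stdio.h>

#define PAGE_CACHE_SIZE	4096u	/* assumed */
#define BLOCK_SIZE	1024u	/* assumed buffer_head size (4 buffers per page) */

/* Mimic the curr_off / next_off / stop walk in the new block_invalidatepage(). */
static void invalidate_buffers(unsigned int offset, unsigned int length)
{
	unsigned int stop = offset + length;	/* end of the invalidated range */
	unsigned int curr_off = 0;

	while (curr_off < PAGE_CACHE_SIZE) {
		unsigned int next_off = curr_off + BLOCK_SIZE;

		/* Stop once a buffer would extend past the invalidated range. */
		if (next_off > stop)
			return;
		/* Discard only buffers that start at or after 'offset'. */
		if (curr_off >= offset)
			printf("discard buffer [%u, %u)\n", curr_off, next_off);
		curr_off = next_off;
	}
}

int main(void)
{
	invalidate_buffers(1024, 2048);	/* middle two of four 1k buffers */
	return 0;
}

For offset=1024, length=2048 this discards the two middle buffers and leaves the first and last intact, a case the old single-offset prototype could not express.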


@ -143,7 +143,8 @@ static int ceph_set_page_dirty(struct page *page)
* dirty page counters appropriately. Only called if there is private
* data on the page.
*/
static void ceph_invalidatepage(struct page *page, unsigned long offset)
static void ceph_invalidatepage(struct page *page, unsigned int offset,
unsigned int length)
{
struct inode *inode;
struct ceph_inode_info *ci;
@ -168,7 +169,7 @@ static void ceph_invalidatepage(struct page *page, unsigned long offset)
ci = ceph_inode(inode);
if (offset == 0) {
dout("%p invalidatepage %p idx %lu full dirty page %lu\n",
dout("%p invalidatepage %p idx %lu full dirty page %u\n",
inode, page, page->index, offset);
ceph_put_wrbuffer_cap_refs(ci, 1, snapc);
ceph_put_snap_context(snapc);


@ -3546,11 +3546,12 @@ static int cifs_release_page(struct page *page, gfp_t gfp)
return cifs_fscache_release_page(page, gfp);
}
static void cifs_invalidate_page(struct page *page, unsigned long offset)
static void cifs_invalidate_page(struct page *page, unsigned int offset,
unsigned int length)
{
struct cifsInodeInfo *cifsi = CIFS_I(page->mapping->host);
if (offset == 0)
if (offset == 0 && length == PAGE_CACHE_SIZE)
cifs_fscache_invalidate_page(page, &cifsi->vfs_inode);
}


@ -953,9 +953,11 @@ static int exofs_releasepage(struct page *page, gfp_t gfp)
return 0;
}
static void exofs_invalidatepage(struct page *page, unsigned long offset)
static void exofs_invalidatepage(struct page *page, unsigned int offset,
unsigned int length)
{
EXOFS_DBGMSG("page 0x%lx offset 0x%lx\n", page->index, offset);
EXOFS_DBGMSG("page 0x%lx offset 0x%x length 0x%x\n",
page->index, offset, length);
WARN_ON(1);
}


@ -1825,7 +1825,8 @@ ext3_readpages(struct file *file, struct address_space *mapping,
return mpage_readpages(mapping, pages, nr_pages, ext3_get_block);
}
static void ext3_invalidatepage(struct page *page, unsigned long offset)
static void ext3_invalidatepage(struct page *page, unsigned int offset,
unsigned int length)
{
journal_t *journal = EXT3_JOURNAL(page->mapping->host);


@ -132,7 +132,8 @@ static inline int ext4_begin_ordered_truncate(struct inode *inode,
new_size);
}
static void ext4_invalidatepage(struct page *page, unsigned long offset);
static void ext4_invalidatepage(struct page *page, unsigned int offset,
unsigned int length);
static int __ext4_journalled_writepage(struct page *page, unsigned int len);
static int ext4_bh_delay_or_unwritten(handle_t *handle, struct buffer_head *bh);
static int ext4_discard_partial_page_buffers_no_lock(handle_t *handle,
@ -1606,7 +1607,7 @@ static void ext4_da_block_invalidatepages(struct mpage_da_data *mpd)
break;
BUG_ON(!PageLocked(page));
BUG_ON(PageWriteback(page));
block_invalidatepage(page, 0);
block_invalidatepage(page, 0, PAGE_CACHE_SIZE);
ClearPageUptodate(page);
unlock_page(page);
}
@ -2829,7 +2830,8 @@ static int ext4_da_write_end(struct file *file,
return ret ? ret : copied;
}
static void ext4_da_invalidatepage(struct page *page, unsigned long offset)
static void ext4_da_invalidatepage(struct page *page, unsigned int offset,
unsigned int length)
{
/*
* Drop reserved blocks
@ -2841,7 +2843,7 @@ static void ext4_da_invalidatepage(struct page *page, unsigned long offset)
ext4_da_page_release_reservation(page, offset);
out:
ext4_invalidatepage(page, offset);
ext4_invalidatepage(page, offset, length);
return;
}
@ -2989,14 +2991,15 @@ ext4_readpages(struct file *file, struct address_space *mapping,
return mpage_readpages(mapping, pages, nr_pages, ext4_get_block);
}
static void ext4_invalidatepage(struct page *page, unsigned long offset)
static void ext4_invalidatepage(struct page *page, unsigned int offset,
unsigned int length)
{
trace_ext4_invalidatepage(page, offset);
/* No journalling happens on data buffers when this function is used */
WARN_ON(page_has_buffers(page) && buffer_jbd(page_buffers(page)));
block_invalidatepage(page, offset);
block_invalidatepage(page, offset, PAGE_CACHE_SIZE - offset);
}
static int __ext4_journalled_invalidatepage(struct page *page,
@ -3017,7 +3020,8 @@ static int __ext4_journalled_invalidatepage(struct page *page,
/* Wrapper for aops... */
static void ext4_journalled_invalidatepage(struct page *page,
unsigned long offset)
unsigned int offset,
unsigned int length)
{
WARN_ON(__ext4_journalled_invalidatepage(page, offset) < 0);
}


@ -698,7 +698,8 @@ static ssize_t f2fs_direct_IO(int rw, struct kiocb *iocb,
get_data_block_ro);
}
static void f2fs_invalidate_data_page(struct page *page, unsigned long offset)
static void f2fs_invalidate_data_page(struct page *page, unsigned int offset,
unsigned int length)
{
struct inode *inode = page->mapping->host;
struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);


@ -1205,7 +1205,8 @@ static int f2fs_set_node_page_dirty(struct page *page)
return 0;
}
static void f2fs_invalidate_node_page(struct page *page, unsigned long offset)
static void f2fs_invalidate_node_page(struct page *page, unsigned int offset,
unsigned int length)
{
struct inode *inode = page->mapping->host;
struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);


@ -110,7 +110,7 @@ static int gfs2_writepage_common(struct page *page,
/* Is the page fully outside i_size? (truncate in progress) */
offset = i_size & (PAGE_CACHE_SIZE-1);
if (page->index > end_index || (page->index == end_index && !offset)) {
page->mapping->a_ops->invalidatepage(page, 0);
page->mapping->a_ops->invalidatepage(page, 0, PAGE_CACHE_SIZE);
goto out;
}
return 1;
@ -299,7 +299,8 @@ static int gfs2_write_jdata_pagevec(struct address_space *mapping,
/* Is the page fully outside i_size? (truncate in progress) */
if (page->index > end_index || (page->index == end_index && !offset)) {
page->mapping->a_ops->invalidatepage(page, 0);
page->mapping->a_ops->invalidatepage(page, 0,
PAGE_CACHE_SIZE);
unlock_page(page);
continue;
}
@ -943,7 +944,8 @@ static void gfs2_discard(struct gfs2_sbd *sdp, struct buffer_head *bh)
unlock_buffer(bh);
}
static void gfs2_invalidatepage(struct page *page, unsigned long offset)
static void gfs2_invalidatepage(struct page *page, unsigned int offset,
unsigned int length)
{
struct gfs2_sbd *sdp = GFS2_SB(page->mapping->host);
struct buffer_head *bh, *head;


@ -571,9 +571,10 @@ static int metapage_releasepage(struct page *page, gfp_t gfp_mask)
return ret;
}
static void metapage_invalidatepage(struct page *page, unsigned long offset)
static void metapage_invalidatepage(struct page *page, unsigned int offset,
unsigned int length)
{
BUG_ON(offset);
BUG_ON(offset || length < PAGE_CACHE_SIZE);
BUG_ON(PageWriteback(page));


@ -159,7 +159,8 @@ static int logfs_writepage(struct page *page, struct writeback_control *wbc)
return __logfs_writepage(page);
}
static void logfs_invalidatepage(struct page *page, unsigned long offset)
static void logfs_invalidatepage(struct page *page, unsigned int offset,
unsigned int length)
{
struct logfs_block *block = logfs_block(page);


@ -884,7 +884,8 @@ static struct logfs_area *alloc_area(struct super_block *sb)
return area;
}
static void map_invalidatepage(struct page *page, unsigned long l)
static void map_invalidatepage(struct page *page, unsigned int o,
unsigned int l)
{
return;
}


@ -451,11 +451,13 @@ static int nfs_write_end(struct file *file, struct address_space *mapping,
* - Called if either PG_private or PG_fscache is set on the page
* - Caller holds page lock
*/
static void nfs_invalidate_page(struct page *page, unsigned long offset)
static void nfs_invalidate_page(struct page *page, unsigned int offset,
unsigned int length)
{
dfprintk(PAGECACHE, "NFS: invalidate_page(%p, %lu)\n", page, offset);
dfprintk(PAGECACHE, "NFS: invalidate_page(%p, %u, %u)\n",
page, offset, length);
if (offset != 0)
if (offset != 0 || length < PAGE_CACHE_SIZE)
return;
/* Cancel any unstarted writes on this page */
nfs_wb_page_cancel(page_file_mapping(page)->host, page);


@ -1372,7 +1372,7 @@ retry_writepage:
* The page may have dirty, unmapped buffers. Make them
* freeable here, so the page does not leak.
*/
block_invalidatepage(page, 0);
block_invalidatepage(page, 0, PAGE_CACHE_SIZE);
unlock_page(page);
ntfs_debug("Write outside i_size - truncated?");
return 0;


@ -603,7 +603,8 @@ static void ocfs2_dio_end_io(struct kiocb *iocb,
* from ext3. PageChecked() bits have been removed as OCFS2 does not
* do journalled data.
*/
static void ocfs2_invalidatepage(struct page *page, unsigned long offset)
static void ocfs2_invalidatepage(struct page *page, unsigned int offset,
unsigned int length)
{
journal_t *journal = OCFS2_SB(page->mapping->host->i_sb)->journal->j_journal;


@ -2970,7 +2970,8 @@ static int invalidatepage_can_drop(struct inode *inode, struct buffer_head *bh)
}
/* clm -- taken from fs/buffer.c:block_invalidate_page */
static void reiserfs_invalidatepage(struct page *page, unsigned long offset)
static void reiserfs_invalidatepage(struct page *page, unsigned int offset,
unsigned int length)
{
struct buffer_head *head, *bh, *next;
struct inode *inode = page->mapping->host;


@ -1277,13 +1277,14 @@ int ubifs_setattr(struct dentry *dentry, struct iattr *attr)
return err;
}
static void ubifs_invalidatepage(struct page *page, unsigned long offset)
static void ubifs_invalidatepage(struct page *page, unsigned int offset,
unsigned int length)
{
struct inode *inode = page->mapping->host;
struct ubifs_info *c = inode->i_sb->s_fs_info;
ubifs_assert(PagePrivate(page));
if (offset)
if (offset || length < PAGE_CACHE_SIZE)
/* Partial page remains dirty */
return;


@ -824,10 +824,11 @@ xfs_cluster_write(
STATIC void
xfs_vm_invalidatepage(
struct page *page,
unsigned long offset)
unsigned int offset,
unsigned int length)
{
trace_xfs_invalidatepage(page->mapping->host, page, offset);
block_invalidatepage(page, offset);
block_invalidatepage(page, offset, PAGE_CACHE_SIZE - offset);
}
/*
@ -891,7 +892,7 @@ next_buffer:
xfs_iunlock(ip, XFS_ILOCK_EXCL);
out_invalidate:
xfs_vm_invalidatepage(page, 0);
xfs_vm_invalidatepage(page, 0, PAGE_CACHE_SIZE);
return;
}


@ -198,7 +198,8 @@ extern int buffer_heads_over_limit;
* Generic address_space_operations implementations for buffer_head-backed
* address_spaces.
*/
void block_invalidatepage(struct page *page, unsigned long offset);
void block_invalidatepage(struct page *page, unsigned int offset,
unsigned int length);
int block_write_full_page(struct page *page, get_block_t *get_block,
struct writeback_control *wbc);
int block_write_full_page_endio(struct page *page, get_block_t *get_block,


@ -364,7 +364,7 @@ struct address_space_operations {
/* Unfortunately this kludge is needed for FIBMAP. Don't use it */
sector_t (*bmap)(struct address_space *, sector_t);
void (*invalidatepage) (struct page *, unsigned long);
void (*invalidatepage) (struct page *, unsigned int, unsigned int);
int (*releasepage) (struct page *, gfp_t);
void (*freepage)(struct page *);
ssize_t (*direct_IO)(int, struct kiocb *, const struct iovec *iov,


@ -1041,7 +1041,8 @@ int get_kernel_page(unsigned long start, int write, struct page **pages);
struct page *get_dump_page(unsigned long addr);
extern int try_to_release_page(struct page * page, gfp_t gfp_mask);
extern void do_invalidatepage(struct page *page, unsigned long offset);
extern void do_invalidatepage(struct page *page, unsigned int offset,
unsigned int length);
int __set_page_dirty_nobuffers(struct page *page);
int __set_page_dirty_no_writeback(struct page *page);


@ -48,7 +48,7 @@ static void read_cache_pages_invalidate_page(struct address_space *mapping,
if (!trylock_page(page))
BUG();
page->mapping = mapping;
do_invalidatepage(page, 0);
do_invalidatepage(page, 0, PAGE_CACHE_SIZE);
page->mapping = NULL;
unlock_page(page);
}


@ -26,7 +26,8 @@
/**
* do_invalidatepage - invalidate part or all of a page
* @page: the page which is affected
* @offset: the index of the truncation point
* @offset: start of the range to invalidate
* @length: length of the range to invalidate
*
* do_invalidatepage() is called when all or part of the page has become
* invalidated by a truncate operation.
@ -37,16 +38,18 @@
* point. Because the caller is about to free (and possibly reuse) those
* blocks on-disk.
*/
void do_invalidatepage(struct page *page, unsigned long offset)
void do_invalidatepage(struct page *page, unsigned int offset,
unsigned int length)
{
void (*invalidatepage)(struct page *, unsigned long);
void (*invalidatepage)(struct page *, unsigned int, unsigned int);
invalidatepage = page->mapping->a_ops->invalidatepage;
#ifdef CONFIG_BLOCK
if (!invalidatepage)
invalidatepage = block_invalidatepage;
#endif
if (invalidatepage)
(*invalidatepage)(page, offset);
(*invalidatepage)(page, offset, length);
}
static inline void truncate_partial_page(struct page *page, unsigned partial)
@ -54,7 +57,7 @@ static inline void truncate_partial_page(struct page *page, unsigned partial)
zero_user_segment(page, partial, PAGE_CACHE_SIZE);
cleancache_invalidate_page(page->mapping, page);
if (page_has_private(page))
do_invalidatepage(page, partial);
do_invalidatepage(page, partial, PAGE_CACHE_SIZE - partial);
}
/*
@ -103,7 +106,7 @@ truncate_complete_page(struct address_space *mapping, struct page *page)
return -EIO;
if (page_has_private(page))
do_invalidatepage(page, 0);
do_invalidatepage(page, 0, PAGE_CACHE_SIZE);
cancel_dirty_page(page, PAGE_CACHE_SIZE);
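
Finally, a small editor's sketch (assumed 4096-byte PAGE_CACHE_SIZE and a hypothetical i_size, not code from the commit) of the arithmetic behind the two mm/truncate.c call sites above: truncate_partial_page() invalidates from the new EOF offset to the end of the page, while truncate_complete_page() invalidates the whole page.

#include <stdio.h>

#define PAGE_CACHE_SIZE 4096u	/* assumed page size for the example */

int main(void)
{
	unsigned long long new_i_size = 10000;	/* hypothetical truncation target */
	unsigned int partial = (unsigned int)(new_i_size % PAGE_CACHE_SIZE);

	/* truncate_partial_page(): do_invalidatepage(page, partial, PAGE_CACHE_SIZE - partial) */
	printf("partial last page: offset=%u length=%u\n",
	       partial, PAGE_CACHE_SIZE - partial);
	/* truncate_complete_page(): do_invalidatepage(page, 0, PAGE_CACHE_SIZE) */
	printf("fully truncated page: offset=0 length=%u\n", PAGE_CACHE_SIZE);
	return 0;
}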