
GFS2: directly write blocks past i_size

GFS2 was relying on the writepage code to write out the zeroed data for
fallocate.  However, with FALLOC_FL_KEEP_SIZE set, that data may lie past
i_size, in which case writepage ignores it.  To work around this, gfs2 now
calls write_dirty_buffer directly on the buffer_heads when
FALLOC_FL_KEEP_SIZE is set and the write is past i_size.

This version is just a cleanup of my last version.
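
For context, the case being fixed can be triggered from userspace with an
fallocate() call that allocates blocks past EOF while keeping i_size
unchanged.  A minimal illustrative reproducer follows (the mount point and
file name are hypothetical, not taken from this patch):

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <linux/falloc.h>

int main(void)
{
	/* hypothetical gfs2 mount point */
	int fd = open("/mnt/gfs2/testfile", O_CREAT | O_RDWR, 0644);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* i_size stays 0; the allocated range is entirely past EOF,
	 * so the zeroed blocks would previously be skipped by writepage */
	if (fallocate(fd, FALLOC_FL_KEEP_SIZE, 0, 1 << 20) < 0)
		perror("fallocate");
	close(fd);
	return 0;
}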

Signed-off-by: Benjamin Marzinski <bmarzins@redhat.com>
Signed-off-by: Steven Whitehouse <swhiteho@redhat.com>
Benjamin Marzinski 2011-03-17 21:54:46 -05:00 committed by Steven Whitehouse
parent deab72d379
commit 0ee532062f
1 changed file with 48 additions and 10 deletions


@@ -617,18 +617,51 @@ static ssize_t gfs2_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
 	return generic_file_aio_write(iocb, iov, nr_segs, pos);
 }
 
-static void empty_write_end(struct page *page, unsigned from,
-			    unsigned to)
+static int empty_write_end(struct page *page, unsigned from,
+			   unsigned to, int mode)
 {
-	struct gfs2_inode *ip = GFS2_I(page->mapping->host);
+	struct inode *inode = page->mapping->host;
+	struct gfs2_inode *ip = GFS2_I(inode);
+	struct buffer_head *bh;
+	unsigned offset, blksize = 1 << inode->i_blkbits;
+	pgoff_t end_index = i_size_read(inode) >> PAGE_CACHE_SHIFT;
 
 	zero_user(page, from, to-from);
 	mark_page_accessed(page);
 
-	if (!gfs2_is_writeback(ip))
-		gfs2_page_add_databufs(ip, page, from, to);
+	if (page->index < end_index || !(mode & FALLOC_FL_KEEP_SIZE)) {
+		if (!gfs2_is_writeback(ip))
+			gfs2_page_add_databufs(ip, page, from, to);
 
-	block_commit_write(page, from, to);
+		block_commit_write(page, from, to);
+		return 0;
+	}
+
+	offset = 0;
+	bh = page_buffers(page);
+	while (offset < to) {
+		if (offset >= from) {
+			set_buffer_uptodate(bh);
+			mark_buffer_dirty(bh);
+			clear_buffer_new(bh);
+			write_dirty_buffer(bh, WRITE);
+		}
+		offset += blksize;
+		bh = bh->b_this_page;
+	}
+
+	offset = 0;
+	bh = page_buffers(page);
+	while (offset < to) {
+		if (offset >= from) {
+			wait_on_buffer(bh);
+			if (!buffer_uptodate(bh))
+				return -EIO;
+		}
+		offset += blksize;
+		bh = bh->b_this_page;
+	}
+	return 0;
 }
 
 static int needs_empty_write(sector_t block, struct inode *inode)
@@ -643,7 +676,8 @@ static int needs_empty_write(sector_t block, struct inode *inode)
 	return !buffer_mapped(&bh_map);
 }
 
-static int write_empty_blocks(struct page *page, unsigned from, unsigned to)
+static int write_empty_blocks(struct page *page, unsigned from, unsigned to,
+			      int mode)
 {
 	struct inode *inode = page->mapping->host;
 	unsigned start, end, next, blksize;
@@ -668,7 +702,9 @@ static int write_empty_blocks(struct page *page, unsigned from, unsigned to)
 						  gfs2_block_map);
 			if (unlikely(ret))
 				return ret;
-			empty_write_end(page, start, end);
+			ret = empty_write_end(page, start, end, mode);
+			if (unlikely(ret))
+				return ret;
 			end = 0;
 		}
 		start = next;
@@ -682,7 +718,9 @@ static int write_empty_blocks(struct page *page, unsigned from, unsigned to)
 		ret = __block_write_begin(page, start, end - start, gfs2_block_map);
 		if (unlikely(ret))
 			return ret;
-		empty_write_end(page, start, end);
+		ret = empty_write_end(page, start, end, mode);
+		if (unlikely(ret))
+			return ret;
 	}
 
 	return 0;
@@ -731,7 +769,7 @@ static int fallocate_chunk(struct inode *inode, loff_t offset, loff_t len,
 		if (curr == end)
 			to = end_offset;
 
-		error = write_empty_blocks(page, from, to);
+		error = write_empty_blocks(page, from, to, mode);
 		if (!error && offset + to > inode->i_size &&
 		    !(mode & FALLOC_FL_KEEP_SIZE)) {
 			i_size_write(inode, offset + to);
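
The new past-i_size path in empty_write_end() makes two passes over the
page's buffer_heads, condensed below as a sketch (the standalone helper name
is illustrative, not a function added by the patch): the first pass dirties
and submits every buffer in the range with write_dirty_buffer(), and only
then does the second pass wait on them, presumably so all of the writes can
be in flight at once rather than being issued and waited on one at a time.

/* Sketch condensed from the empty_write_end() hunk above:
 * submit every buffer in [from, to) first, then wait on and check each one. */
static int write_range_directly(struct page *page, unsigned from,
				unsigned to, unsigned blksize)
{
	struct buffer_head *bh = page_buffers(page);
	unsigned offset;

	for (offset = 0; offset < to; offset += blksize, bh = bh->b_this_page) {
		if (offset < from)
			continue;
		set_buffer_uptodate(bh);
		mark_buffer_dirty(bh);
		clear_buffer_new(bh);
		write_dirty_buffer(bh, WRITE);	/* bypasses ->writepage */
	}

	bh = page_buffers(page);
	for (offset = 0; offset < to; offset += blksize, bh = bh->b_this_page) {
		if (offset < from)
			continue;
		wait_on_buffer(bh);
		if (!buffer_uptodate(bh))
			return -EIO;	/* a buffer failed to write back */
	}
	return 0;
}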