
gfs2: use iomap for buffered I/O in ordered and writeback mode

Switch to using the iomap readpage and writepage helpers for all I/O in
the ordered and writeback modes, and thus eliminate using buffer_heads
for I/O in these cases.  The journaled data mode is left untouched.

(Andreas Gruenbacher: In gfs2_unstuffer_page, switch from mark_buffer_dirty
to set_page_dirty instead of accidentally leaving the page / buffer clean.)

Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Andreas Gruenbacher <agruenba@redhat.com>
Christoph Hellwig 2019-07-01 23:54:39 +02:00 committed by Andreas Gruenbacher
parent ee1e2c773e
commit 2164f9b918
3 changed files with 57 additions and 34 deletions
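For readers who don't have the iomap interfaces in their head, here is a rough sketch, not gfs2 code, of the shape this commit moves the buffered I/O paths towards: the read side calls straight into iomap_readpage()/iomap_readahead(), and the writeback side hands iomap_writepages() a per-call iomap_writepage_ctx plus a map_blocks callback, so no buffer_heads are involved. The myfs_* names, the myfs_get_extent() helper and myfs_iomap_ops are invented placeholders, and the journaled-data special cases that gfs2 keeps are left out.

#include <linux/fs.h>
#include <linux/iomap.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>

/* Assumed to exist elsewhere: the filesystem's iomap_begin/iomap_end pair. */
extern const struct iomap_ops myfs_iomap_ops;

/* Assumed helper: fill @iomap with the extent covering @offset. */
extern int myfs_get_extent(struct inode *inode, loff_t offset,
                           struct iomap *iomap);

/*
 * Called by iomap_writepage()/iomap_writepages() for each page that needs a
 * mapping.  The result is cached in wpc->iomap, so a new lookup only happens
 * once writeback walks past the end of the current extent.
 */
static int myfs_map_blocks(struct iomap_writepage_ctx *wpc,
                           struct inode *inode, loff_t offset)
{
        if (offset >= wpc->iomap.offset &&
            offset < wpc->iomap.offset + wpc->iomap.length)
                return 0;

        return myfs_get_extent(inode, offset, &wpc->iomap);
}

static const struct iomap_writeback_ops myfs_writeback_ops = {
        .map_blocks = myfs_map_blocks,
};

static int myfs_writepages(struct address_space *mapping,
                           struct writeback_control *wbc)
{
        struct iomap_writepage_ctx wpc = { };

        return iomap_writepages(mapping, wbc, &wpc, &myfs_writeback_ops);
}

static int myfs_readpage(struct file *file, struct page *page)
{
        return iomap_readpage(page, &myfs_iomap_ops);
}

static void myfs_readahead(struct readahead_control *rac)
{
        iomap_readahead(rac, &myfs_iomap_ops);
}

/* No buffer_heads: per-page state is tracked by iomap's own structures. */
static const struct address_space_operations myfs_aops = {
        .readpage       = myfs_readpage,
        .readahead      = myfs_readahead,
        .writepages     = myfs_writepages,
        .set_page_dirty = iomap_set_page_dirty,
        .releasepage    = iomap_releasepage,
        .invalidatepage = iomap_invalidatepage,
        .direct_IO      = noop_direct_IO,
};

gfs2 itself keeps an extra layer on top of this: jdata and stuffed inodes still go through buffer_heads and the journal, which is why the hunks below branch on gfs2_is_jdata()/gfs2_is_stuffed() instead of calling the iomap helpers unconditionally.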

fs/gfs2/aops.c

@@ -91,22 +91,13 @@ static int gfs2_writepage(struct page *page, struct writeback_control *wbc)
         struct inode *inode = page->mapping->host;
         struct gfs2_inode *ip = GFS2_I(inode);
         struct gfs2_sbd *sdp = GFS2_SB(inode);
-        loff_t i_size = i_size_read(inode);
-        pgoff_t end_index = i_size >> PAGE_SHIFT;
-        unsigned offset;
+        struct iomap_writepage_ctx wpc = { };
         if (gfs2_assert_withdraw(sdp, gfs2_glock_is_held_excl(ip->i_gl)))
                 goto out;
         if (current->journal_info)
                 goto redirty;
-        /* Is the page fully outside i_size? (truncate in progress) */
-        offset = i_size & (PAGE_SIZE-1);
-        if (page->index > end_index || (page->index == end_index && !offset)) {
-                page->mapping->a_ops->invalidatepage(page, 0, PAGE_SIZE);
-                goto out;
-        }
-        return nobh_writepage(page, gfs2_get_block_noalloc, wbc);
+        return iomap_writepage(page, wbc, &wpc, &gfs2_writeback_ops);
 redirty:
         redirty_page_for_writepage(wbc, page);
@@ -208,7 +199,8 @@ static int gfs2_writepages(struct address_space *mapping,
                            struct writeback_control *wbc)
 {
         struct gfs2_sbd *sdp = gfs2_mapping2sbd(mapping);
-        int ret = mpage_writepages(mapping, wbc, gfs2_get_block_noalloc);
+        struct iomap_writepage_ctx wpc = { };
+        int ret;
         /*
          * Even if we didn't write any pages here, we might still be holding
@@ -216,9 +208,9 @@ static int gfs2_writepages(struct address_space *mapping,
          * want balance_dirty_pages() to loop indefinitely trying to write out
          * pages held in the ail that it can't find.
          */
+        ret = iomap_writepages(mapping, wbc, &wpc, &gfs2_writeback_ops);
         if (ret == 0)
                 set_bit(SDF_FORCE_AIL_FLUSH, &sdp->sd_flags);
         return ret;
 }
@@ -470,12 +462,13 @@ static int stuffed_readpage(struct gfs2_inode *ip, struct page *page)
 static int __gfs2_readpage(void *file, struct page *page)
 {
-        struct gfs2_inode *ip = GFS2_I(page->mapping->host);
-        struct gfs2_sbd *sdp = GFS2_SB(page->mapping->host);
+        struct inode *inode = page->mapping->host;
+        struct gfs2_inode *ip = GFS2_I(inode);
+        struct gfs2_sbd *sdp = GFS2_SB(inode);
         int error;
-        if (i_blocksize(page->mapping->host) == PAGE_SIZE &&
-            !page_has_buffers(page)) {
+        if (!gfs2_is_jdata(ip) ||
+            (i_blocksize(inode) == PAGE_SIZE && !page_has_buffers(page))) {
                 error = iomap_readpage(page, &gfs2_iomap_ops);
         } else if (gfs2_is_stuffed(ip)) {
                 error = stuffed_readpage(ip, page);
@@ -563,8 +556,12 @@ static void gfs2_readahead(struct readahead_control *rac)
         struct inode *inode = rac->mapping->host;
         struct gfs2_inode *ip = GFS2_I(inode);
-        if (!gfs2_is_stuffed(ip))
+        if (gfs2_is_stuffed(ip))
+                ;
+        else if (gfs2_is_jdata(ip))
+                mpage_readahead(rac, gfs2_block_map);
+        else
                 iomap_readahead(rac, &gfs2_iomap_ops);
 }
 /**
@@ -784,12 +781,13 @@ static const struct address_space_operations gfs2_aops = {
         .writepages = gfs2_writepages,
         .readpage = gfs2_readpage,
         .readahead = gfs2_readahead,
+        .set_page_dirty = iomap_set_page_dirty,
+        .releasepage = iomap_releasepage,
+        .invalidatepage = iomap_invalidatepage,
         .bmap = gfs2_bmap,
-        .invalidatepage = gfs2_invalidatepage,
-        .releasepage = gfs2_releasepage,
         .direct_IO = noop_direct_IO,
-        .migratepage = buffer_migrate_page,
-        .is_partially_uptodate = block_is_partially_uptodate,
+        .migratepage = iomap_migrate_page,
+        .is_partially_uptodate = iomap_is_partially_uptodate,
         .error_remove_page = generic_error_remove_page,
 };

fs/gfs2/bmap.c

@@ -56,7 +56,6 @@ static int gfs2_unstuffer_page(struct gfs2_inode *ip, struct buffer_head *dibh,
                                u64 block, struct page *page)
 {
         struct inode *inode = &ip->i_inode;
-        struct buffer_head *bh;
         int release = 0;
         if (!page || page->index) {
@@ -80,20 +79,21 @@ static int gfs2_unstuffer_page(struct gfs2_inode *ip, struct buffer_head *dibh,
                 SetPageUptodate(page);
         }
-        if (!page_has_buffers(page))
-                create_empty_buffers(page, BIT(inode->i_blkbits),
-                                     BIT(BH_Uptodate));
+        if (gfs2_is_jdata(ip)) {
+                struct buffer_head *bh;
-        bh = page_buffers(page);
+                if (!page_has_buffers(page))
+                        create_empty_buffers(page, BIT(inode->i_blkbits),
+                                             BIT(BH_Uptodate));
-        if (!buffer_mapped(bh))
-                map_bh(bh, inode->i_sb, block);
+                bh = page_buffers(page);
+                if (!buffer_mapped(bh))
+                        map_bh(bh, inode->i_sb, block);
-        set_buffer_uptodate(bh);
-        if (gfs2_is_jdata(ip))
+                set_buffer_uptodate(bh);
                 gfs2_trans_add_data(ip->i_gl, bh);
-        else {
-                mark_buffer_dirty(bh);
+        } else {
+                set_page_dirty(page);
                 gfs2_ordered_add_inode(ip);
         }
@@ -1158,7 +1158,8 @@ static int gfs2_iomap_begin(struct inode *inode, loff_t pos, loff_t length,
         struct metapath mp = { .mp_aheight = 1, };
         int ret;
-        iomap->flags |= IOMAP_F_BUFFER_HEAD;
+        if (gfs2_is_jdata(ip))
+                iomap->flags |= IOMAP_F_BUFFER_HEAD;
         trace_gfs2_iomap_start(ip, pos, length, flags);
         if (gfs2_iomap_need_write_lock(flags)) {
@@ -2518,3 +2519,26 @@ out:
         gfs2_trans_end(sdp);
         return error;
 }
+
+static int gfs2_map_blocks(struct iomap_writepage_ctx *wpc, struct inode *inode,
+                           loff_t offset)
+{
+        struct metapath mp = { .mp_aheight = 1, };
+        int ret;
+
+        if (WARN_ON_ONCE(gfs2_is_stuffed(GFS2_I(inode))))
+                return -EIO;
+
+        if (offset >= wpc->iomap.offset &&
+            offset < wpc->iomap.offset + wpc->iomap.length)
+                return 0;
+
+        memset(&wpc->iomap, 0, sizeof(wpc->iomap));
+        ret = gfs2_iomap_get(inode, offset, INT_MAX, 0, &wpc->iomap, &mp);
+        release_metapath(&mp);
+        return ret;
+}
+
+const struct iomap_writeback_ops gfs2_writeback_ops = {
+        .map_blocks = gfs2_map_blocks,
+};

fs/gfs2/bmap.h

@@ -44,6 +44,7 @@ static inline void gfs2_write_calc_reserv(const struct gfs2_inode *ip,
 }
 extern const struct iomap_ops gfs2_iomap_ops;
+extern const struct iomap_writeback_ops gfs2_writeback_ops;
 extern int gfs2_unstuff_dinode(struct gfs2_inode *ip, struct page *page);
 extern int gfs2_block_map(struct inode *inode, sector_t lblock,