
nilfs2: add missing blkdev_issue_flush() to nilfs_sync_fs()

Under normal circumstances, nilfs_sync_fs() writes out the super block,
which causes a flush of the underlying block device.  But this only
happens if the THE_NILFS_SB_DIRTY flag is set, and that flag is only set
when the pointer to the last segment crosses a segment boundary.  So if
only a small amount of data is written before the call to
nilfs_sync_fs(), no flush of the block device occurs.

In the above case, an additional call to blkdev_issue_flush() is needed.
To prevent unnecessary overhead, the new flag nilfs->ns_flushed_device
is introduced; it is cleared whenever new logs are written and set
whenever the block device is flushed.  For convenience, the helper
function nilfs_flush_device() is added, which encapsulates this logic.

Signed-off-by: Andreas Rohner <andreas.rohner@gmx.net>
Signed-off-by: Ryusuke Konishi <konishi.ryusuke@lab.ntt.co.jp>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Andreas Rohner 2014-10-13 15:53:20 -07:00 committed by Linus Torvalds
parent 0f2a84f41a
commit e2c7617ae3
5 changed files with 37 additions and 10 deletions
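
Purely as an illustration (not part of this commit), the following is a
minimal userspace C model of the ns_flushed_device bookkeeping described
above: writing logs clears the flag, a flush sets it, and a repeated sync
with no new logs skips the flush.  The fake_* names, the struct, and
main() are invented for this sketch; the real code additionally checks
the BARRIER mount option and orders the flag store with smp_wmb(), both
omitted here.

/* Illustrative sketch only -- models the flag logic of the patch. */
#include <stdio.h>

struct fake_nilfs {
	int ns_flushed_device;	/* stand-in for the new field in struct the_nilfs */
};

static int fake_blkdev_issue_flush(void)
{
	puts("flush issued");
	return 0;
}

static int fake_flush_device(struct fake_nilfs *nilfs)
{
	if (nilfs->ns_flushed_device)
		return 0;		/* nothing written since the last flush */
	nilfs->ns_flushed_device = 1;	/* set before flushing, as in the patch */
	return fake_blkdev_issue_flush();
}

static void fake_write_logs(struct fake_nilfs *nilfs)
{
	nilfs->ns_flushed_device = 0;	/* new logs invalidate the flushed state */
}

int main(void)
{
	struct fake_nilfs nilfs = { .ns_flushed_device = 0 };

	fake_write_logs(&nilfs);
	fake_flush_device(&nilfs);	/* prints "flush issued" */
	fake_flush_device(&nilfs);	/* no output: device already flushed */
	return 0;
}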

fs/nilfs2/file.c

@@ -56,11 +56,9 @@ int nilfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
 	mutex_unlock(&inode->i_mutex);
 
 	nilfs = inode->i_sb->s_fs_info;
-	if (!err && nilfs_test_opt(nilfs, BARRIER)) {
-		err = blkdev_issue_flush(inode->i_sb->s_bdev, GFP_KERNEL, NULL);
-		if (err != -EIO)
-			err = 0;
-	}
+	if (!err)
+		err = nilfs_flush_device(nilfs);
+
 	return err;
 }

fs/nilfs2/ioctl.c

@@ -1022,11 +1022,9 @@ static int nilfs_ioctl_sync(struct inode *inode, struct file *filp,
 		return ret;
 
 	nilfs = inode->i_sb->s_fs_info;
-	if (nilfs_test_opt(nilfs, BARRIER)) {
-		ret = blkdev_issue_flush(inode->i_sb->s_bdev, GFP_KERNEL, NULL);
-		if (ret == -EIO)
-			return ret;
-	}
+	ret = nilfs_flush_device(nilfs);
+	if (ret < 0)
+		return ret;
 
 	if (argp != NULL) {
 		down_read(&nilfs->ns_segctor_sem);

fs/nilfs2/segment.c

@@ -1833,6 +1833,7 @@ static void nilfs_segctor_complete_write(struct nilfs_sc_info *sci)
 		nilfs_set_next_segment(nilfs, segbuf);
 
 		if (update_sr) {
+			nilfs->ns_flushed_device = 0;
 			nilfs_set_last_segment(nilfs, segbuf->sb_pseg_start,
 					       segbuf->sb_sum.seg_seq, nilfs->ns_cno++);
@@ -2216,6 +2217,8 @@ int nilfs_construct_dsync_segment(struct super_block *sb, struct inode *inode,
 	sci->sc_dsync_end = end;
 
 	err = nilfs_segctor_do_construct(sci, SC_LSEG_DSYNC);
+	if (!err)
+		nilfs->ns_flushed_device = 0;
 
 	nilfs_transaction_unlock(sb);
 	return err;

fs/nilfs2/super.c

@@ -310,6 +310,9 @@ int nilfs_commit_super(struct super_block *sb, int flag)
 					    nilfs->ns_sbsize));
 	}
 	clear_nilfs_sb_dirty(nilfs);
+	nilfs->ns_flushed_device = 1;
+	/* make sure store to ns_flushed_device cannot be reordered */
+	smp_wmb();
 	return nilfs_sync_super(sb, flag);
 }
@@ -514,6 +517,9 @@ static int nilfs_sync_fs(struct super_block *sb, int wait)
 	}
 	up_write(&nilfs->ns_sem);
 
+	if (!err)
+		err = nilfs_flush_device(nilfs);
+
 	return err;
 }

fs/nilfs2/the_nilfs.h

@@ -46,6 +46,7 @@ enum {
 /**
  * struct the_nilfs - struct to supervise multiple nilfs mount points
  * @ns_flags: flags
+ * @ns_flushed_device: flag indicating if all volatile data was flushed
  * @ns_bdev: block device
  * @ns_sem: semaphore for shared states
  * @ns_snapshot_mount_mutex: mutex to protect snapshot mounts
@@ -103,6 +104,7 @@ enum {
  */
 struct the_nilfs {
 	unsigned long		ns_flags;
+	int			ns_flushed_device;
 
 	struct block_device    *ns_bdev;
 	struct rw_semaphore	ns_sem;
@@ -371,4 +373,24 @@ static inline int nilfs_segment_is_active(struct the_nilfs *nilfs, __u64 n)
 	return n == nilfs->ns_segnum || n == nilfs->ns_nextnum;
 }
 
+static inline int nilfs_flush_device(struct the_nilfs *nilfs)
+{
+	int err;
+
+	if (!nilfs_test_opt(nilfs, BARRIER) || nilfs->ns_flushed_device)
+		return 0;
+
+	nilfs->ns_flushed_device = 1;
+	/*
+	 * the store to ns_flushed_device must not be reordered after
+	 * blkdev_issue_flush().
+	 */
+	smp_wmb();
+
+	err = blkdev_issue_flush(nilfs->ns_bdev, GFP_KERNEL, NULL);
+	if (err != -EIO)
+		err = 0;
+	return err;
+}
+
 #endif /* _THE_NILFS_H */