Remove rw from {,__,do_}blockdev_direct_IO()

Most filesystems call through to these at some point, so we'll start
here.

Signed-off-by: Omar Sandoval <osandov@osandov.com>
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
Omar Sandoval 2015-03-16 04:33:50 -07:00 committed by Al Viro
parent bd8e0ff956
commit 17f8c842d2
20 changed files with 65 additions and 72 deletions
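
The conversion at each call site is mechanical: callers stop passing rw down to {,__,do_}blockdev_direct_IO(), and the direct-io core derives the data direction from the iov_iter via iov_iter_rw() instead. Below is a minimal sketch (not taken from this patch) of what an affected filesystem caller looks like afterwards; foo_direct_IO(), foo_get_block() and foo_write_failed() are hypothetical stand-ins, and the ->direct_IO method itself still receives rw at this point in the series and keeps using it for its own checks.

/* Sketch only, assuming <linux/fs.h> and <linux/uio.h>; foo_get_block()
 * and foo_write_failed() are hypothetical per-filesystem helpers. */
static ssize_t foo_direct_IO(int rw, struct kiocb *iocb,
			     struct iov_iter *iter, loff_t offset)
{
	struct address_space *mapping = iocb->ki_filp->f_mapping;
	struct inode *inode = mapping->host;
	size_t count = iov_iter_count(iter);
	ssize_t ret;

	/* No rw argument any more: the iter already carries the direction. */
	ret = blockdev_direct_IO(iocb, inode, iter, offset, foo_get_block);

	/* The method still has rw and keeps using it for its own cleanup. */
	if (ret < 0 && (rw & WRITE))
		foo_write_failed(mapping, offset + count);
	return ret;
}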

@@ -405,7 +405,7 @@ affs_direct_IO(int rw, struct kiocb *iocb, struct iov_iter *iter,
return 0;
}
-ret = blockdev_direct_IO(rw, iocb, inode, iter, offset, affs_get_block);
+ret = blockdev_direct_IO(iocb, inode, iter, offset, affs_get_block);
if (ret < 0 && (rw & WRITE))
affs_write_failed(mapping, offset + count);
return ret;

@@ -152,9 +152,8 @@ blkdev_direct_IO(int rw, struct kiocb *iocb, struct iov_iter *iter,
struct file *file = iocb->ki_filp;
struct inode *inode = file->f_mapping->host;
-return __blockdev_direct_IO(rw, iocb, inode, I_BDEV(inode), iter,
-offset, blkdev_get_block,
-NULL, NULL, 0);
+return __blockdev_direct_IO(iocb, inode, I_BDEV(inode), iter, offset,
+blkdev_get_block, NULL, NULL, 0);
}
int __sync_blockdev(struct block_device *bdev, int wait)

@@ -8174,10 +8174,10 @@ static ssize_t btrfs_direct_IO(int rw, struct kiocb *iocb,
wakeup = false;
}
-ret = __blockdev_direct_IO(rw, iocb, inode,
-BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev,
-iter, offset, btrfs_get_blocks_direct, NULL,
-btrfs_submit_direct, flags);
+ret = __blockdev_direct_IO(iocb, inode,
+BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev,
+iter, offset, btrfs_get_blocks_direct, NULL,
+btrfs_submit_direct, flags);
if (rw & WRITE) {
current->journal_info = NULL;
if (ret < 0 && ret != -EIOCBQUEUED)

@@ -1093,10 +1093,10 @@ static inline int drop_refcount(struct dio *dio)
* for the whole file.
*/
static inline ssize_t
-do_blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode,
-struct block_device *bdev, struct iov_iter *iter, loff_t offset,
-get_block_t get_block, dio_iodone_t end_io,
-dio_submit_t submit_io, int flags)
+do_blockdev_direct_IO(struct kiocb *iocb, struct inode *inode,
+struct block_device *bdev, struct iov_iter *iter,
+loff_t offset, get_block_t get_block, dio_iodone_t end_io,
+dio_submit_t submit_io, int flags)
{
unsigned i_blkbits = ACCESS_ONCE(inode->i_blkbits);
unsigned blkbits = i_blkbits;
@@ -1110,9 +1110,6 @@ do_blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode,
struct blk_plug plug;
unsigned long align = offset | iov_iter_alignment(iter);
-if (rw & WRITE)
-rw = WRITE_ODIRECT;
/*
* Avoid references to bdev if not absolutely needed to give
* the early prefetch in the caller enough time.
@@ -1127,7 +1124,7 @@ do_blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode,
}
/* watch out for a 0 len io from a tricksy fs */
-if (rw == READ && !iov_iter_count(iter))
+if (iov_iter_rw(iter) == READ && !iov_iter_count(iter))
return 0;
dio = kmem_cache_alloc(dio_cache, GFP_KERNEL);
@@ -1143,7 +1140,7 @@ do_blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode,
dio->flags = flags;
if (dio->flags & DIO_LOCKING) {
-if (rw == READ) {
+if (iov_iter_rw(iter) == READ) {
struct address_space *mapping =
iocb->ki_filp->f_mapping;
@@ -1169,19 +1166,19 @@ do_blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode,
if (is_sync_kiocb(iocb))
dio->is_async = false;
else if (!(dio->flags & DIO_ASYNC_EXTEND) &&
-(rw & WRITE) && end > i_size_read(inode))
+iov_iter_rw(iter) == WRITE && end > i_size_read(inode))
dio->is_async = false;
else
dio->is_async = true;
dio->inode = inode;
-dio->rw = rw;
+dio->rw = iov_iter_rw(iter) == WRITE ? WRITE_ODIRECT : READ;
/*
* For AIO O_(D)SYNC writes we need to defer completions to a workqueue
* so that we can call ->fsync.
*/
-if (dio->is_async && (rw & WRITE) &&
+if (dio->is_async && iov_iter_rw(iter) == WRITE &&
((iocb->ki_filp->f_flags & O_DSYNC) ||
IS_SYNC(iocb->ki_filp->f_mapping->host))) {
retval = dio_set_defer_completion(dio);
@@ -1274,7 +1271,7 @@ do_blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode,
* we can let i_mutex go now that its achieved its purpose
* of protecting us from looking up uninitialized blocks.
*/
-if (rw == READ && (dio->flags & DIO_LOCKING))
+if (iov_iter_rw(iter) == READ && (dio->flags & DIO_LOCKING))
mutex_unlock(&dio->inode->i_mutex);
/*
@@ -1286,7 +1283,7 @@ do_blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode,
*/
BUG_ON(retval == -EIOCBQUEUED);
if (dio->is_async && retval == 0 && dio->result &&
-(rw == READ || dio->result == count))
+(iov_iter_rw(iter) == READ || dio->result == count))
retval = -EIOCBQUEUED;
else
dio_await_completion(dio);
@@ -1300,11 +1297,11 @@ out:
return retval;
}
-ssize_t
-__blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode,
-struct block_device *bdev, struct iov_iter *iter, loff_t offset,
-get_block_t get_block, dio_iodone_t end_io,
-dio_submit_t submit_io, int flags)
+ssize_t __blockdev_direct_IO(struct kiocb *iocb, struct inode *inode,
+struct block_device *bdev, struct iov_iter *iter,
+loff_t offset, get_block_t get_block,
+dio_iodone_t end_io, dio_submit_t submit_io,
+int flags)
{
/*
* The block device state is needed in the end to finally
@@ -1318,8 +1315,8 @@ __blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode,
prefetch(bdev->bd_queue);
prefetch((char *)bdev->bd_queue + SMP_CACHE_BYTES);
-return do_blockdev_direct_IO(rw, iocb, inode, bdev, iter, offset,
-get_block, end_io, submit_io, flags);
+return do_blockdev_direct_IO(iocb, inode, bdev, iter, offset, get_block,
+end_io, submit_io, flags);
}
EXPORT_SYMBOL(__blockdev_direct_IO);
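
The reason rw can be dropped is that the direction already travels with the iov_iter: it is fixed when the iterator is set up and read back with iov_iter_rw(). A rough illustration of that invariant, assuming an iterator built with iov_iter_init() (sketch only, not part of this patch):

/* Sketch: the direction given at iov_iter_init() time is what
 * iov_iter_rw() reports back, so a separate rw argument adds nothing. */
static void iov_iter_rw_demo(void)
{
	char buf[4096];
	struct iovec iov = { .iov_base = buf, .iov_len = sizeof(buf) };
	struct iov_iter iter;

	iov_iter_init(&iter, WRITE, &iov, 1, sizeof(buf));
	WARN_ON(iov_iter_rw(&iter) != WRITE);
}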

@@ -864,7 +864,7 @@ ext2_direct_IO(int rw, struct kiocb *iocb, struct iov_iter *iter,
ret = dax_do_io(rw, iocb, inode, iter, offset, ext2_get_block,
NULL, DIO_LOCKING);
else
-ret = blockdev_direct_IO(rw, iocb, inode, iter, offset,
+ret = blockdev_direct_IO(iocb, inode, iter, offset,
ext2_get_block);
if (ret < 0 && (rw & WRITE))
ext2_write_failed(mapping, offset + count);

@@ -1856,7 +1856,7 @@ static ssize_t ext3_direct_IO(int rw, struct kiocb *iocb,
}
retry:
-ret = blockdev_direct_IO(rw, iocb, inode, iter, offset, ext3_get_block);
+ret = blockdev_direct_IO(iocb, inode, iter, offset, ext3_get_block);
/*
* In case of error extending write may have instantiated a few
* blocks outside i_size. Trim these off again.

@@ -693,9 +693,10 @@ retry:
ret = dax_do_io(rw, iocb, inode, iter, offset,
ext4_get_block, NULL, 0);
else
-ret = __blockdev_direct_IO(rw, iocb, inode,
-inode->i_sb->s_bdev, iter, offset,
-ext4_get_block, NULL, NULL, 0);
+ret = __blockdev_direct_IO(iocb, inode,
+inode->i_sb->s_bdev, iter,
+offset, ext4_get_block, NULL,
+NULL, 0);
inode_dio_done(inode);
} else {
locked:
@@ -703,8 +704,8 @@ locked:
ret = dax_do_io(rw, iocb, inode, iter, offset,
ext4_get_block, NULL, DIO_LOCKING);
else
-ret = blockdev_direct_IO(rw, iocb, inode, iter,
-offset, ext4_get_block);
+ret = blockdev_direct_IO(iocb, inode, iter, offset,
+ext4_get_block);
if (unlikely((rw & WRITE) && ret < 0)) {
loff_t isize = i_size_read(inode);

@@ -3037,7 +3037,7 @@ static ssize_t ext4_ext_direct_IO(int rw, struct kiocb *iocb,
ret = dax_do_io(rw, iocb, inode, iter, offset, get_block_func,
ext4_end_io_dio, dio_flags);
else
-ret = __blockdev_direct_IO(rw, iocb, inode,
+ret = __blockdev_direct_IO(iocb, inode,
inode->i_sb->s_bdev, iter, offset,
get_block_func,
ext4_end_io_dio, NULL, dio_flags);

@@ -1159,7 +1159,7 @@ static ssize_t f2fs_direct_IO(int rw, struct kiocb *iocb,
if (rw & WRITE)
__allocate_data_blocks(inode, offset, count);
-err = blockdev_direct_IO(rw, iocb, inode, iter, offset, get_data_block);
+err = blockdev_direct_IO(iocb, inode, iter, offset, get_data_block);
if (err < 0 && (rw & WRITE))
f2fs_write_failed(mapping, offset + count);

@@ -274,7 +274,7 @@ static ssize_t fat_direct_IO(int rw, struct kiocb *iocb,
* FAT need to use the DIO_LOCKING for avoiding the race
* condition of fat_get_block() and ->truncate().
*/
-ret = blockdev_direct_IO(rw, iocb, inode, iter, offset, fat_get_block);
+ret = blockdev_direct_IO(iocb, inode, iter, offset, fat_get_block);
if (ret < 0 && (rw & WRITE))
fat_write_failed(mapping, offset + count);

@@ -1095,9 +1095,8 @@ static ssize_t gfs2_direct_IO(int rw, struct kiocb *iocb,
truncate_inode_pages_range(mapping, lstart, end);
}
-rv = __blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev,
-iter, offset,
-gfs2_get_block_direct, NULL, NULL, 0);
+rv = __blockdev_direct_IO(iocb, inode, inode->i_sb->s_bdev, iter,
+offset, gfs2_get_block_direct, NULL, NULL, 0);
out:
gfs2_glock_dq(&gh);
gfs2_holder_uninit(&gh);

@@ -133,7 +133,7 @@ static ssize_t hfs_direct_IO(int rw, struct kiocb *iocb,
size_t count = iov_iter_count(iter);
ssize_t ret;
-ret = blockdev_direct_IO(rw, iocb, inode, iter, offset, hfs_get_block);
+ret = blockdev_direct_IO(iocb, inode, iter, offset, hfs_get_block);
/*
* In case of error extending write may have instantiated a few

@@ -131,8 +131,7 @@ static ssize_t hfsplus_direct_IO(int rw, struct kiocb *iocb,
size_t count = iov_iter_count(iter);
ssize_t ret;
-ret = blockdev_direct_IO(rw, iocb, inode, iter, offset,
-hfsplus_get_block);
+ret = blockdev_direct_IO(iocb, inode, iter, offset, hfsplus_get_block);
/*
* In case of error extending write may have instantiated a few

@@ -339,7 +339,7 @@ static ssize_t jfs_direct_IO(int rw, struct kiocb *iocb,
size_t count = iov_iter_count(iter);
ssize_t ret;
-ret = blockdev_direct_IO(rw, iocb, inode, iter, offset, jfs_get_block);
+ret = blockdev_direct_IO(iocb, inode, iter, offset, jfs_get_block);
/*
* In case of error extending write may have instantiated a few

@@ -318,8 +318,7 @@ nilfs_direct_IO(int rw, struct kiocb *iocb, struct iov_iter *iter,
return 0;
/* Needs synchronization with the cleaner */
-size = blockdev_direct_IO(rw, iocb, inode, iter, offset,
-nilfs_get_block);
+size = blockdev_direct_IO(iocb, inode, iter, offset, nilfs_get_block);
/*
* In case of error extending write may have instantiated a few

@@ -738,10 +738,9 @@ static ssize_t ocfs2_direct_IO_write(struct kiocb *iocb,
di_bh = NULL;
}
-written = __blockdev_direct_IO(WRITE, iocb, inode, inode->i_sb->s_bdev,
-iter, offset,
-ocfs2_direct_IO_get_blocks,
-ocfs2_dio_end_io, NULL, 0);
+written = __blockdev_direct_IO(iocb, inode, inode->i_sb->s_bdev, iter,
+offset, ocfs2_direct_IO_get_blocks,
+ocfs2_dio_end_io, NULL, 0);
if (unlikely(written < 0)) {
loff_t i_size = i_size_read(inode);
@@ -844,11 +843,10 @@ static ssize_t ocfs2_direct_IO(int rw,
return 0;
if (rw == READ)
-return __blockdev_direct_IO(rw, iocb, inode,
-inode->i_sb->s_bdev,
-iter, offset,
-ocfs2_direct_IO_get_blocks,
-ocfs2_dio_end_io, NULL, 0);
+return __blockdev_direct_IO(iocb, inode, inode->i_sb->s_bdev,
+iter, offset,
+ocfs2_direct_IO_get_blocks,
+ocfs2_dio_end_io, NULL, 0);
else
return ocfs2_direct_IO_write(iocb, iter, offset);
}

@@ -3286,7 +3286,7 @@ static ssize_t reiserfs_direct_IO(int rw, struct kiocb *iocb,
size_t count = iov_iter_count(iter);
ssize_t ret;
-ret = blockdev_direct_IO(rw, iocb, inode, iter, offset,
+ret = blockdev_direct_IO(iocb, inode, iter, offset,
reiserfs_get_blocks_direct_io);
/*

@@ -225,7 +225,7 @@ static ssize_t udf_direct_IO(int rw, struct kiocb *iocb,
size_t count = iov_iter_count(iter);
ssize_t ret;
-ret = blockdev_direct_IO(rw, iocb, inode, iter, offset, udf_get_block);
+ret = blockdev_direct_IO(iocb, inode, iter, offset, udf_get_block);
if (unlikely(ret < 0 && (rw & WRITE)))
udf_write_failed(mapping, offset + count);
return ret;

@@ -1504,14 +1504,13 @@ xfs_vm_direct_IO(
struct block_device *bdev = xfs_find_bdev_for_inode(inode);
if (rw & WRITE) {
-return __blockdev_direct_IO(rw, iocb, inode, bdev, iter,
-offset, xfs_get_blocks_direct,
+return __blockdev_direct_IO(iocb, inode, bdev, iter, offset,
+xfs_get_blocks_direct,
xfs_end_io_direct_write, NULL,
DIO_ASYNC_EXTEND);
}
-return __blockdev_direct_IO(rw, iocb, inode, bdev, iter,
-offset, xfs_get_blocks_direct,
-NULL, NULL, 0);
+return __blockdev_direct_IO(iocb, inode, bdev, iter, offset,
+xfs_get_blocks_direct, NULL, NULL, 0);
}
/*

@@ -2634,16 +2634,18 @@ enum {
void dio_end_io(struct bio *bio, int error);
-ssize_t __blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode,
-struct block_device *bdev, struct iov_iter *iter, loff_t offset,
-get_block_t get_block, dio_iodone_t end_io,
-dio_submit_t submit_io, int flags);
+ssize_t __blockdev_direct_IO(struct kiocb *iocb, struct inode *inode,
+struct block_device *bdev, struct iov_iter *iter,
+loff_t offset, get_block_t get_block,
+dio_iodone_t end_io, dio_submit_t submit_io,
+int flags);
-static inline ssize_t blockdev_direct_IO(int rw, struct kiocb *iocb,
-struct inode *inode, struct iov_iter *iter, loff_t offset,
-get_block_t get_block)
+static inline ssize_t blockdev_direct_IO(struct kiocb *iocb,
+struct inode *inode,
+struct iov_iter *iter, loff_t offset,
+get_block_t get_block)
{
-return __blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev, iter,
+return __blockdev_direct_IO(iocb, inode, inode->i_sb->s_bdev, iter,
offset, get_block, NULL, NULL,
DIO_LOCKING | DIO_SKIP_HOLES);
}