btrfs: cleanup extent locking sequence

Code cleanup for better understanding:
Rename the variable need_unlock to extents_locked so that its name
describes state rather than an action. Change its type from bool to
int so it can hold the return value of
lock_and_cleanup_extent_if_need() directly, which reduces code in the
critical path.

Signed-off-by: Goldwyn Rodrigues <rgoldwyn@suse.com>
Reviewed-by: David Sterba <dsterba@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>

Author: Goldwyn Rodrigues
Date: 2017-10-16 05:43:21 -05:00
Committer: David Sterba
Commit: 79f015f216
Parent: 2dbe0c7718

@@ -1590,7 +1590,6 @@ static noinline ssize_t __btrfs_buffered_write(struct file *file,
 	int ret = 0;
 	bool only_release_metadata = false;
 	bool force_page_uptodate = false;
-	bool need_unlock;
 
 	nrptrs = min(DIV_ROUND_UP(iov_iter_count(i), PAGE_SIZE),
 			PAGE_SIZE / (sizeof(struct page *)));
@@ -1613,6 +1612,7 @@ static noinline ssize_t __btrfs_buffered_write(struct file *file,
 		size_t copied;
 		size_t dirty_sectors;
 		size_t num_sectors;
+		int extents_locked;
 
 		WARN_ON(num_pages > nrptrs);
 
@@ -1669,7 +1669,6 @@ static noinline ssize_t __btrfs_buffered_write(struct file *file,
 		}
 
 		release_bytes = reserve_bytes;
-		need_unlock = false;
 again:
 		/*
 		 * This is going to setup the pages array with the number of
@@ -1682,16 +1681,15 @@ again:
 		if (ret)
 			break;
 
-		ret = lock_and_cleanup_extent_if_need(BTRFS_I(inode), pages,
+		extents_locked = lock_and_cleanup_extent_if_need(
+				BTRFS_I(inode), pages,
 				num_pages, pos, write_bytes, &lockstart,
 				&lockend, &cached_state);
-		if (ret < 0) {
-			if (ret == -EAGAIN)
+		if (extents_locked < 0) {
+			if (extents_locked == -EAGAIN)
 				goto again;
+			ret = extents_locked;
 			break;
-		} else if (ret > 0) {
-			need_unlock = true;
-			ret = 0;
 		}
 
 		copied = btrfs_copy_from_user(pos, write_bytes, pages, i);
@@ -1756,7 +1754,7 @@ again:
 		if (copied > 0)
 			ret = btrfs_dirty_pages(inode, pages, dirty_pages,
 						pos, copied, NULL);
-		if (need_unlock)
+		if (extents_locked)
 			unlock_extent_cached(&BTRFS_I(inode)->io_tree,
 					lockstart, lockend, &cached_state,
 					GFP_NOFS);
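
The convention the new int encodes can be read off the control flow
above: a negative return from lock_and_cleanup_extent_if_need() is an
error (with -EAGAIN triggering a retry via "goto again"), zero means
no extent lock was taken, and a positive value means the range is
locked. Below is a minimal, self-contained sketch of that pattern,
not btrfs code: try_lock_range(), unlock_range() and write_range() are
hypothetical stand-ins, and the -EAGAIN retry is omitted for brevity.

#include <errno.h>
#include <stdio.h>

/*
 * Hypothetical stand-in for lock_and_cleanup_extent_if_need():
 * returns a negative errno on failure, 0 if no lock was taken,
 * and a positive value if the range is now locked.
 */
static int try_lock_range(unsigned long start, unsigned long len)
{
	if (len == 0)
		return -EINVAL;		/* error path */
	if (start & 1)
		return 0;		/* nothing to lock */
	return 1;			/* range locked */
}

static void unlock_range(unsigned long start, unsigned long len)
{
	printf("unlocked [%lu, %lu)\n", start, start + len);
}

static int write_range(unsigned long start, unsigned long len)
{
	int ret = 0;
	/*
	 * One int carries all three states, so no separate bool
	 * (the old need_unlock) has to be kept in sync with it.
	 */
	int extents_locked = try_lock_range(start, len);

	if (extents_locked < 0)
		return extents_locked;	/* propagate the error */

	/* ... copy data, dirty pages ... */

	if (extents_locked)	/* test state, not a recorded action */
		unlock_range(start, len);
	return ret;
}

int main(void)
{
	return write_range(2, 4096) < 0;
}

The payoff, as the message says, is less code in the critical path:
the unlock decision reads the lock state directly instead of
consulting a bool that had to be set in a separate branch.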