Btrfs: map the inode item when doing fill_inode_item

Instead of calling kmap_atomic for everything we set in the inode item, map the
entire inode item at the start and unmap it at the end.  This makes a sequential
dd of 400MB O_DIRECT something like 1% faster.  Thanks,

Signed-off-by: Josef Bacik <josef@redhat.com>
Josef Bacik 2011-04-05 13:02:27 -04:00
parent 06d5a5899d
commit 12ddb96cb6


@@ -2589,6 +2589,13 @@ static void fill_inode_item(struct btrfs_trans_handle *trans,
 			    struct btrfs_inode_item *item,
 			    struct inode *inode)
 {
+	if (!leaf->map_token)
+		map_private_extent_buffer(leaf, (unsigned long)item,
+					  sizeof(struct btrfs_inode_item),
+					  &leaf->map_token, &leaf->kaddr,
+					  &leaf->map_start, &leaf->map_len,
+					  KM_USER1);
+
 	btrfs_set_inode_uid(leaf, item, inode->i_uid);
 	btrfs_set_inode_gid(leaf, item, inode->i_gid);
 	btrfs_set_inode_size(leaf, item, BTRFS_I(inode)->disk_i_size);
@@ -2617,6 +2624,11 @@ static void fill_inode_item(struct btrfs_trans_handle *trans,
 	btrfs_set_inode_rdev(leaf, item, inode->i_rdev);
 	btrfs_set_inode_flags(leaf, item, BTRFS_I(inode)->flags);
 	btrfs_set_inode_block_group(leaf, item, BTRFS_I(inode)->block_group);
+
+	if (leaf->map_token) {
+		unmap_extent_buffer(leaf, leaf->map_token, KM_USER1);
+		leaf->map_token = NULL;
+	}
 }
 
 /*
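
For illustration only, here is a minimal userspace C sketch of the pattern the patch applies inside fill_inode_item: map the backing memory once, set all the fields, then unmap, instead of mapping and unmapping around every setter. The struct item, map_item() and unmap_item() helpers below are hypothetical stand-ins invented for this sketch, not kernel or btrfs APIs; the real code uses map_private_extent_buffer()/unmap_extent_buffer() and the btrfs_set_inode_*() setters shown in the diff above.

#include <stdio.h>

struct item {
	unsigned int uid;
	unsigned int gid;
	unsigned long long size;
};

/* Hypothetical stand-ins for mapping/unmapping the memory backing the item. */
static int map_count;

static void *map_item(void *backing)
{
	map_count++;		/* count how often we pay the mapping cost */
	return backing;
}

static void unmap_item(void *mapped)
{
	(void)mapped;
}

/* Old pattern: map and unmap around every single field update. */
static void fill_per_field(struct item *backing, unsigned int uid,
			   unsigned int gid, unsigned long long size)
{
	struct item *it;

	it = map_item(backing); it->uid = uid;   unmap_item(it);
	it = map_item(backing); it->gid = gid;   unmap_item(it);
	it = map_item(backing); it->size = size; unmap_item(it);
}

/* New pattern (what the patch does): map once, set every field, unmap once. */
static void fill_mapped_once(struct item *backing, unsigned int uid,
			     unsigned int gid, unsigned long long size)
{
	struct item *it = map_item(backing);

	it->uid = uid;
	it->gid = gid;
	it->size = size;
	unmap_item(it);
}

int main(void)
{
	struct item a = {0}, b = {0};
	int per_field_maps;

	fill_per_field(&a, 1000, 1000, 4096);
	per_field_maps = map_count;

	map_count = 0;
	fill_mapped_once(&b, 1000, 1000, 4096);

	printf("map calls: per-field=%d, mapped-once=%d\n",
	       per_field_maps, map_count);
	return 0;
}

Running this prints three map calls for the per-field version and one for the batched version; the real inode item has many more fields than this toy struct, which is where the measured ~1% win on the O_DIRECT dd comes from.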