diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c
index 9adaa79adad9..354913177ba6 100644
--- a/fs/btrfs/compression.c
+++ b/fs/btrfs/compression.c
@@ -220,10 +220,12 @@ static void end_compressed_bio_write(struct bio *bio, int err)
 	 */
 	inode = cb->inode;
 	tree = &BTRFS_I(inode)->io_tree;
+	cb->compressed_pages[0]->mapping = cb->inode->i_mapping;
 	tree->ops->writepage_end_io_hook(cb->compressed_pages[0],
 					 cb->start,
 					 cb->start + cb->len - 1,
 					 NULL, 1);
+	cb->compressed_pages[0]->mapping = NULL;
 
 	end_compressed_writeback(inode, cb->start, cb->len);
 	/* note, our inode could be gone now */
@@ -306,6 +308,7 @@ int btrfs_submit_compressed_write(struct inode *inode, u64 start,
 		else
 			ret = 0;
 
+		page->mapping = NULL;
 		if (ret || bio_add_page(bio, page, PAGE_CACHE_SIZE, 0) <
 		    PAGE_CACHE_SIZE) {
 			bio_get(bio);
@@ -423,6 +426,7 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
 		else
 			ret = 0;
 
+		page->mapping = NULL;
 		if (ret || bio_add_page(comp_bio, page, PAGE_CACHE_SIZE, 0) <
 		    PAGE_CACHE_SIZE) {
 			bio_get(comp_bio);
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index eb3c12e7beaf..9b37ce6e5168 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -1242,12 +1242,21 @@ again:
 	delalloc_end = 0;
 	found = find_delalloc_range(tree, &delalloc_start, &delalloc_end,
 				    max_bytes);
-	if (!found) {
+	if (!found || delalloc_end <= *start) {
 		*start = delalloc_start;
 		*end = delalloc_end;
 		return found;
 	}
 
+	/*
+	 * start comes from the offset of locked_page. We have to lock
+	 * pages in order, so we can't process delalloc bytes before
+	 * locked_page
+	 */
+	if (delalloc_start < *start) {
+		delalloc_start = *start;
+	}
+
 	/*
 	 * make sure to limit the number of pages we try to lock down
 	 * if we're looping.
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
index 238a8e215eb9..0c8cc35a8b97 100644
--- a/fs/btrfs/file.c
+++ b/fs/btrfs/file.c
@@ -368,8 +368,8 @@ int noinline btrfs_drop_extents(struct btrfs_trans_handle *trans,
 	u64 search_start = start;
 	u64 leaf_start;
 	u64 ram_bytes = 0;
-	u8 compression = 0;
-	u8 encryption = 0;
+	u8 compression;
+	u8 encryption;
 	u16 other_encoding = 0;
 	u64 root_gen;
 	u64 root_owner;
@@ -415,6 +415,8 @@ next_slot:
 		leaf_start = 0;
 		root_gen = 0;
 		root_owner = 0;
+		compression = 0;
+		encryption = 0;
 		extent = NULL;
 		leaf = path->nodes[0];
 		slot = path->slots[0];
@@ -546,8 +548,12 @@ next_slot:
 						   inline_limit - key.offset);
 				inode_sub_bytes(inode, extent_end -
 						inline_limit);
-				btrfs_truncate_item(trans, root, path,
-						    new_size, 1);
+				btrfs_set_file_extent_ram_bytes(leaf, extent,
+							new_size);
+				if (!compression && !encryption) {
+					btrfs_truncate_item(trans, root, path,
+							    new_size, 1);
+				}
 			}
 		}
 		/* delete the entire extent */
@@ -567,8 +573,11 @@ next_slot:
 			new_size = btrfs_file_extent_calc_inline_size(
 						   extent_end - end);
 			inode_sub_bytes(inode, end - key.offset);
-			ret = btrfs_truncate_item(trans, root, path,
-						  new_size, 0);
+			btrfs_set_file_extent_ram_bytes(leaf, extent,
+							new_size);
+			if (!compression && !encryption)
+				ret = btrfs_truncate_item(trans, root, path,
+							  new_size, 0);
 			BUG_ON(ret);
 		}
 		/* create bookend, splitting the extent in two */
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 789c376157f9..806708dd7e38 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -239,6 +239,7 @@ static int cow_file_range_inline(struct btrfs_trans_handle *trans,
 		data_len = compressed_size;
 
 	if (start > 0 ||
+	    actual_end >= PAGE_CACHE_SIZE ||
 	    data_len >= BTRFS_MAX_INLINE_DATA_SIZE(root) ||
 	    (!compressed_size &&
 	    (actual_end & (root->sectorsize - 1)) == 0) ||
@@ -248,7 +249,7 @@ static int cow_file_range_inline(struct btrfs_trans_handle *trans,
 	}
 
 	ret = btrfs_drop_extents(trans, root, inode, start,
-				 aligned_end, aligned_end, &hint_byte);
+				 aligned_end, start, &hint_byte);
 	BUG_ON(ret);
 
 	if (isize > actual_end)
@@ -423,6 +424,7 @@ again:
 		 * free any pages it allocated and our page pointer array
 		 */
 		for (i = 0; i < nr_pages_ret; i++) {
+			WARN_ON(pages[i]->mapping);
 			page_cache_release(pages[i]);
 		}
 		kfree(pages);
@@ -572,8 +574,10 @@ free_pages_out_fail:
 	extent_clear_unlock_delalloc(inode, &BTRFS_I(inode)->io_tree,
 				     start, end, locked_page, 0, 0, 0);
 free_pages_out:
-	for (i = 0; i < nr_pages_ret; i++)
+	for (i = 0; i < nr_pages_ret; i++) {
+		WARN_ON(pages[i]->mapping);
 		page_cache_release(pages[i]);
+	}
 	if (pages)
 		kfree(pages);