
Merge branch 'for-linus-4.1' of git://git.kernel.org/pub/scm/linux/kernel/git/mason/linux-btrfs

Pull btrfs fixes from Chris Mason:
 "I fixed up a regression from 4.0 where conversion between different
  raid levels would sometimes bail out without converting.

  Filipe tracked down a race where it was possible to double allocate
  chunks on the drive.

  Mark has a fix for fiemap.  All three will get bundled off for stable
  as well"

* 'for-linus-4.1' of git://git.kernel.org/pub/scm/linux/kernel/git/mason/linux-btrfs:
  Btrfs: fix regression in raid level conversion
  Btrfs: fix racy system chunk allocation when setting block group ro
  btrfs: clear 'ret' in btrfs_check_shared() loop
Linus Torvalds 2015-05-23 11:14:10 -07:00
commit 7ce14f6ff2
3 changed files with 38 additions and 0 deletions

fs/btrfs/backref.c

@@ -880,6 +880,8 @@ static int __add_keyed_refs(struct btrfs_fs_info *fs_info,
 * indirect refs to their parent bytenr.
 * When roots are found, they're added to the roots list
 *
 * NOTE: This can return values > 0
 *
 * FIXME some caching might speed things up
 */
static int find_parent_nodes(struct btrfs_trans_handle *trans,
@@ -1198,6 +1200,19 @@ int btrfs_find_all_roots(struct btrfs_trans_handle *trans,
	return ret;
}
/**
 * btrfs_check_shared - tell us whether an extent is shared
 *
 * @trans: optional trans handle
 *
 * btrfs_check_shared uses the backref walking code but will short
 * circuit as soon as it finds a root or inode that doesn't match the
 * one passed in. This provides a significant performance benefit for
 * callers (such as fiemap) which want to know whether the extent is
 * shared but do not need a ref count.
 *
 * Return: 0 if extent is not shared, 1 if it is shared, < 0 on error.
 */
int btrfs_check_shared(struct btrfs_trans_handle *trans,
		       struct btrfs_fs_info *fs_info, u64 root_objectid,
		       u64 inum, u64 bytenr)
@@ -1226,11 +1241,13 @@ int btrfs_check_shared(struct btrfs_trans_handle *trans,
		ret = find_parent_nodes(trans, fs_info, bytenr, elem.seq, tmp,
					roots, NULL, root_objectid, inum);
		if (ret == BACKREF_FOUND_SHARED) {
			/* this is the only condition under which we return 1 */
			ret = 1;
			break;
		}
		if (ret < 0 && ret != -ENOENT)
			break;
		ret = 0;
		node = ulist_next(tmp, &uiter);
		if (!node)
			break;
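
Why the new "ret = 0;" matters: find_parent_nodes() can return values > 0 (see the NOTE added in the first hunk) that do not mean "shared". Without the reset, such a stale positive value could survive until the "!node" break and be returned to callers like fiemap, which would then treat the extent as shared even though BACKREF_FOUND_SHARED was never hit. A minimal user-space sketch of that pattern, using made-up stand-in names rather than the real btrfs code:

#include <errno.h>
#include <stdio.h>

#define FOUND_SHARED 6			/* stand-in for BACKREF_FOUND_SHARED */

/* Hypothetical walker: may return harmless values > 0, like find_parent_nodes(). */
static int walk_step(void)
{
	return 2;
}

static int check_shared(void)
{
	int nodes_left = 1;		/* pretend one more node is queued */
	int ret = 0;

	while (1) {
		ret = walk_step();
		if (ret == FOUND_SHARED) {
			/* the only condition under which we return 1 */
			ret = 1;
			break;
		}
		if (ret < 0 && ret != -ENOENT)
			break;
		ret = 0;		/* the fix: drop the stale positive value */
		if (nodes_left-- == 0)	/* models the "!node" break */
			break;
	}
	return ret;			/* 0: not shared, 1: shared, < 0: error */
}

int main(void)
{
	/* Prints 0; without the "ret = 0" line above it would print 2,
	 * which an "if (ret)" caller would misread as "shared". */
	printf("check_shared() = %d\n", check_shared());
	return 0;
}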

fs/btrfs/extent-tree.c

@@ -8829,6 +8829,24 @@ again:
		goto again;
	}

	/*
	 * if we are changing raid levels, try to allocate a corresponding
	 * block group with the new raid level.
	 */
	alloc_flags = update_block_group_flags(root, cache->flags);
	if (alloc_flags != cache->flags) {
		ret = do_chunk_alloc(trans, root, alloc_flags,
				     CHUNK_ALLOC_FORCE);
		/*
		 * ENOSPC is allowed here, we may have enough space
		 * already allocated at the new raid level to
		 * carry on
		 */
		if (ret == -ENOSPC)
			ret = 0;
		if (ret < 0)
			goto out;
	}

	ret = set_block_group_ro(cache, 0);
	if (!ret)
@@ -8842,7 +8860,9 @@ again:
out:
	if (cache->flags & BTRFS_BLOCK_GROUP_SYSTEM) {
		alloc_flags = update_block_group_flags(root, cache->flags);
		lock_chunks(root->fs_info->chunk_root);
		check_system_chunk(trans, root, alloc_flags);
		unlock_chunks(root->fs_info->chunk_root);
	}
	mutex_unlock(&root->fs_info->ro_block_group_mutex);
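
Two of the fixes meet in this file. The first hunk is the raid-conversion regression fix: before the block group is flipped read-only for a raid level change, force-allocate a chunk with the post-conversion flags so relocation has somewhere to go, and treat -ENOSPC as success because space at the new raid level may already exist. A rough user-space model of that "try, but tolerate ENOSPC" flow, using made-up helper names rather than the real btrfs calls:

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

/* Hypothetical allocator: pretend the target profile is already provisioned. */
static int alloc_chunk(unsigned int flags, bool force)
{
	(void)flags;
	(void)force;
	return -ENOSPC;			/* nothing new needs to be allocated */
}

static int make_group_ro(unsigned int current_flags, unsigned int target_flags)
{
	int ret = 0;

	if (target_flags != current_flags) {
		ret = alloc_chunk(target_flags, true);
		/* ENOSPC is allowed: the new raid level may already have room */
		if (ret == -ENOSPC)
			ret = 0;
		if (ret < 0)
			return ret;
	}
	/* ... proceed to mark the block group read-only ... */
	return ret;
}

int main(void)
{
	printf("make_group_ro() = %d\n", make_group_ro(0x1, 0x2));	/* prints 0 */
	return 0;
}

The second hunk belongs to the system-chunk race fix: check_system_chunk() is now called with lock_chunks() held, which pairs with the ASSERT added in fs/btrfs/volumes.c below.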

fs/btrfs/volumes.c

@@ -4625,6 +4625,7 @@ int btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
{
	u64 chunk_offset;

	ASSERT(mutex_is_locked(&extent_root->fs_info->chunk_mutex));
	chunk_offset = find_next_chunk(extent_root->fs_info);
	return __btrfs_alloc_chunk(trans, extent_root, chunk_offset, type);
}
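
The ASSERT documents the locking rule behind the "racy system chunk allocation" fix: btrfs_alloc_chunk() must now run with fs_info->chunk_mutex held, and the extent-tree.c hunk above takes lock_chunks()/unlock_chunks() around check_system_chunk() so the "do we need a system chunk?" check and any allocation it triggers happen atomically, rather than two tasks both seeing a shortfall and double-allocating chunks as the pull message describes. A toy pthread model of that contract, with invented names rather than the kernel's locking primitives:

#include <assert.h>
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

/* Toy stand-ins for fs_info->chunk_mutex and the state it protects. */
static pthread_mutex_t chunk_mutex = PTHREAD_MUTEX_INITIALIZER;
static bool chunk_mutex_held;		/* only to mimic ASSERT(mutex_is_locked(...)) */
static int system_chunks;

static void lock_chunks(void)
{
	pthread_mutex_lock(&chunk_mutex);
	chunk_mutex_held = true;
}

static void unlock_chunks(void)
{
	chunk_mutex_held = false;
	pthread_mutex_unlock(&chunk_mutex);
}

/* Models btrfs_alloc_chunk(): refuses to run unless the caller holds the lock. */
static void alloc_chunk(void)
{
	assert(chunk_mutex_held);
	system_chunks++;
}

/* Models check_system_chunk(): the check and the allocation share one lock. */
static void check_system_chunk(void)
{
	assert(chunk_mutex_held);
	if (system_chunks == 0)		/* unlocked, two tasks could both see 0 */
		alloc_chunk();		/* here and allocate twice */
}

int main(void)
{
	lock_chunks();
	check_system_chunk();
	unlock_chunks();
	printf("system chunks allocated: %d\n", system_chunks);	/* prints 1 */
	return 0;
}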