1
0
Fork 0

btrfs: rename tree_entry to rb_simple_node and export it

Structure tree_entry provides a very simple rb_tree which only uses
bytenr as search index.

That tree_entry is used in 3 structures: backref_node, mapping_node and
tree_block.

Since we're going to make backref_node independent from relocation, it's
a good time to extract the tree_entry into rb_simple_node, and export it
into misc.h.

Signed-off-by: Qu Wenruo <wqu@suse.com>
Reviewed-by: David Sterba <dsterba@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
alistair/sunxi64-5.8
Qu Wenruo 2020-03-26 14:11:09 +08:00 committed by David Sterba
parent 7053544146
commit e9a28dc52a
3 changed files with 90 additions and 79 deletions

View File

@ -161,8 +161,10 @@ static inline void btrfs_backref_iter_release(struct btrfs_backref_iter *iter)
* Represent a tree block in the backref cache
*/
struct btrfs_backref_node {
struct rb_node rb_node;
u64 bytenr;
struct {
struct rb_node rb_node;
u64 bytenr;
}; /* Use rb_simple_node for search/insert */
u64 new_bytenr;
/* Objectid of tree block owner, can be not uptodate */

View File

@ -6,6 +6,7 @@
#include <linux/sched.h>
#include <linux/wait.h>
#include <asm/div64.h>
#include <linux/rbtree.h>
#define in_range(b, first, len) ((b) >= (first) && (b) < (first) + (len))
@ -58,4 +59,57 @@ static inline bool has_single_bit_set(u64 n)
return is_power_of_two_u64(n);
}
/*
 * Simple bytenr-based rb_tree related structures
 *
 * Any structure that wants to use bytenr as its single search index should
 * start with these members, so the rb_simple_search()/rb_simple_insert()
 * helpers below can operate on it via rb_entry().
 */
struct rb_simple_node {
	struct rb_node rb_node;	/* link into the bytenr-sorted rb_tree */
	u64 bytenr;		/* search key: logical bytenr */
};
/*
 * Search a bytenr-indexed rb_tree for the entry matching @bytenr.
 *
 * Return the matching rb_node, or NULL if no entry has that bytenr.
 */
static inline struct rb_node *rb_simple_search(struct rb_root *root, u64 bytenr)
{
	struct rb_node *cur;

	for (cur = root->rb_node; cur; ) {
		struct rb_simple_node *simple;

		simple = rb_entry(cur, struct rb_simple_node, rb_node);
		if (bytenr == simple->bytenr)
			return cur;
		cur = (bytenr < simple->bytenr) ? cur->rb_left : cur->rb_right;
	}
	return NULL;
}
/*
 * Insert @node into the bytenr-indexed rb_tree at key @bytenr.
 *
 * Return NULL on success, or the conflicting rb_node if an entry with the
 * same bytenr already exists (in which case @node is not inserted).
 */
static inline struct rb_node *rb_simple_insert(struct rb_root *root, u64 bytenr,
					       struct rb_node *node)
{
	struct rb_node **link = &root->rb_node;
	struct rb_node *parent = NULL;

	while (*link) {
		struct rb_simple_node *simple;

		parent = *link;
		simple = rb_entry(parent, struct rb_simple_node, rb_node);
		if (bytenr == simple->bytenr)
			return parent;
		link = (bytenr < simple->bytenr) ? &parent->rb_left
						 : &parent->rb_right;
	}
	rb_link_node(node, parent, link);
	rb_insert_color(node, root);
	return NULL;
}
#endif

View File

@ -24,6 +24,7 @@
#include "delalloc-space.h"
#include "block-group.h"
#include "backref.h"
#include "misc.h"
/*
* Relocation overview
@ -72,21 +73,15 @@
* The entry point of relocation is relocate_block_group() function.
*/
/*
 * Common header for bytenr-indexed rb_tree entries.
 *
 * btrfs_backref_node, mapping_node and tree_block start with this, so the
 * same search/insert helpers can be used for all three.
 */
struct tree_entry {
	struct rb_node rb_node;	/* link into the bytenr-sorted rb_tree */
	u64 bytenr;		/* search key: logical bytenr */
};
#define RELOCATION_RESERVED_NODES 256
/*
* map address of tree root to tree
*/
struct mapping_node {
struct rb_node rb_node;
u64 bytenr;
struct {
struct rb_node rb_node;
u64 bytenr;
}; /* Use rb_simple_node for search/insert */
void *data;
};
@ -99,8 +94,10 @@ struct mapping_tree {
* present a tree block to process
*/
struct tree_block {
struct rb_node rb_node;
u64 bytenr;
struct {
struct rb_node rb_node;
u64 bytenr;
}; /* Use rb_simple_node for search/insert */
struct btrfs_key key;
unsigned int level:8;
unsigned int key_ready:1;
@ -294,48 +291,6 @@ static void free_backref_edge(struct btrfs_backref_cache *cache,
}
}
/*
 * Insert @node, keyed by @bytenr, into a tree_entry-based rb_tree.
 *
 * Return NULL on success, or the conflicting rb_node if an entry with the
 * same bytenr is already present (in which case @node is not inserted).
 */
static struct rb_node *tree_insert(struct rb_root *root, u64 bytenr,
				   struct rb_node *node)
{
	struct rb_node **link = &root->rb_node;
	struct rb_node *parent = NULL;

	while (*link) {
		struct tree_entry *te;

		parent = *link;
		te = rb_entry(parent, struct tree_entry, rb_node);
		if (bytenr == te->bytenr)
			return parent;
		link = (bytenr < te->bytenr) ? &parent->rb_left
					     : &parent->rb_right;
	}
	rb_link_node(node, parent, link);
	rb_insert_color(node, root);
	return NULL;
}
/*
 * Look up the entry keyed by @bytenr in a tree_entry-based rb_tree.
 *
 * Return the matching rb_node, or NULL if not found.
 */
static struct rb_node *tree_search(struct rb_root *root, u64 bytenr)
{
	struct rb_node *cur = root->rb_node;

	while (cur) {
		struct tree_entry *te = rb_entry(cur, struct tree_entry, rb_node);

		if (bytenr == te->bytenr)
			return cur;
		cur = (bytenr < te->bytenr) ? cur->rb_left : cur->rb_right;
	}
	return NULL;
}
static void backref_tree_panic(struct rb_node *rb_node, int errno, u64 bytenr)
{
@ -474,7 +429,7 @@ static void update_backref_node(struct btrfs_backref_cache *cache,
struct rb_node *rb_node;
rb_erase(&node->rb_node, &cache->rb_root);
node->bytenr = bytenr;
rb_node = tree_insert(&cache->rb_root, node->bytenr, &node->rb_node);
rb_node = rb_simple_insert(&cache->rb_root, node->bytenr, &node->rb_node);
if (rb_node)
backref_tree_panic(rb_node, -EEXIST, bytenr);
}
@ -599,7 +554,7 @@ struct btrfs_root *find_reloc_root(struct btrfs_fs_info *fs_info, u64 bytenr)
ASSERT(rc);
spin_lock(&rc->reloc_root_tree.lock);
rb_node = tree_search(&rc->reloc_root_tree.rb_root, bytenr);
rb_node = rb_simple_search(&rc->reloc_root_tree.rb_root, bytenr);
if (rb_node) {
node = rb_entry(rb_node, struct mapping_node, rb_node);
root = (struct btrfs_root *)node->data;
@ -667,7 +622,7 @@ static int handle_direct_tree_backref(struct btrfs_backref_cache *cache,
if (!edge)
return -ENOMEM;
rb_node = tree_search(&cache->rb_root, ref_key->offset);
rb_node = rb_simple_search(&cache->rb_root, ref_key->offset);
if (!rb_node) {
/* Parent node not yet cached */
upper = alloc_backref_node(cache, ref_key->offset,
@ -788,7 +743,7 @@ static int handle_indirect_tree_backref(struct btrfs_backref_cache *cache,
}
eb = path->nodes[level];
rb_node = tree_search(&cache->rb_root, eb->start);
rb_node = rb_simple_search(&cache->rb_root, eb->start);
if (!rb_node) {
upper = alloc_backref_node(cache, eb->start,
lower->level + 1);
@ -994,8 +949,8 @@ static int finish_upper_links(struct btrfs_backref_cache *cache,
/* Insert this node to cache if it's not COW-only */
if (!start->cowonly) {
rb_node = tree_insert(&cache->rb_root, start->bytenr,
&start->rb_node);
rb_node = rb_simple_insert(&cache->rb_root, start->bytenr,
&start->rb_node);
if (rb_node)
backref_tree_panic(rb_node, -EEXIST, start->bytenr);
list_add_tail(&start->lower, &cache->leaves);
@ -1062,8 +1017,8 @@ static int finish_upper_links(struct btrfs_backref_cache *cache,
/* Only cache non-COW-only (subvolume trees) tree blocks */
if (!upper->cowonly) {
rb_node = tree_insert(&cache->rb_root, upper->bytenr,
&upper->rb_node);
rb_node = rb_simple_insert(&cache->rb_root, upper->bytenr,
&upper->rb_node);
if (rb_node) {
backref_tree_panic(rb_node, -EEXIST,
upper->bytenr);
@ -1311,7 +1266,7 @@ static int clone_backref_node(struct btrfs_trans_handle *trans,
if (cache->last_trans > 0)
update_backref_cache(trans, cache);
rb_node = tree_search(&cache->rb_root, src->commit_root->start);
rb_node = rb_simple_search(&cache->rb_root, src->commit_root->start);
if (rb_node) {
node = rb_entry(rb_node, struct btrfs_backref_node, rb_node);
if (node->detached)
@ -1321,8 +1276,8 @@ static int clone_backref_node(struct btrfs_trans_handle *trans,
}
if (!node) {
rb_node = tree_search(&cache->rb_root,
reloc_root->commit_root->start);
rb_node = rb_simple_search(&cache->rb_root,
reloc_root->commit_root->start);
if (rb_node) {
node = rb_entry(rb_node, struct btrfs_backref_node,
rb_node);
@ -1355,8 +1310,8 @@ static int clone_backref_node(struct btrfs_trans_handle *trans,
list_add_tail(&new_node->lower, &cache->leaves);
}
rb_node = tree_insert(&cache->rb_root, new_node->bytenr,
&new_node->rb_node);
rb_node = rb_simple_insert(&cache->rb_root, new_node->bytenr,
&new_node->rb_node);
if (rb_node)
backref_tree_panic(rb_node, -EEXIST, new_node->bytenr);
@ -1396,8 +1351,8 @@ static int __must_check __add_reloc_root(struct btrfs_root *root)
node->data = root;
spin_lock(&rc->reloc_root_tree.lock);
rb_node = tree_insert(&rc->reloc_root_tree.rb_root,
node->bytenr, &node->rb_node);
rb_node = rb_simple_insert(&rc->reloc_root_tree.rb_root,
node->bytenr, &node->rb_node);
spin_unlock(&rc->reloc_root_tree.lock);
if (rb_node) {
btrfs_panic(fs_info, -EEXIST,
@ -1423,8 +1378,8 @@ static void __del_reloc_root(struct btrfs_root *root)
if (rc && root->node) {
spin_lock(&rc->reloc_root_tree.lock);
rb_node = tree_search(&rc->reloc_root_tree.rb_root,
root->commit_root->start);
rb_node = rb_simple_search(&rc->reloc_root_tree.rb_root,
root->commit_root->start);
if (rb_node) {
node = rb_entry(rb_node, struct mapping_node, rb_node);
rb_erase(&node->rb_node, &rc->reloc_root_tree.rb_root);
@ -1467,8 +1422,8 @@ static int __update_reloc_root(struct btrfs_root *root)
struct reloc_control *rc = fs_info->reloc_ctl;
spin_lock(&rc->reloc_root_tree.lock);
rb_node = tree_search(&rc->reloc_root_tree.rb_root,
root->commit_root->start);
rb_node = rb_simple_search(&rc->reloc_root_tree.rb_root,
root->commit_root->start);
if (rb_node) {
node = rb_entry(rb_node, struct mapping_node, rb_node);
rb_erase(&node->rb_node, &rc->reloc_root_tree.rb_root);
@ -1481,8 +1436,8 @@ static int __update_reloc_root(struct btrfs_root *root)
spin_lock(&rc->reloc_root_tree.lock);
node->bytenr = root->node->start;
rb_node = tree_insert(&rc->reloc_root_tree.rb_root,
node->bytenr, &node->rb_node);
rb_node = rb_simple_insert(&rc->reloc_root_tree.rb_root,
node->bytenr, &node->rb_node);
spin_unlock(&rc->reloc_root_tree.lock);
if (rb_node)
backref_tree_panic(rb_node, -EEXIST, node->bytenr);
@ -3640,7 +3595,7 @@ static int add_tree_block(struct reloc_control *rc,
block->level = level;
block->key_ready = 0;
rb_node = tree_insert(blocks, block->bytenr, &block->rb_node);
rb_node = rb_simple_insert(blocks, block->bytenr, &block->rb_node);
if (rb_node)
backref_tree_panic(rb_node, -EEXIST, block->bytenr);
@ -3663,7 +3618,7 @@ static int __add_tree_block(struct reloc_control *rc,
if (tree_block_processed(bytenr, rc))
return 0;
if (tree_search(blocks, bytenr))
if (rb_simple_search(blocks, bytenr))
return 0;
path = btrfs_alloc_path();