
Reimplement IDR and IDA using the radix tree

The IDR is very similar to the radix tree.  It has some functionality that
the radix tree does not have (alloc next free, cyclic allocation, a
callback-based for_each, destroy tree), which is readily implementable on
top of the radix tree.  A few small changes were needed in order to use a
tag to represent nodes with free space below them.  More extensive
changes were needed to support storing NULL as a valid entry in an IDR.
Plain radix trees still interpret NULL as a not-present entry.

The IDA is reimplemented as a client of the newly enhanced radix tree.  As
in the current implementation, it uses a bitmap at the last level of the
tree.
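
For illustration only (not part of this patch), a sketch of the new NULL
semantics using the post-patch API; null_entry_demo() is a hypothetical
caller:

static int null_entry_demo(void)
{
	DEFINE_IDR(idr);
	int id = idr_alloc(&idr, NULL, 0, 0, GFP_KERNEL);

	if (id < 0)
		return id;
	WARN_ON(idr_is_empty(&idr));		/* present-but-NULL is present */
	WARN_ON(idr_find(&idr, id) != NULL);	/* ...but lookup yields NULL */
	idr_remove(&idr, id);
	return 0;
}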

Signed-off-by: Matthew Wilcox <willy@infradead.org>
Signed-off-by: Matthew Wilcox <mawilcox@microsoft.com>
Tested-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Konstantin Khlebnikov <koct9i@gmail.com>
Cc: Ross Zwisler <ross.zwisler@linux.intel.com>
Cc: Tejun Heo <tj@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Matthew Wilcox 2016-12-20 10:27:56 -05:00
parent 0ac398ef39
commit 0a835c4f09
13 changed files with 993 additions and 1124 deletions

include/linux/idr.h

@@ -12,47 +12,28 @@
#ifndef __IDR_H__
#define __IDR_H__
#include <linux/types.h>
#include <linux/bitops.h>
#include <linux/init.h>
#include <linux/rcupdate.h>
/*
* Using 6 bits at each layer allows us to allocate 7 layers out of each page.
* 8 bits only gave us 3 layers out of every pair of pages, which is less
* efficient except for trees with a largest element between 192-255 inclusive.
*/
#define IDR_BITS 6
#define IDR_SIZE (1 << IDR_BITS)
#define IDR_MASK ((1 << IDR_BITS)-1)
struct idr_layer {
int prefix; /* the ID prefix of this idr_layer */
int layer; /* distance from leaf */
struct idr_layer __rcu *ary[1<<IDR_BITS];
int count; /* When zero, we can release it */
union {
/* A zero bit means "space here" */
DECLARE_BITMAP(bitmap, IDR_SIZE);
struct rcu_head rcu_head;
};
};
#include <linux/radix-tree.h>
#include <linux/gfp.h>
struct idr {
struct idr_layer __rcu *hint; /* the last layer allocated from */
struct idr_layer __rcu *top;
int layers; /* only valid w/o concurrent changes */
int cur; /* current pos for cyclic allocation */
spinlock_t lock;
int id_free_cnt;
struct idr_layer *id_free;
struct radix_tree_root idr_rt;
unsigned int idr_next;
};
#define IDR_INIT(name) \
/*
* The IDR API does not expose the tagging functionality of the radix tree
* to users. Use tag 0 to track whether a node has free space below it.
*/
#define IDR_FREE 0
/* Set the IDR flag and the IDR_FREE tag */
#define IDR_RT_MARKER ((__force gfp_t)(3 << __GFP_BITS_SHIFT))
#define IDR_INIT \
{ \
.lock = __SPIN_LOCK_UNLOCKED(name.lock), \
.idr_rt = RADIX_TREE_INIT(IDR_RT_MARKER) \
}
#define DEFINE_IDR(name) struct idr name = IDR_INIT(name)
#define DEFINE_IDR(name) struct idr name = IDR_INIT
/**
* idr_get_cursor - Return the current position of the cyclic allocator
@@ -62,9 +43,9 @@ struct idr {
* idr_alloc_cyclic() if it is free (otherwise the search will start from
* this position).
*/
static inline unsigned int idr_get_cursor(struct idr *idr)
static inline unsigned int idr_get_cursor(const struct idr *idr)
{
return READ_ONCE(idr->cur);
return READ_ONCE(idr->idr_next);
}
/**
@@ -77,7 +58,7 @@ static inline unsigned int idr_get_cursor(struct idr *idr)
*/
static inline void idr_set_cursor(struct idr *idr, unsigned int val)
{
WRITE_ONCE(idr->cur, val);
WRITE_ONCE(idr->idr_next, val);
}
/**
@@ -97,22 +78,31 @@ static inline void idr_set_cursor(struct idr *idr, unsigned int val)
* period).
*/
/*
* This is what we export.
*/
void *idr_find_slowpath(struct idr *idp, int id);
void idr_preload(gfp_t gfp_mask);
int idr_alloc(struct idr *idp, void *ptr, int start, int end, gfp_t gfp_mask);
int idr_alloc_cyclic(struct idr *idr, void *ptr, int start, int end, gfp_t gfp_mask);
int idr_for_each(struct idr *idp,
int idr_alloc(struct idr *, void *entry, int start, int end, gfp_t);
int idr_alloc_cyclic(struct idr *, void *entry, int start, int end, gfp_t);
int idr_for_each(const struct idr *,
int (*fn)(int id, void *p, void *data), void *data);
void *idr_get_next(struct idr *idp, int *nextid);
void *idr_replace(struct idr *idp, void *ptr, int id);
void idr_remove(struct idr *idp, int id);
void idr_destroy(struct idr *idp);
void idr_init(struct idr *idp);
bool idr_is_empty(struct idr *idp);
void *idr_get_next(struct idr *, int *nextid);
void *idr_replace(struct idr *, void *, int id);
void idr_destroy(struct idr *);
static inline void idr_remove(struct idr *idr, int id)
{
radix_tree_delete(&idr->idr_rt, id);
}
static inline void idr_init(struct idr *idr)
{
INIT_RADIX_TREE(&idr->idr_rt, IDR_RT_MARKER);
idr->idr_next = 0;
}
static inline bool idr_is_empty(const struct idr *idr)
{
return radix_tree_empty(&idr->idr_rt) &&
radix_tree_tagged(&idr->idr_rt, IDR_FREE);
}
/**
* idr_preload_end - end preload section started with idr_preload()
@@ -137,19 +127,14 @@ static inline void idr_preload_end(void)
* This function can be called under rcu_read_lock(), given that the leaf
* pointers' lifetimes are correctly managed.
*/
static inline void *idr_find(struct idr *idr, int id)
static inline void *idr_find(const struct idr *idr, int id)
{
struct idr_layer *hint = rcu_dereference_raw(idr->hint);
if (hint && (id & ~IDR_MASK) == hint->prefix)
return rcu_dereference_raw(hint->ary[id & IDR_MASK]);
return idr_find_slowpath(idr, id);
return radix_tree_lookup(&idr->idr_rt, id);
}
/**
* idr_for_each_entry - iterate over an idr's elements of a given type
* @idp: idr handle
* @idr: idr handle
* @entry: the type * to use as cursor
* @id: id entry's key
*
@@ -157,57 +142,60 @@ static inline void *idr_find(struct idr *idr, int id)
* after normal termination @entry is left with the value NULL. This
* is convenient for a "not found" value.
*/
#define idr_for_each_entry(idp, entry, id) \
for (id = 0; ((entry) = idr_get_next(idp, &(id))) != NULL; ++id)
#define idr_for_each_entry(idr, entry, id) \
for (id = 0; ((entry) = idr_get_next(idr, &(id))) != NULL; ++id)
/**
* idr_for_each_entry - continue iteration over an idr's elements of a given type
* @idp: idr handle
* idr_for_each_entry_continue - continue iteration over an idr's elements of a given type
* @idr: idr handle
* @entry: the type * to use as cursor
* @id: id entry's key
*
* Continue to iterate over list of given type, continuing after
* the current position.
*/
#define idr_for_each_entry_continue(idp, entry, id) \
for ((entry) = idr_get_next((idp), &(id)); \
#define idr_for_each_entry_continue(idr, entry, id) \
for ((entry) = idr_get_next((idr), &(id)); \
entry; \
++id, (entry) = idr_get_next((idp), &(id)))
++id, (entry) = idr_get_next((idr), &(id)))
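
As a usage sketch (struct foo and print_all() are hypothetical; the stored
entries are assumed to be struct foo pointers):

static void print_all(struct idr *idr)
{
	struct foo *p;
	int id;

	idr_for_each_entry(idr, p, id)
		pr_info("id %d -> %p\n", id, p);
	/* after normal termination, p is NULL here */
}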
/*
* IDA - IDR based id allocator, use when translation from id to
* pointer isn't necessary.
*
* IDA_BITMAP_LONGS is calculated to be one less to accommodate
* ida_bitmap->nr_busy so that the whole struct fits in 128 bytes.
*/
#define IDA_CHUNK_SIZE 128 /* 128 bytes per chunk */
#define IDA_BITMAP_LONGS (IDA_CHUNK_SIZE / sizeof(long) - 1)
#define IDA_BITMAP_LONGS (IDA_CHUNK_SIZE / sizeof(long))
#define IDA_BITMAP_BITS (IDA_BITMAP_LONGS * sizeof(long) * 8)
struct ida_bitmap {
long nr_busy;
unsigned long bitmap[IDA_BITMAP_LONGS];
};
struct ida {
struct idr idr;
struct radix_tree_root ida_rt;
struct ida_bitmap *free_bitmap;
};
#define IDA_INIT(name) { .idr = IDR_INIT((name).idr), .free_bitmap = NULL, }
#define DEFINE_IDA(name) struct ida name = IDA_INIT(name)
#define IDA_INIT { \
.ida_rt = RADIX_TREE_INIT(IDR_RT_MARKER | GFP_NOWAIT), \
}
#define DEFINE_IDA(name) struct ida name = IDA_INIT
int ida_pre_get(struct ida *ida, gfp_t gfp_mask);
int ida_get_new_above(struct ida *ida, int starting_id, int *p_id);
void ida_remove(struct ida *ida, int id);
void ida_destroy(struct ida *ida);
void ida_init(struct ida *ida);
int ida_simple_get(struct ida *ida, unsigned int start, unsigned int end,
gfp_t gfp_mask);
void ida_simple_remove(struct ida *ida, unsigned int id);
static inline void ida_init(struct ida *ida)
{
INIT_RADIX_TREE(&ida->ida_rt, IDR_RT_MARKER | GFP_NOWAIT);
ida->free_bitmap = NULL;
}
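
For reference, a sketch of the canonical two-step IDA allocation loop;
get_id() and the caller-provided lock are hypothetical:

static int get_id(struct ida *ida, spinlock_t *lock)
{
	int id, err;

	do {
		if (!ida_pre_get(ida, GFP_KERNEL))
			return -ENOMEM;
		spin_lock(lock);
		err = ida_get_new(ida, &id);
		spin_unlock(lock);
	} while (err == -EAGAIN);

	return err ? err : id;
}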
/**
* ida_get_new - allocate new ID
* @ida: idr handle
@@ -220,11 +208,8 @@ static inline int ida_get_new(struct ida *ida, int *p_id)
return ida_get_new_above(ida, 0, p_id);
}
static inline bool ida_is_empty(struct ida *ida)
static inline bool ida_is_empty(const struct ida *ida)
{
return idr_is_empty(&ida->idr);
return radix_tree_empty(&ida->ida_rt);
}
void __init idr_init_cache(void);
#endif /* __IDR_H__ */

include/linux/radix-tree.h

@@ -105,7 +105,10 @@ struct radix_tree_node {
unsigned long tags[RADIX_TREE_MAX_TAGS][RADIX_TREE_TAG_LONGS];
};
/* root tags are stored in gfp_mask, shifted by __GFP_BITS_SHIFT */
/* The top bits of gfp_mask are used to store the root tags and the IDR flag */
#define ROOT_IS_IDR ((__force gfp_t)(1 << __GFP_BITS_SHIFT))
#define ROOT_TAG_SHIFT (__GFP_BITS_SHIFT + 1)
struct radix_tree_root {
gfp_t gfp_mask;
struct radix_tree_node __rcu *rnode;
@@ -358,10 +361,14 @@ int radix_tree_split(struct radix_tree_root *, unsigned long index,
unsigned new_order);
int radix_tree_join(struct radix_tree_root *, unsigned long index,
unsigned new_order, void *);
void **idr_get_free(struct radix_tree_root *, struct radix_tree_iter *,
gfp_t, int end);
#define RADIX_TREE_ITER_TAG_MASK 0x00FF /* tag index in lower byte */
#define RADIX_TREE_ITER_TAGGED 0x0100 /* lookup tagged slots */
#define RADIX_TREE_ITER_CONTIG 0x0200 /* stop at first hole */
enum {
RADIX_TREE_ITER_TAG_MASK = 0x0f, /* tag index in lower nybble */
RADIX_TREE_ITER_TAGGED = 0x10, /* lookup tagged slots */
RADIX_TREE_ITER_CONTIG = 0x20, /* stop at first hole */
};
/**
* radix_tree_iter_init - initialize radix tree iterator
@@ -402,6 +409,40 @@ radix_tree_iter_init(struct radix_tree_iter *iter, unsigned long start)
void **radix_tree_next_chunk(const struct radix_tree_root *,
struct radix_tree_iter *iter, unsigned flags);
/**
* radix_tree_iter_lookup - look up an index in the radix tree
* @root: radix tree root
* @iter: iterator state
* @index: key to look up
*
* If @index is present in the radix tree, this function returns the slot
* containing it and updates @iter to describe the entry. If @index is not
* present, it returns NULL.
*/
static inline void **radix_tree_iter_lookup(const struct radix_tree_root *root,
struct radix_tree_iter *iter, unsigned long index)
{
radix_tree_iter_init(iter, index);
return radix_tree_next_chunk(root, iter, RADIX_TREE_ITER_CONTIG);
}
/**
* radix_tree_iter_find - find a present entry
* @root: radix tree root
* @iter: iterator state
* @index: start location
*
* This function returns the slot containing the entry with the lowest index
* which is at least @index. If @index is larger than any present entry, this
* function returns NULL. The @iter is updated to describe the entry found.
*/
static inline void **radix_tree_iter_find(const struct radix_tree_root *root,
struct radix_tree_iter *iter, unsigned long index)
{
radix_tree_iter_init(iter, index);
return radix_tree_next_chunk(root, iter, 0);
}
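
A usage sketch (find_from() is hypothetical; the caller is assumed to hold
the RCU read lock or otherwise exclude concurrent modification):

static void *find_from(struct radix_tree_root *root, unsigned long start)
{
	struct radix_tree_iter iter;
	void **slot;

	slot = radix_tree_iter_find(root, &iter, start);
	if (!slot)
		return NULL;
	pr_debug("first entry at or above %lu is at %lu\n", start, iter.index);
	return rcu_dereference_raw(*slot);
}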
/**
* radix_tree_iter_retry - retry this chunk of the iteration
* @iter: iterator state

init/main.c

@@ -553,7 +553,7 @@ asmlinkage __visible void __init start_kernel(void)
if (WARN(!irqs_disabled(),
"Interrupts were enabled *very* early, fixing it\n"))
local_irq_disable();
idr_init_cache();
radix_tree_init();
/*
* Allow workqueue creation and work item queueing/cancelling
@@ -568,7 +568,6 @@ asmlinkage __visible void __init start_kernel(void)
trace_init();
context_tracking_init();
radix_tree_init();
/* init some links before init_ISA_irqs() */
early_irq_init();
init_IRQ();

lib/idr.c: 1170 lines changed (diff suppressed because it is too large)

lib/radix-tree.c

@@ -22,20 +22,21 @@
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/bitmap.h>
#include <linux/bitops.h>
#include <linux/cpu.h>
#include <linux/errno.h>
#include <linux/export.h>
#include <linux/idr.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/radix-tree.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/kmemleak.h>
#include <linux/cpu.h>
#include <linux/string.h>
#include <linux/bitops.h>
#include <linux/rcupdate.h>
#include <linux/percpu.h>
#include <linux/preempt.h> /* in_interrupt() */
#include <linux/radix-tree.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/string.h>
/* Number of nodes in fully populated tree of given height */
@@ -59,6 +60,15 @@ static struct kmem_cache *radix_tree_node_cachep;
*/
#define RADIX_TREE_PRELOAD_SIZE (RADIX_TREE_MAX_PATH * 2 - 1)
/*
* The IDR does not have to be as high as the radix tree since it uses
* signed integers, not unsigned longs.
*/
#define IDR_INDEX_BITS (8 /* CHAR_BIT */ * sizeof(int) - 1)
#define IDR_MAX_PATH (DIV_ROUND_UP(IDR_INDEX_BITS, \
RADIX_TREE_MAP_SHIFT))
#define IDR_PRELOAD_SIZE (IDR_MAX_PATH * 2 - 1)
/*
* Per-cpu pool of preloaded nodes
*/
@@ -149,27 +159,32 @@ static inline int tag_get(const struct radix_tree_node *node, unsigned int tag,
static inline void root_tag_set(struct radix_tree_root *root, unsigned tag)
{
root->gfp_mask |= (__force gfp_t)(1 << (tag + __GFP_BITS_SHIFT));
root->gfp_mask |= (__force gfp_t)(1 << (tag + ROOT_TAG_SHIFT));
}
static inline void root_tag_clear(struct radix_tree_root *root, unsigned tag)
{
root->gfp_mask &= (__force gfp_t)~(1 << (tag + __GFP_BITS_SHIFT));
root->gfp_mask &= (__force gfp_t)~(1 << (tag + ROOT_TAG_SHIFT));
}
static inline void root_tag_clear_all(struct radix_tree_root *root)
{
root->gfp_mask &= __GFP_BITS_MASK;
root->gfp_mask &= (1 << ROOT_TAG_SHIFT) - 1;
}
static inline int root_tag_get(const struct radix_tree_root *root, unsigned tag)
{
return (__force int)root->gfp_mask & (1 << (tag + __GFP_BITS_SHIFT));
return (__force int)root->gfp_mask & (1 << (tag + ROOT_TAG_SHIFT));
}
static inline unsigned root_tags_get(const struct radix_tree_root *root)
{
return (__force unsigned)root->gfp_mask >> __GFP_BITS_SHIFT;
return (__force unsigned)root->gfp_mask >> ROOT_TAG_SHIFT;
}
static inline bool is_idr(const struct radix_tree_root *root)
{
return !!(root->gfp_mask & ROOT_IS_IDR);
}
/*
@@ -187,6 +202,11 @@ static inline int any_tag_set(const struct radix_tree_node *node,
return 0;
}
static inline void all_tag_set(struct radix_tree_node *node, unsigned int tag)
{
bitmap_fill(node->tags[tag], RADIX_TREE_MAP_SIZE);
}
/**
* radix_tree_find_next_bit - find the next set bit in a memory region
*
@@ -240,6 +260,13 @@ static inline unsigned long node_maxindex(const struct radix_tree_node *node)
return shift_maxindex(node->shift);
}
static unsigned long next_index(unsigned long index,
const struct radix_tree_node *node,
unsigned long offset)
{
return (index & ~node_maxindex(node)) + (offset << node->shift);
}
#ifndef __KERNEL__
static void dump_node(struct radix_tree_node *node, unsigned long index)
{
@@ -278,11 +305,52 @@ static void radix_tree_dump(struct radix_tree_root *root)
{
pr_debug("radix root: %p rnode %p tags %x\n",
root, root->rnode,
root->gfp_mask >> __GFP_BITS_SHIFT);
root->gfp_mask >> ROOT_TAG_SHIFT);
if (!radix_tree_is_internal_node(root->rnode))
return;
dump_node(entry_to_node(root->rnode), 0);
}
static void dump_ida_node(void *entry, unsigned long index)
{
unsigned long i;
if (!entry)
return;
if (radix_tree_is_internal_node(entry)) {
struct radix_tree_node *node = entry_to_node(entry);
pr_debug("ida node: %p offset %d indices %lu-%lu parent %p free %lx shift %d count %d\n",
node, node->offset, index * IDA_BITMAP_BITS,
((index | node_maxindex(node)) + 1) *
IDA_BITMAP_BITS - 1,
node->parent, node->tags[0][0], node->shift,
node->count);
for (i = 0; i < RADIX_TREE_MAP_SIZE; i++)
dump_ida_node(node->slots[i],
index | (i << node->shift));
} else {
struct ida_bitmap *bitmap = entry;
pr_debug("ida btmp: %p offset %d indices %lu-%lu data", bitmap,
(int)(index & RADIX_TREE_MAP_MASK),
index * IDA_BITMAP_BITS,
(index + 1) * IDA_BITMAP_BITS - 1);
for (i = 0; i < IDA_BITMAP_LONGS; i++)
pr_cont(" %lx", bitmap->bitmap[i]);
pr_cont("\n");
}
}
static void ida_dump(struct ida *ida)
{
struct radix_tree_root *root = &ida->ida_rt;
pr_debug("ida: %p %p free %d bitmap %p\n", ida, root->rnode,
root->gfp_mask >> ROOT_TAG_SHIFT,
ida->free_bitmap);
dump_ida_node(root->rnode, 0);
}
#endif
/*
@@ -290,13 +358,11 @@ static void radix_tree_dump(struct radix_tree_root *root)
* that the caller has pinned this thread of control to the current CPU.
*/
static struct radix_tree_node *
radix_tree_node_alloc(struct radix_tree_root *root,
struct radix_tree_node *parent,
radix_tree_node_alloc(gfp_t gfp_mask, struct radix_tree_node *parent,
unsigned int shift, unsigned int offset,
unsigned int count, unsigned int exceptional)
{
struct radix_tree_node *ret = NULL;
gfp_t gfp_mask = root_gfp_mask(root);
/*
* Preload code isn't irq safe and it doesn't make sense to use
@@ -533,7 +599,7 @@ static unsigned radix_tree_load_root(const struct radix_tree_root *root,
/*
* Extend a radix tree so it can store key @index.
*/
static int radix_tree_extend(struct radix_tree_root *root,
static int radix_tree_extend(struct radix_tree_root *root, gfp_t gfp,
unsigned long index, unsigned int shift)
{
struct radix_tree_node *slot;
@@ -546,19 +612,27 @@ static int radix_tree_extend(struct radix_tree_root *root,
maxshift += RADIX_TREE_MAP_SHIFT;
slot = root->rnode;
if (!slot)
if (!slot && (!is_idr(root) || root_tag_get(root, IDR_FREE)))
goto out;
do {
struct radix_tree_node *node = radix_tree_node_alloc(root,
NULL, shift, 0, 1, 0);
struct radix_tree_node *node = radix_tree_node_alloc(gfp, NULL,
shift, 0, 1, 0);
if (!node)
return -ENOMEM;
/* Propagate the aggregated tag info into the new root */
for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++) {
if (root_tag_get(root, tag))
tag_set(node, tag, 0);
if (is_idr(root)) {
all_tag_set(node, IDR_FREE);
if (!root_tag_get(root, IDR_FREE)) {
tag_clear(node, IDR_FREE, 0);
root_tag_set(root, IDR_FREE);
}
} else {
/* Propagate the aggregated tag info to the new child */
for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++) {
if (root_tag_get(root, tag))
tag_set(node, tag, 0);
}
}
BUG_ON(shift > BITS_PER_LONG);
@@ -619,6 +693,8 @@ static inline bool radix_tree_shrink(struct radix_tree_root *root,
* one (root->rnode) as far as dependent read barriers go.
*/
root->rnode = child;
if (is_idr(root) && !tag_get(node, IDR_FREE, 0))
root_tag_clear(root, IDR_FREE);
/*
* We have a dilemma here. The node's slot[0] must not be
@@ -674,7 +750,12 @@ static bool delete_node(struct radix_tree_root *root,
parent->slots[node->offset] = NULL;
parent->count--;
} else {
root_tag_clear_all(root);
/*
* Shouldn't the tags already have all been cleared
* by the caller?
*/
if (!is_idr(root))
root_tag_clear_all(root);
root->rnode = NULL;
}
@@ -714,6 +795,7 @@ int __radix_tree_create(struct radix_tree_root *root, unsigned long index,
unsigned long maxindex;
unsigned int shift, offset = 0;
unsigned long max = index | ((1UL << order) - 1);
gfp_t gfp = root_gfp_mask(root);
shift = radix_tree_load_root(root, &child, &maxindex);
@@ -721,7 +803,7 @@ int __radix_tree_create(struct radix_tree_root *root, unsigned long index,
if (order > 0 && max == ((1UL << order) - 1))
max++;
if (max > maxindex) {
int error = radix_tree_extend(root, max, shift);
int error = radix_tree_extend(root, gfp, max, shift);
if (error < 0)
return error;
shift = error;
@@ -732,7 +814,7 @@ int __radix_tree_create(struct radix_tree_root *root, unsigned long index,
shift -= RADIX_TREE_MAP_SHIFT;
if (child == NULL) {
/* Have to add a child node. */
child = radix_tree_node_alloc(root, node, shift,
child = radix_tree_node_alloc(gfp, node, shift,
offset, 0, 0);
if (!child)
return -ENOMEM;
@@ -755,7 +837,6 @@ int __radix_tree_create(struct radix_tree_root *root, unsigned long index,
return 0;
}
#ifdef CONFIG_RADIX_TREE_MULTIORDER
/*
* Free any nodes below this node. The tree is presumed to not need
* shrinking, and any user data in the tree is presumed to not need a
@@ -791,6 +872,7 @@ static void radix_tree_free_nodes(struct radix_tree_node *node)
}
}
#ifdef CONFIG_RADIX_TREE_MULTIORDER
static inline int insert_entries(struct radix_tree_node *node, void **slot,
void *item, unsigned order, bool replace)
{
@@ -996,69 +1078,70 @@ void *radix_tree_lookup(const struct radix_tree_root *root, unsigned long index)
}
EXPORT_SYMBOL(radix_tree_lookup);
static inline int slot_count(struct radix_tree_node *node,
void **slot)
static inline void replace_sibling_entries(struct radix_tree_node *node,
void **slot, int count, int exceptional)
{
int n = 1;
#ifdef CONFIG_RADIX_TREE_MULTIORDER
void *ptr = node_to_entry(slot);
unsigned offset = get_slot_offset(node, slot);
int i;
unsigned offset = get_slot_offset(node, slot) + 1;
for (i = 1; offset + i < RADIX_TREE_MAP_SIZE; i++) {
if (node->slots[offset + i] != ptr)
while (offset < RADIX_TREE_MAP_SIZE) {
if (node->slots[offset] != ptr)
break;
n++;
if (count < 0) {
node->slots[offset] = NULL;
node->count--;
}
node->exceptional += exceptional;
offset++;
}
#endif
return n;
}
static void replace_slot(struct radix_tree_root *root,
struct radix_tree_node *node,
void **slot, void *item,
bool warn_typeswitch)
static void replace_slot(void **slot, void *item, struct radix_tree_node *node,
int count, int exceptional)
{
void *old = rcu_dereference_raw(*slot);
int count, exceptional;
if (WARN_ON_ONCE(radix_tree_is_internal_node(item)))
return;
WARN_ON_ONCE(radix_tree_is_internal_node(item));
count = !!item - !!old;
exceptional = !!radix_tree_exceptional_entry(item) -
!!radix_tree_exceptional_entry(old);
WARN_ON_ONCE(warn_typeswitch && (count || exceptional));
if (node) {
if (node && (count || exceptional)) {
node->count += count;
if (exceptional) {
exceptional *= slot_count(node, slot);
node->exceptional += exceptional;
}
node->exceptional += exceptional;
replace_sibling_entries(node, slot, count, exceptional);
}
rcu_assign_pointer(*slot, item);
}
static inline void delete_sibling_entries(struct radix_tree_node *node,
void **slot)
static bool node_tag_get(const struct radix_tree_root *root,
const struct radix_tree_node *node,
unsigned int tag, unsigned int offset)
{
#ifdef CONFIG_RADIX_TREE_MULTIORDER
bool exceptional = radix_tree_exceptional_entry(*slot);
void *ptr = node_to_entry(slot);
unsigned offset = get_slot_offset(node, slot);
int i;
if (node)
return tag_get(node, tag, offset);
return root_tag_get(root, tag);
}
for (i = 1; offset + i < RADIX_TREE_MAP_SIZE; i++) {
if (node->slots[offset + i] != ptr)
break;
node->slots[offset + i] = NULL;
node->count--;
if (exceptional)
node->exceptional--;
/*
* IDR users want to be able to store NULL in the tree, so if the slot isn't
* free, don't adjust the count, even if it's transitioning between NULL and
* non-NULL. For the IDA, we mark slots as being IDR_FREE while they still
* have empty bits, but it only stores NULL in slots when they're being
* deleted.
*/
static int calculate_count(struct radix_tree_root *root,
struct radix_tree_node *node, void **slot,
void *item, void *old)
{
if (is_idr(root)) {
unsigned offset = get_slot_offset(node, slot);
bool free = node_tag_get(root, node, IDR_FREE, offset);
if (!free)
return 0;
if (!old)
return 1;
}
#endif
return !!item - !!old;
}
/**
@@ -1078,15 +1161,19 @@ void __radix_tree_replace(struct radix_tree_root *root,
void **slot, void *item,
radix_tree_update_node_t update_node, void *private)
{
if (!item)
delete_sibling_entries(node, slot);
void *old = rcu_dereference_raw(*slot);
int exceptional = !!radix_tree_exceptional_entry(item) -
!!radix_tree_exceptional_entry(old);
int count = calculate_count(root, node, slot, item, old);
/*
* This function supports replacing exceptional entries and
* deleting entries, but that needs accounting against the
* node unless the slot is root->rnode.
*/
replace_slot(root, node, slot, item,
!node && slot != (void **)&root->rnode);
WARN_ON_ONCE(!node && (slot != (void **)&root->rnode) &&
(count || exceptional));
replace_slot(slot, item, node, count, exceptional);
if (!node)
return;
@@ -1116,7 +1203,7 @@ void __radix_tree_replace(struct radix_tree_root *root,
void radix_tree_replace_slot(struct radix_tree_root *root,
void **slot, void *item)
{
replace_slot(root, NULL, slot, item, true);
__radix_tree_replace(root, NULL, slot, item, NULL, NULL);
}
/**
@@ -1191,6 +1278,7 @@ int radix_tree_split(struct radix_tree_root *root, unsigned long index,
void **slot;
unsigned int offset, end;
unsigned n, tag, tags = 0;
gfp_t gfp = root_gfp_mask(root);
if (!__radix_tree_lookup(root, index, &parent, &slot))
return -ENOENT;
@@ -1228,7 +1316,7 @@ int radix_tree_split(struct radix_tree_root *root, unsigned long index,
for (;;) {
if (node->shift > order) {
child = radix_tree_node_alloc(root, node,
child = radix_tree_node_alloc(gfp, node,
node->shift - RADIX_TREE_MAP_SHIFT,
offset, 0, 0);
if (!child)
@@ -1444,8 +1532,6 @@ int radix_tree_tag_get(const struct radix_tree_root *root,
radix_tree_load_root(root, &node, &maxindex);
if (index > maxindex)
return 0;
if (node == NULL)
return 0;
while (radix_tree_is_internal_node(node)) {
unsigned offset;
@@ -1453,8 +1539,6 @@
parent = entry_to_node(node);
offset = radix_tree_descend(parent, &node, index);
if (!node)
return 0;
if (!tag_get(parent, tag, offset))
return 0;
if (node == RADIX_TREE_RETRY)
@@ -1481,6 +1565,11 @@ static void set_iter_tags(struct radix_tree_iter *iter,
unsigned tag_long = offset / BITS_PER_LONG;
unsigned tag_bit = offset % BITS_PER_LONG;
if (!node) {
iter->tags = 1;
return;
}
iter->tags = node->tags[tag][tag_long] >> tag_bit;
/* This never happens if RADIX_TREE_TAG_LONGS == 1 */
@@ -1873,13 +1962,18 @@ void __radix_tree_delete_node(struct radix_tree_root *root,
static bool __radix_tree_delete(struct radix_tree_root *root,
struct radix_tree_node *node, void **slot)
{
void *old = rcu_dereference_raw(*slot);
int exceptional = radix_tree_exceptional_entry(old) ? -1 : 0;
unsigned offset = get_slot_offset(node, slot);
int tag;
for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++)
node_tag_clear(root, node, tag, offset);
if (is_idr(root))
node_tag_set(root, node, IDR_FREE, offset);
else
for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++)
node_tag_clear(root, node, tag, offset);
replace_slot(root, node, slot, NULL, true);
replace_slot(slot, NULL, node, -1, exceptional);
return node && delete_node(root, node, NULL, NULL);
}
@@ -1916,12 +2010,13 @@ void radix_tree_iter_delete(struct radix_tree_root *root,
void *radix_tree_delete_item(struct radix_tree_root *root,
unsigned long index, void *item)
{
struct radix_tree_node *node;
struct radix_tree_node *node = NULL;
void **slot;
void *entry;
entry = __radix_tree_lookup(root, index, &node, &slot);
if (!entry)
if (!entry && (!is_idr(root) || node_tag_get(root, node, IDR_FREE,
get_slot_offset(node, slot))))
return NULL;
if (item && entry != item)
@@ -1957,8 +2052,7 @@ void radix_tree_clear_tags(struct radix_tree_root *root,
for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++)
node_tag_clear(root, node, tag, offset);
} else {
/* Clear root node tags */
root->gfp_mask &= __GFP_BITS_MASK;
root_tag_clear_all(root);
}
}
@@ -1973,6 +2067,111 @@ int radix_tree_tagged(const struct radix_tree_root *root, unsigned int tag)
}
EXPORT_SYMBOL(radix_tree_tagged);
/**
* idr_preload - preload for idr_alloc()
* @gfp_mask: allocation mask to use for preloading
*
* Preallocate memory to use for the next call to idr_alloc(). This function
* returns with preemption disabled. It will be enabled by idr_preload_end().
*/
void idr_preload(gfp_t gfp_mask)
{
__radix_tree_preload(gfp_mask, IDR_PRELOAD_SIZE);
}
EXPORT_SYMBOL(idr_preload);
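
The intended calling pattern, sketched (insert_locked() and the spinlock
are hypothetical):

static int insert_locked(struct idr *idr, spinlock_t *lock, void *ptr)
{
	int id;

	idr_preload(GFP_KERNEL);	/* may sleep; disables preemption */
	spin_lock(lock);
	id = idr_alloc(idr, ptr, 0, 0, GFP_NOWAIT);
	spin_unlock(lock);
	idr_preload_end();		/* re-enables preemption */

	return id;
}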
void **idr_get_free(struct radix_tree_root *root,
struct radix_tree_iter *iter, gfp_t gfp, int end)
{
struct radix_tree_node *node = NULL, *child;
void **slot = (void **)&root->rnode;
unsigned long maxindex, start = iter->next_index;
unsigned long max = end > 0 ? end - 1 : INT_MAX;
unsigned int shift, offset = 0;
grow:
shift = radix_tree_load_root(root, &child, &maxindex);
if (!radix_tree_tagged(root, IDR_FREE))
start = max(start, maxindex + 1);
if (start > max)
return ERR_PTR(-ENOSPC);
if (start > maxindex) {
int error = radix_tree_extend(root, gfp, start, shift);
if (error < 0)
return ERR_PTR(error);
shift = error;
child = rcu_dereference_raw(root->rnode);
}
while (shift) {
shift -= RADIX_TREE_MAP_SHIFT;
if (child == NULL) {
/* Have to add a child node. */
child = radix_tree_node_alloc(gfp, node, shift, offset,
0, 0);
if (!child)
return ERR_PTR(-ENOMEM);
all_tag_set(child, IDR_FREE);
rcu_assign_pointer(*slot, node_to_entry(child));
if (node)
node->count++;
} else if (!radix_tree_is_internal_node(child))
break;
node = entry_to_node(child);
offset = radix_tree_descend(node, &child, start);
if (!tag_get(node, IDR_FREE, offset)) {
offset = radix_tree_find_next_bit(node, IDR_FREE,
offset + 1);
start = next_index(start, node, offset);
if (start > max)
return ERR_PTR(-ENOSPC);
while (offset == RADIX_TREE_MAP_SIZE) {
offset = node->offset + 1;
node = node->parent;
if (!node)
goto grow;
shift = node->shift;
}
child = rcu_dereference_raw(node->slots[offset]);
}
slot = &node->slots[offset];
}
iter->index = start;
if (node)
iter->next_index = 1 + min(max, (start | node_maxindex(node)));
else
iter->next_index = 1;
iter->node = node;
__set_iter_shift(iter, shift);
set_iter_tags(iter, node, offset, IDR_FREE);
return slot;
}
/**
* idr_destroy - release all internal memory from an IDR
* @idr: idr handle
*
* After this function is called, the IDR is empty, and may be reused or
* the data structure containing it may be freed.
*
* A typical clean-up sequence for objects stored in an idr tree will use
* idr_for_each() to free all objects, if necessary, then idr_destroy() to
* free the memory used to keep track of those objects.
*/
void idr_destroy(struct idr *idr)
{
struct radix_tree_node *node = rcu_dereference_raw(idr->idr_rt.rnode);
if (radix_tree_is_internal_node(node))
radix_tree_free_nodes(node);
idr->idr_rt.rnode = NULL;
root_tag_set(&idr->idr_rt, IDR_FREE);
}
EXPORT_SYMBOL(idr_destroy);
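
A sketch of the clean-up sequence described above; free_entry() and
release() are hypothetical:

static int free_entry(int id, void *p, void *data)
{
	kfree(p);		/* entries are assumed to be kmalloc'ed */
	return 0;
}

static void release(struct idr *idr)
{
	idr_for_each(idr, free_entry, NULL);
	idr_destroy(idr);
}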
static void
radix_tree_node_ctor(void *arg)
{

tools/testing/radix-tree/.gitignore

@@ -1,2 +1,3 @@
main
radix-tree.c
idr.c

tools/testing/radix-tree/Makefile

@@ -2,8 +2,8 @@
CFLAGS += -I. -I../../include -g -O2 -Wall -D_LGPL_SOURCE
LDFLAGS += -lpthread -lurcu
TARGETS = main
OFILES = main.o radix-tree.o linux.o test.o tag_check.o find_bit.o \
regression1.o regression2.o regression3.o multiorder.o \
OFILES = main.o radix-tree.o idr.o linux.o test.o tag_check.o find_bit.o \
regression1.o regression2.o regression3.o multiorder.o idr-test.o \
iteration_check.o benchmark.o
ifdef BENCHMARK
@@ -23,7 +23,11 @@ vpath %.c ../../lib
$(OFILES): *.h */*.h \
../../include/linux/*.h \
../../include/asm/*.h \
../../../include/linux/radix-tree.h
../../../include/linux/radix-tree.h \
../../../include/linux/idr.h
radix-tree.c: ../../../lib/radix-tree.c
sed -e 's/^static //' -e 's/__always_inline //' -e 's/inline //' < $< > $@
idr.c: ../../../lib/idr.c
sed -e 's/^static //' -e 's/__always_inline //' -e 's/inline //' < $< > $@

tools/testing/radix-tree/idr-test.c

@@ -0,0 +1,342 @@
/*
* idr-test.c: Test the IDR API
* Copyright (c) 2016 Matthew Wilcox <willy@infradead.org>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*/
#include <linux/idr.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include "test.h"
#define DUMMY_PTR ((void *)0x12)
int item_idr_free(int id, void *p, void *data)
{
struct item *item = p;
assert(item->index == id);
free(p);
return 0;
}
void item_idr_remove(struct idr *idr, int id)
{
struct item *item = idr_find(idr, id);
assert(item->index == id);
idr_remove(idr, id);
free(item);
}
void idr_alloc_test(void)
{
unsigned long i;
DEFINE_IDR(idr);
assert(idr_alloc_cyclic(&idr, DUMMY_PTR, 0, 0x4000, GFP_KERNEL) == 0);
assert(idr_alloc_cyclic(&idr, DUMMY_PTR, 0x3ffd, 0x4000, GFP_KERNEL) == 0x3ffd);
idr_remove(&idr, 0x3ffd);
idr_remove(&idr, 0);
for (i = 0x3ffe; i < 0x4003; i++) {
int id;
struct item *item;
if (i < 0x4000)
item = item_create(i, 0);
else
item = item_create(i - 0x3fff, 0);
id = idr_alloc_cyclic(&idr, item, 1, 0x4000, GFP_KERNEL);
assert(id == item->index);
}
idr_for_each(&idr, item_idr_free, &idr);
idr_destroy(&idr);
}
void idr_replace_test(void)
{
DEFINE_IDR(idr);
idr_alloc(&idr, (void *)-1, 10, 11, GFP_KERNEL);
idr_replace(&idr, &idr, 10);
idr_destroy(&idr);
}
/*
* Unlike the radix tree, you can put a NULL pointer -- with care -- into
* the IDR. Some interfaces, like idr_find(), do not distinguish between
* "present, value is NULL" and "not present", but that's exactly what some
* users want.
*/
void idr_null_test(void)
{
int i;
DEFINE_IDR(idr);
assert(idr_is_empty(&idr));
assert(idr_alloc(&idr, NULL, 0, 0, GFP_KERNEL) == 0);
assert(!idr_is_empty(&idr));
idr_remove(&idr, 0);
assert(idr_is_empty(&idr));
assert(idr_alloc(&idr, NULL, 0, 0, GFP_KERNEL) == 0);
assert(!idr_is_empty(&idr));
idr_destroy(&idr);
assert(idr_is_empty(&idr));
for (i = 0; i < 10; i++) {
assert(idr_alloc(&idr, NULL, 0, 0, GFP_KERNEL) == i);
}
assert(idr_replace(&idr, DUMMY_PTR, 3) == NULL);
assert(idr_replace(&idr, DUMMY_PTR, 4) == NULL);
assert(idr_replace(&idr, NULL, 4) == DUMMY_PTR);
assert(idr_replace(&idr, DUMMY_PTR, 11) == ERR_PTR(-ENOENT));
idr_remove(&idr, 5);
assert(idr_alloc(&idr, NULL, 0, 0, GFP_KERNEL) == 5);
idr_remove(&idr, 5);
for (i = 0; i < 9; i++) {
idr_remove(&idr, i);
assert(!idr_is_empty(&idr));
}
idr_remove(&idr, 8);
assert(!idr_is_empty(&idr));
idr_remove(&idr, 9);
assert(idr_is_empty(&idr));
assert(idr_alloc(&idr, NULL, 0, 0, GFP_KERNEL) == 0);
assert(idr_replace(&idr, DUMMY_PTR, 3) == ERR_PTR(-ENOENT));
assert(idr_replace(&idr, DUMMY_PTR, 0) == NULL);
assert(idr_replace(&idr, NULL, 0) == DUMMY_PTR);
idr_destroy(&idr);
assert(idr_is_empty(&idr));
for (i = 1; i < 10; i++) {
assert(idr_alloc(&idr, NULL, 1, 0, GFP_KERNEL) == i);
}
idr_destroy(&idr);
assert(idr_is_empty(&idr));
}
void idr_nowait_test(void)
{
unsigned int i;
DEFINE_IDR(idr);
idr_preload(GFP_KERNEL);
for (i = 0; i < 3; i++) {
struct item *item = item_create(i, 0);
assert(idr_alloc(&idr, item, i, i + 1, GFP_NOWAIT) == i);
}
idr_preload_end();
idr_for_each(&idr, item_idr_free, &idr);
idr_destroy(&idr);
}
void idr_checks(void)
{
unsigned long i;
DEFINE_IDR(idr);
for (i = 0; i < 10000; i++) {
struct item *item = item_create(i, 0);
assert(idr_alloc(&idr, item, 0, 20000, GFP_KERNEL) == i);
}
assert(idr_alloc(&idr, DUMMY_PTR, 5, 30, GFP_KERNEL) < 0);
for (i = 0; i < 5000; i++)
item_idr_remove(&idr, i);
idr_remove(&idr, 3);
idr_for_each(&idr, item_idr_free, &idr);
idr_destroy(&idr);
assert(idr_is_empty(&idr));
idr_remove(&idr, 3);
idr_remove(&idr, 0);
for (i = INT_MAX - 3UL; i < INT_MAX + 1UL; i++) {
struct item *item = item_create(i, 0);
assert(idr_alloc(&idr, item, i, i + 10, GFP_KERNEL) == i);
}
assert(idr_alloc(&idr, DUMMY_PTR, i - 2, i, GFP_KERNEL) == -ENOSPC);
idr_for_each(&idr, item_idr_free, &idr);
idr_destroy(&idr);
idr_destroy(&idr);
assert(idr_is_empty(&idr));
for (i = 1; i < 10000; i++) {
struct item *item = item_create(i, 0);
assert(idr_alloc(&idr, item, 1, 20000, GFP_KERNEL) == i);
}
idr_for_each(&idr, item_idr_free, &idr);
idr_destroy(&idr);
idr_replace_test();
idr_alloc_test();
idr_null_test();
idr_nowait_test();
}
/*
* Check that we get the correct error when we run out of memory doing
* allocations. To ensure we run out of memory, just "forget" to preload.
* The first test is for not having a bitmap available, and the second test
* is for not being able to allocate a level of the radix tree.
*/
void ida_check_nomem(void)
{
DEFINE_IDA(ida);
int id, err;
err = ida_get_new(&ida, &id);
assert(err == -EAGAIN);
err = ida_get_new_above(&ida, 1UL << 30, &id);
assert(err == -EAGAIN);
}
/*
* Check what happens when we fill a leaf and then delete it. This may
* discover mishandling of IDR_FREE.
*/
void ida_check_leaf(void)
{
DEFINE_IDA(ida);
int id;
unsigned long i;
for (i = 0; i < IDA_BITMAP_BITS; i++) {
assert(ida_pre_get(&ida, GFP_KERNEL));
assert(!ida_get_new(&ida, &id));
assert(id == i);
}
ida_destroy(&ida);
assert(ida_is_empty(&ida));
assert(ida_pre_get(&ida, GFP_KERNEL));
assert(!ida_get_new(&ida, &id));
assert(id == 0);
ida_destroy(&ida);
assert(ida_is_empty(&ida));
}
/*
* Check allocations up to and slightly above the maximum allowed (2^31-1) ID.
* Allocating up to 2^31-1 should succeed, and then allocating the next one
* should fail.
*/
void ida_check_max(void)
{
DEFINE_IDA(ida);
int id, err;
unsigned long i, j;
for (j = 1; j < 65537; j *= 2) {
unsigned long base = (1UL << 31) - j;
for (i = 0; i < j; i++) {
assert(ida_pre_get(&ida, GFP_KERNEL));
assert(!ida_get_new_above(&ida, base, &id));
assert(id == base + i);
}
assert(ida_pre_get(&ida, GFP_KERNEL));
err = ida_get_new_above(&ida, base, &id);
assert(err == -ENOSPC);
ida_destroy(&ida);
assert(ida_is_empty(&ida));
rcu_barrier();
}
}
void ida_checks(void)
{
DEFINE_IDA(ida);
int id;
unsigned long i;
radix_tree_cpu_dead(1);
ida_check_nomem();
for (i = 0; i < 10000; i++) {
assert(ida_pre_get(&ida, GFP_KERNEL));
assert(!ida_get_new(&ida, &id));
assert(id == i);
}
ida_remove(&ida, 20);
ida_remove(&ida, 21);
for (i = 0; i < 3; i++) {
assert(ida_pre_get(&ida, GFP_KERNEL));
assert(!ida_get_new(&ida, &id));
if (i == 2)
assert(id == 10000);
}
for (i = 0; i < 5000; i++)
ida_remove(&ida, i);
assert(ida_pre_get(&ida, GFP_KERNEL));
assert(!ida_get_new_above(&ida, 5000, &id));
assert(id == 10001);
ida_destroy(&ida);
assert(ida_is_empty(&ida));
assert(ida_pre_get(&ida, GFP_KERNEL));
assert(!ida_get_new_above(&ida, 1, &id));
assert(id == 1);
ida_remove(&ida, id);
assert(ida_is_empty(&ida));
ida_destroy(&ida);
assert(ida_is_empty(&ida));
assert(ida_pre_get(&ida, GFP_KERNEL));
assert(!ida_get_new_above(&ida, 1, &id));
ida_destroy(&ida);
assert(ida_is_empty(&ida));
assert(ida_pre_get(&ida, GFP_KERNEL));
assert(!ida_get_new_above(&ida, 1, &id));
assert(id == 1);
assert(ida_pre_get(&ida, GFP_KERNEL));
assert(!ida_get_new_above(&ida, 1025, &id));
assert(id == 1025);
assert(ida_pre_get(&ida, GFP_KERNEL));
assert(!ida_get_new_above(&ida, 10000, &id));
assert(id == 10000);
ida_remove(&ida, 1025);
ida_destroy(&ida);
assert(ida_is_empty(&ida));
ida_check_leaf();
ida_check_max();
radix_tree_cpu_dead(1);
}

tools/testing/radix-tree/linux/gfp.h

@@ -15,10 +15,12 @@
#define __GFP_DIRECT_RECLAIM 0x400000u
#define __GFP_KSWAPD_RECLAIM 0x2000000u
#define __GFP_RECLAIM (__GFP_DIRECT_RECLAIM|__GFP_KSWAPD_RECLAIM)
#define __GFP_RECLAIM (__GFP_DIRECT_RECLAIM|__GFP_KSWAPD_RECLAIM)
#define GFP_ATOMIC (__GFP_HIGH|__GFP_ATOMIC|__GFP_KSWAPD_RECLAIM)
#define GFP_KERNEL (__GFP_RECLAIM | __GFP_IO | __GFP_FS)
#define GFP_NOWAIT (__GFP_KSWAPD_RECLAIM)
#define GFP_ATOMIC (__GFP_HIGH|__GFP_ATOMIC|__GFP_KSWAPD_RECLAIM)
#define GFP_KERNEL (__GFP_RECLAIM | __GFP_IO | __GFP_FS)
static inline bool gfpflags_allow_blocking(const gfp_t gfp_flags)
{

tools/testing/radix-tree/linux/idr.h

@@ -0,0 +1 @@
#include "../../../../include/linux/idr.h"

tools/testing/radix-tree/linux/kernel.h

@@ -20,6 +20,7 @@
#define printk printf
#define pr_debug printk
#define pr_cont printk
#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))

tools/testing/radix-tree/main.c

@@ -3,6 +3,7 @@
#include <unistd.h>
#include <time.h>
#include <assert.h>
#include <limits.h>
#include <linux/slab.h>
#include <linux/radix-tree.h>
@@ -314,6 +315,11 @@ static void single_thread_tests(bool long_run)
rcu_barrier();
printf("after dynamic_height_check: %d allocated, preempt %d\n",
nr_allocated, preempt_count);
idr_checks();
ida_checks();
rcu_barrier();
printf("after idr_checks: %d allocated, preempt %d\n",
nr_allocated, preempt_count);
big_gang_check(long_run);
rcu_barrier();
printf("after big_gang_check: %d allocated, preempt %d\n",

tools/testing/radix-tree/test.h

@@ -34,6 +34,8 @@ void tag_check(void);
void multiorder_checks(void);
void iteration_test(unsigned order, unsigned duration);
void benchmark(void);
void idr_checks(void);
void ida_checks(void);
struct item *
item_tag_set(struct radix_tree_root *root, unsigned long index, int tag);