
rhashtable: Remove GFP flag from rhashtable_walk_init

The commit 8f6fd83c6c ("rhashtable:
accept GFP flags in rhashtable_walk_init") added a GFP flag argument
to rhashtable_walk_init because some users wish to use the walker
in an unsleepable context.

In fact we don't need to allocate memory in rhashtable_walk_init
at all.  The walker is always paired with an iterator, so we can
simply embed the walker in the iterator itself.

This patch does that by introducing a new enter function to replace
the existing init function.  This way we don't have to churn all
the existing users again.

Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Signed-off-by: David S. Miller <davem@davemloft.net>
Herbert Xu 2016-08-18 16:50:56 +08:00 committed by David S. Miller
parent 363dc396a5
commit 246779dd09
2 changed files with 29 additions and 31 deletions
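For orientation before the diff, here is a minimal sketch of what the change means for a caller. The table my_ht and the two setup helpers are hypothetical, invented for illustration; only rhashtable_walk_init and rhashtable_walk_enter come from the patch:

#include <linux/rhashtable.h>

static struct rhashtable my_ht;

/* Before this patch: the walker was allocated separately, so callers
 * passed a GFP flag and had to handle -ENOMEM. */
static int old_walk_setup(struct rhashtable_iter *iter)
{
        return rhashtable_walk_init(&my_ht, iter, GFP_KERNEL);
}

/* After this patch: the walker is embedded in the iterator, so there
 * is no allocation, no GFP flag, and no way to fail. */
static void new_walk_setup(struct rhashtable_iter *iter)
{
        rhashtable_walk_enter(&my_ht, iter);
}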

include/linux/rhashtable.h

@@ -173,7 +173,7 @@ struct rhashtable_walker {
 struct rhashtable_iter {
         struct rhashtable *ht;
         struct rhash_head *p;
-        struct rhashtable_walker *walker;
+        struct rhashtable_walker walker;
         unsigned int slot;
         unsigned int skip;
 };
@@ -346,8 +346,8 @@ struct bucket_table *rhashtable_insert_slow(struct rhashtable *ht,
                                             struct bucket_table *old_tbl);
 int rhashtable_insert_rehash(struct rhashtable *ht, struct bucket_table *tbl);
 
-int rhashtable_walk_init(struct rhashtable *ht, struct rhashtable_iter *iter,
-                         gfp_t gfp);
+void rhashtable_walk_enter(struct rhashtable *ht,
+                           struct rhashtable_iter *iter);
 void rhashtable_walk_exit(struct rhashtable_iter *iter);
 int rhashtable_walk_start(struct rhashtable_iter *iter) __acquires(RCU);
 void *rhashtable_walk_next(struct rhashtable_iter *iter);
@@ -906,4 +906,12 @@ static inline int rhashtable_replace_fast(
         return err;
 }
 
+/* Obsolete function, do not use in new code. */
+static inline int rhashtable_walk_init(struct rhashtable *ht,
+                                       struct rhashtable_iter *iter, gfp_t gfp)
+{
+        rhashtable_walk_enter(ht, iter);
+        return 0;
+}
+
 #endif /* _LINUX_RHASHTABLE_H */
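Since the old name survives above as a static inline that always returns 0, existing callers compile unchanged and their error handling simply becomes dead code; for a hypothetical existing caller:

        err = rhashtable_walk_init(ht, &iter, GFP_KERNEL);
        if (err)                /* can no longer trigger: the shim returns 0 */
                return err;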

lib/rhashtable.c

@@ -489,10 +489,9 @@ exit:
 EXPORT_SYMBOL_GPL(rhashtable_insert_slow);
 
 /**
- * rhashtable_walk_init - Initialise an iterator
+ * rhashtable_walk_enter - Initialise an iterator
  * @ht:         Table to walk over
  * @iter:       Hash table Iterator
- * @gfp:        GFP flags for allocations
  *
  * This function prepares a hash table walk.
  *
@@ -507,30 +506,22 @@ EXPORT_SYMBOL_GPL(rhashtable_insert_slow);
  * This function may sleep so you must not call it from interrupt
  * context or with spin locks held.
  *
- * You must call rhashtable_walk_exit if this function returns
- * successfully.
+ * You must call rhashtable_walk_exit after this function returns.
  */
-int rhashtable_walk_init(struct rhashtable *ht, struct rhashtable_iter *iter,
-                         gfp_t gfp)
+void rhashtable_walk_enter(struct rhashtable *ht, struct rhashtable_iter *iter)
 {
         iter->ht = ht;
         iter->p = NULL;
         iter->slot = 0;
         iter->skip = 0;
 
-        iter->walker = kmalloc(sizeof(*iter->walker), gfp);
-        if (!iter->walker)
-                return -ENOMEM;
-
         spin_lock(&ht->lock);
-        iter->walker->tbl =
+        iter->walker.tbl =
                 rcu_dereference_protected(ht->tbl, lockdep_is_held(&ht->lock));
-        list_add(&iter->walker->list, &iter->walker->tbl->walkers);
+        list_add(&iter->walker.list, &iter->walker.tbl->walkers);
         spin_unlock(&ht->lock);
-
-        return 0;
 }
-EXPORT_SYMBOL_GPL(rhashtable_walk_init);
+EXPORT_SYMBOL_GPL(rhashtable_walk_enter);
 
 /**
  * rhashtable_walk_exit - Free an iterator
@@ -541,10 +532,9 @@ EXPORT_SYMBOL_GPL(rhashtable_walk_init);
 void rhashtable_walk_exit(struct rhashtable_iter *iter)
 {
         spin_lock(&iter->ht->lock);
-        if (iter->walker->tbl)
-                list_del(&iter->walker->list);
+        if (iter->walker.tbl)
+                list_del(&iter->walker.list);
         spin_unlock(&iter->ht->lock);
-        kfree(iter->walker);
 }
 EXPORT_SYMBOL_GPL(rhashtable_walk_exit);
@@ -570,12 +560,12 @@ int rhashtable_walk_start(struct rhashtable_iter *iter)
         rcu_read_lock();
 
         spin_lock(&ht->lock);
-        if (iter->walker->tbl)
-                list_del(&iter->walker->list);
+        if (iter->walker.tbl)
+                list_del(&iter->walker.list);
         spin_unlock(&ht->lock);
 
-        if (!iter->walker->tbl) {
-                iter->walker->tbl = rht_dereference_rcu(ht->tbl, ht);
+        if (!iter->walker.tbl) {
+                iter->walker.tbl = rht_dereference_rcu(ht->tbl, ht);
                 return -EAGAIN;
         }
@@ -597,7 +587,7 @@ EXPORT_SYMBOL_GPL(rhashtable_walk_start);
  */
 void *rhashtable_walk_next(struct rhashtable_iter *iter)
 {
-        struct bucket_table *tbl = iter->walker->tbl;
+        struct bucket_table *tbl = iter->walker.tbl;
         struct rhashtable *ht = iter->ht;
         struct rhash_head *p = iter->p;
 
@@ -630,8 +620,8 @@ next:
         /* Ensure we see any new tables. */
         smp_rmb();
 
-        iter->walker->tbl = rht_dereference_rcu(tbl->future_tbl, ht);
-        if (iter->walker->tbl) {
+        iter->walker.tbl = rht_dereference_rcu(tbl->future_tbl, ht);
+        if (iter->walker.tbl) {
                 iter->slot = 0;
                 iter->skip = 0;
                 return ERR_PTR(-EAGAIN);
@@ -651,7 +641,7 @@ void rhashtable_walk_stop(struct rhashtable_iter *iter)
         __releases(RCU)
 {
         struct rhashtable *ht;
-        struct bucket_table *tbl = iter->walker->tbl;
+        struct bucket_table *tbl = iter->walker.tbl;
 
         if (!tbl)
                 goto out;
@@ -660,9 +650,9 @@ void rhashtable_walk_stop(struct rhashtable_iter *iter)
 
         spin_lock(&ht->lock);
         if (tbl->rehash < tbl->size)
-                list_add(&iter->walker->list, &tbl->walkers);
+                list_add(&iter->walker.list, &tbl->walkers);
         else
-                iter->walker->tbl = NULL;
+                iter->walker.tbl = NULL;
         spin_unlock(&ht->lock);
 
         iter->p = NULL;
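
Taken together, the post-patch walker lifecycle looks like the sketch below. The function walk_all and the object handling are illustrative assumptions; the enter/start/next/stop/exit sequence and the -EAGAIN convention are the actual API shown in the hunks above:

#include <linux/rhashtable.h>

static void walk_all(struct rhashtable *ht)
{
        struct rhashtable_iter iter;
        void *obj;

        rhashtable_walk_enter(ht, &iter);       /* no allocation, cannot fail */

        rhashtable_walk_start(&iter);           /* takes rcu_read_lock(); may return -EAGAIN */

        while ((obj = rhashtable_walk_next(&iter)) != NULL) {
                if (IS_ERR(obj)) {
                        if (PTR_ERR(obj) == -EAGAIN)
                                continue;       /* resize hit: iterator reset to the new table */
                        break;
                }
                /* process obj: the caller's struct containing an rhash_head */
        }

        rhashtable_walk_stop(&iter);            /* drops rcu_read_lock() */
        rhashtable_walk_exit(&iter);            /* unlinks the embedded walker */
}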