
[PATCH] fix uidhash_lock <-> RCU deadlock

RCU task-struct freeing can call free_uid(), which takes
uidhash_lock from softirq context - while other users of
uidhash_lock are softirq-unsafe.

The fix is to always take uidhash_lock in a softirq-safe manner.

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Acked-by: Paul E. McKenney <paulmck@us.ibm.com>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
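
Editorial note: to make the failure mode concrete, here is a minimal
sketch of the deadlock on a single CPU. The call chain is illustrative
of the 2.6-era RCU path; the exact callback names are simplified and
not taken from this patch:

	/* process context, softirqs enabled: */
	spin_lock(&uidhash_lock);	/* e.g. the old find_user() */

	/* An interrupt arrives and, on exit, pending softirqs run on
	 * this same CPU.  If an RCU callback frees a task_struct:
	 *
	 *	RCU softirq
	 *	  -> task-struct RCU free callback   (name simplified)
	 *	    -> free_uid()
	 *	      -> spin_lock(&uidhash_lock)    <- held above: deadlock
	 *
	 * The softirq spins forever, and the interrupted lock holder can
	 * never run again on this CPU to release the lock. */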
Ingo Molnar 2006-01-25 15:23:07 +01:00 committed by Linus Torvalds
parent d5bee77513
commit 4021cb279a
1 changed file with 17 additions and 8 deletions

kernel/user.c

@@ -13,6 +13,7 @@
 #include <linux/slab.h>
 #include <linux/bitops.h>
 #include <linux/key.h>
+#include <linux/interrupt.h>
 
 /*
  * UID task count cache, to get fast user lookup in "alloc_uid"
@@ -27,6 +28,12 @@
 static kmem_cache_t *uid_cachep;
 static struct list_head uidhash_table[UIDHASH_SZ];
 
+/*
+ * The uidhash_lock is mostly taken from process context, but it is
+ * occasionally also taken from softirq/tasklet context, when
+ * task-structs get RCU-freed. Hence all locking must be softirq-safe.
+ */
 static DEFINE_SPINLOCK(uidhash_lock);
 
 struct user_struct root_user = {
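
Editorial note: for readers unfamiliar with the _bh lock variants used
throughout the rest of this patch, they conceptually combine softirq
disabling with the plain lock. A hedged sketch follows (the real
include/linux/spinlock.h versions also handle preempt accounting and
debug checks; the my_ names are illustrative, not kernel API):

	#include <linux/spinlock.h>

	static inline void my_spin_lock_bh(spinlock_t *lock)
	{
		local_bh_disable();	/* no softirqs on this CPU from here */
		spin_lock(lock);
	}

	static inline void my_spin_unlock_bh(spinlock_t *lock)
	{
		spin_unlock(lock);
		local_bh_enable();	/* softirqs re-enabled; pending ones run */
	}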
@@ -83,14 +90,15 @@ struct user_struct *find_user(uid_t uid)
 {
 	struct user_struct *ret;
 
-	spin_lock(&uidhash_lock);
+	spin_lock_bh(&uidhash_lock);
 	ret = uid_hash_find(uid, uidhashentry(uid));
-	spin_unlock(&uidhash_lock);
+	spin_unlock_bh(&uidhash_lock);
 	return ret;
 }
 
 void free_uid(struct user_struct *up)
 {
+	local_bh_disable();
 	if (up && atomic_dec_and_lock(&up->__count, &uidhash_lock)) {
 		uid_hash_remove(up);
 		key_put(up->uid_keyring);
@@ -98,6 +106,7 @@ void free_uid(struct user_struct *up)
 		kmem_cache_free(uid_cachep, up);
 		spin_unlock(&uidhash_lock);
 	}
+	local_bh_enable();
 }
 
 struct user_struct * alloc_uid(uid_t uid)
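
Editorial note: free_uid() cannot simply switch to spin_lock_bh(),
because uidhash_lock is acquired inside atomic_dec_and_lock(), which
uses the plain spin_lock() and has no _bh flavour (at least as of this
patch). Bracketing the whole call with local_bh_disable()/
local_bh_enable() gives equivalent protection, since spin_lock_bh() is
essentially local_bh_disable() followed by spin_lock(). For reference,
the generic atomic_dec_and_lock() logic is roughly the following
(a simplified sketch of lib/dec_and_lock.c, minus any
architecture-specific fast paths):

	int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock)
	{
		spin_lock(lock);
		if (atomic_dec_and_test(atomic))
			return 1;	/* count hit zero: return with lock held */
		spin_unlock(lock);
		return 0;		/* still referenced: lock already dropped */
	}

This matches the free_uid() shape above: on a nonzero return the caller
tears the user_struct down and then drops the lock itself.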
@@ -105,9 +114,9 @@ struct user_struct * alloc_uid(uid_t uid)
 	struct list_head *hashent = uidhashentry(uid);
 	struct user_struct *up;
 
-	spin_lock(&uidhash_lock);
+	spin_lock_bh(&uidhash_lock);
 	up = uid_hash_find(uid, hashent);
-	spin_unlock(&uidhash_lock);
+	spin_unlock_bh(&uidhash_lock);
 
 	if (!up) {
 		struct user_struct *new;
@@ -137,7 +146,7 @@ struct user_struct * alloc_uid(uid_t uid)
 		 * Before adding this, check whether we raced
 		 * on adding the same user already..
 		 */
-		spin_lock(&uidhash_lock);
+		spin_lock_bh(&uidhash_lock);
 		up = uid_hash_find(uid, hashent);
 		if (up) {
 			key_put(new->uid_keyring);
@@ -147,7 +156,7 @@ struct user_struct * alloc_uid(uid_t uid)
 			uid_hash_insert(new, hashent);
 			up = new;
 		}
-		spin_unlock(&uidhash_lock);
+		spin_unlock_bh(&uidhash_lock);
 	}
 
 	return up;
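
Editorial note: the surrounding alloc_uid() logic, which this patch
leaves intact apart from the _bh conversion, is the classic
optimistic-allocation pattern: look up first, allocate outside the lock
because allocation may sleep, then re-check under the lock so two
racing tasks cannot both insert the same uid. In outline (the helper
names are illustrative, not the real kernel/user.c ones):

	up = locked_lookup(uid);		/* first try: not found */
	if (!up) {
		new = allocate_user(uid);	/* may sleep: no spinlock held */

		spin_lock_bh(&uidhash_lock);
		up = locked_lookup(uid);	/* did someone beat us to it? */
		if (up)
			discard_user(new);	/* lost the race: drop ours */
		else
			up = insert_user(new);	/* won: publish ours */
		spin_unlock_bh(&uidhash_lock);
	}
	return up;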
@@ -183,9 +192,9 @@ static int __init uid_cache_init(void)
 		INIT_LIST_HEAD(uidhash_table + n);
 
 	/* Insert the root user immediately (init already runs as root) */
-	spin_lock(&uidhash_lock);
+	spin_lock_bh(&uidhash_lock);
 	uid_hash_insert(&root_user, uidhashentry(0));
-	spin_unlock(&uidhash_lock);
+	spin_unlock_bh(&uidhash_lock);
 
 	return 0;
 }