remarkable-linux/kernel/user.c
Pavel Emelyanov 28f300d236 Fix user namespace exiting OOPs
It turned out that the user namespace is released in exit_task_namespaces()
during do_exit(), but the struct user_struct is released only during
put_task_struct(), i.e. MUCH later.

On debug kernels with poisoned slabs this causes an oops in
uid_hash_remove(), because the head of the chain, which resides inside the
struct user_namespace, has already been freed and poisoned.

Since the uid hash itself is needed only while someone can search it, i.e.
while the namespace is alive, we can safely unhash all the user_structs from
it as the namespace exits.  The subsequent free_uid() will complete the
user_struct destruction.

For example, the following simple program,

   #define _GNU_SOURCE	/* glibc exposes clone() only with this defined */
   #include <sched.h>

   char stack[2 * 1024 * 1024];

   int f(void *foo)
   {
   	return 0;
   }

   int main(void)
   {
   	/* 0x10000000 is CLONE_NEWUSER */
   	clone(f, stack + 1 * 1024 * 1024, 0x10000000, 0);
   	return 0;
   }

when run on a kernel with CONFIG_USER_NS turned on, will oops the kernel
immediately.
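
The fix below adds release_uids(), which empties the per-namespace hash
while the namespace (and thus the chain heads) is still valid.  As a rough
sketch of the caller side -- assuming the namespace is freed from a kref
release function, with illustrative names -- the teardown path would look
like:

   void free_user_ns(struct kref *kref)
   {
   	struct user_namespace *ns;

   	ns = container_of(kref, struct user_namespace, kref);
   	release_uids(ns);	/* unhash while ns->uidhash_table is alive */
   	kfree(ns);
   }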

This was spotted during OpenVZ kernel testing.

Signed-off-by: Pavel Emelyanov <xemul@openvz.org>
Signed-off-by: Alexey Dobriyan <adobriyan@openvz.org>
Acked-by: "Serge E. Hallyn" <serue@us.ibm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2007-09-19 11:24:18 -07:00

/*
 * The "user cache".
 *
 * (C) Copyright 1991-2000 Linus Torvalds
 *
 * We have a per-user structure to keep track of how many
 * processes, files etc the user has claimed, in order to be
 * able to have per-user limits for system resources.
 */

#include <linux/init.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/bitops.h>
#include <linux/key.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/user_namespace.h>

/*
 * UID task count cache, to get fast user lookup in "alloc_uid"
 * when changing user ID's (ie setuid() and friends).
 */
#define UIDHASH_MASK		(UIDHASH_SZ - 1)
#define __uidhashfn(uid)	(((uid >> UIDHASH_BITS) + uid) & UIDHASH_MASK)
#define uidhashentry(ns, uid)	((ns)->uidhash_table + __uidhashfn((uid)))
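
/*
 * Worked example (assuming UIDHASH_BITS == 8 and hence UIDHASH_SZ == 256;
 * the real values are configuration-dependent): uid 1000 hashes to
 * ((1000 >> 8) + 1000) & 255 == 1003 & 255 == 235, so uidhashentry(ns, 1000)
 * points at &ns->uidhash_table[235].
 */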

static struct kmem_cache *uid_cachep;

/*
 * The uidhash_lock is mostly taken from process context, but it is
 * occasionally also taken from softirq/tasklet context, when
 * task-structs get RCU-freed. Hence all locking must be softirq-safe.
 * But free_uid() is also called with local interrupts disabled, and running
 * local_bh_enable() with local interrupts disabled is an error - we'll run
 * softirq callbacks, and they can unconditionally enable interrupts, and
 * the caller of free_uid() didn't expect that..
 */
static DEFINE_SPINLOCK(uidhash_lock);

struct user_struct root_user = {
	.__count	= ATOMIC_INIT(1),
	.processes	= ATOMIC_INIT(1),
	.files		= ATOMIC_INIT(0),
	.sigpending	= ATOMIC_INIT(0),
	.mq_bytes	= 0,
	.locked_shm	= 0,
#ifdef CONFIG_KEYS
	.uid_keyring	= &root_user_keyring,
	.session_keyring = &root_session_keyring,
#endif
};

/*
 * These routines must be called with the uidhash spinlock held!
 */
static inline void uid_hash_insert(struct user_struct *up, struct hlist_head *hashent)
{
	hlist_add_head(&up->uidhash_node, hashent);
}

static inline void uid_hash_remove(struct user_struct *up)
{
	hlist_del_init(&up->uidhash_node);
}

static inline struct user_struct *uid_hash_find(uid_t uid, struct hlist_head *hashent)
{
	struct user_struct *user;
	struct hlist_node *h;

	hlist_for_each_entry(user, h, hashent, uidhash_node) {
		if (user->uid == uid) {
			atomic_inc(&user->__count);
			return user;
		}
	}

	return NULL;
}

/*
 * Locate the user_struct for the passed UID.  If found, take a ref on it.
 * The caller must undo that ref with free_uid().
 *
 * If the user_struct could not be found, return NULL.
 */
struct user_struct *find_user(uid_t uid)
{
	struct user_struct *ret;
	unsigned long flags;
	struct user_namespace *ns = current->nsproxy->user_ns;

	spin_lock_irqsave(&uidhash_lock, flags);
	ret = uid_hash_find(uid, uidhashentry(ns, uid));
	spin_unlock_irqrestore(&uidhash_lock, flags);
	return ret;
}
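
/*
 * Drop a reference on a user_struct.  Interrupts are disabled by hand so
 * that atomic_dec_and_lock() can take the plain (non-irqsave) spinlock:
 * the lock is only acquired when the count actually drops to zero, which
 * is when the structure must be unhashed and freed.
 */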
void free_uid(struct user_struct *up)
{
	unsigned long flags;

	if (!up)
		return;

	local_irq_save(flags);
	if (atomic_dec_and_lock(&up->__count, &uidhash_lock)) {
		uid_hash_remove(up);
		spin_unlock_irqrestore(&uidhash_lock, flags);
		key_put(up->uid_keyring);
		key_put(up->session_keyring);
		kmem_cache_free(uid_cachep, up);
	} else {
		local_irq_restore(flags);
	}
}

struct user_struct *alloc_uid(struct user_namespace *ns, uid_t uid)
{
	struct hlist_head *hashent = uidhashentry(ns, uid);
	struct user_struct *up;

	spin_lock_irq(&uidhash_lock);
	up = uid_hash_find(uid, hashent);
	spin_unlock_irq(&uidhash_lock);

	if (!up) {
		struct user_struct *new;

		new = kmem_cache_alloc(uid_cachep, GFP_KERNEL);
		if (!new)
			return NULL;
		new->uid = uid;
		atomic_set(&new->__count, 1);
		atomic_set(&new->processes, 0);
		atomic_set(&new->files, 0);
		atomic_set(&new->sigpending, 0);
#ifdef CONFIG_INOTIFY_USER
		atomic_set(&new->inotify_watches, 0);
		atomic_set(&new->inotify_devs, 0);
#endif
		new->mq_bytes = 0;
		new->locked_shm = 0;

		if (alloc_uid_keyring(new, current) < 0) {
			kmem_cache_free(uid_cachep, new);
			return NULL;
		}

		/*
		 * Before adding this, check whether we raced
		 * on adding the same user already..
		 */
		spin_lock_irq(&uidhash_lock);
		up = uid_hash_find(uid, hashent);
		if (up) {
			key_put(new->uid_keyring);
			key_put(new->session_keyring);
			kmem_cache_free(uid_cachep, new);
		} else {
			uid_hash_insert(new, hashent);
			up = new;
		}
		spin_unlock_irq(&uidhash_lock);
	}
	return up;
}
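
/*
 * Point current at new_user, moving the per-user process count across and
 * dropping the reference that current held on the old user_struct.  The
 * caller donates its reference on new_user.
 */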
void switch_uid(struct user_struct *new_user)
{
	struct user_struct *old_user;

	/* What if a process setreuid()'s and this brings the
	 * new uid over his NPROC rlimit?  We can check this now
	 * cheaply with the new uid cache, so if it matters
	 * we should be checking for it.  -DaveM
	 */
	old_user = current->user;
	atomic_inc(&new_user->processes);
	atomic_dec(&old_user->processes);
	switch_uid_keyring(new_user);
	current->user = new_user;

	/*
	 * We need to synchronize with __sigqueue_alloc()
	 * doing a get_uid(p->user).. If that saw the old
	 * user value, we need to wait until it has exited
	 * its critical region before we can free the old
	 * structure.
	 */
	smp_mb();
	spin_unlock_wait(&current->sighand->siglock);

	free_uid(old_user);
	suid_keys(current);
}
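
/*
 * This is the fix described in the changelog above: unhash every
 * user_struct while the namespace (and thus the hash heads inside it) is
 * still alive.  The structures themselves stay allocated until their last
 * free_uid().
 */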
void release_uids(struct user_namespace *ns)
{
	int i;
	unsigned long flags;
	struct hlist_head *head;
	struct hlist_node *nd;

	spin_lock_irqsave(&uidhash_lock, flags);
	/*
	 * collapse the chains so that the user_struct-s will
	 * be still alive, but not in hashes. subsequent free_uid()
	 * will free them.
	 */
	for (i = 0; i < UIDHASH_SZ; i++) {
		head = ns->uidhash_table + i;
		while (!hlist_empty(head)) {
			nd = head->first;
			hlist_del_init(nd);
		}
	}
	spin_unlock_irqrestore(&uidhash_lock, flags);

	free_uid(ns->root_user);
}

static int __init uid_cache_init(void)
{
	int n;

	uid_cachep = kmem_cache_create("uid_cache", sizeof(struct user_struct),
			0, SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);

	for (n = 0; n < UIDHASH_SZ; ++n)
		INIT_HLIST_HEAD(init_user_ns.uidhash_table + n);

	/* Insert the root user immediately (init already runs as root) */
	spin_lock_irq(&uidhash_lock);
	uid_hash_insert(&root_user, uidhashentry(&init_user_ns, 0));
	spin_unlock_irq(&uidhash_lock);

	return 0;
}

module_init(uid_cache_init);