alistair23-linux/arch/powerpc/mm/mmu_context_hash64.c
David Gibson d28513bc7f powerpc/mm: Fix pgtable cache cleanup with CONFIG_PPC_SUBPAGE_PROT
Commit a0668cdc15 cleans up the handling
of kmem_caches for allocating various levels of pagetables.
Unfortunately, it conflicts badly with CONFIG_PPC_SUBPAGE_PROT, due to
the latter's cleverly hidden technique of adding some extra allocation
space to the top level page directory to store the extra information
it needs.

Since that extra allocation really doesn't fit into the cleaned-up
page directory allocation scheme, this patch alters the
CONFIG_PPC_SUBPAGE_PROT code to instead allocate its struct
subpage_prot_table as part of the mm_context_t.

Signed-off-by: David Gibson <david@gibson.dropbear.id.au>
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
2009-12-08 15:59:33 +11:00
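
For reference, the shape of the fix: struct subpage_prot_table stops being a hidden tail allocation behind the top-level page directory and becomes an ordinary member of the context. A minimal sketch of the resulting layout follows; apart from the new spt member, the fields shown are illustrative of the 64-bit hash mm_context_t of this era rather than a verbatim copy:

struct subpage_prot_table {
	unsigned long maxaddr;		/* only addresses below this are protected */
	unsigned int **protptrs[2];	/* upper level of the protection-word tree */
	unsigned int *low_prot[4];	/* direct pointers covering low addresses */
};

typedef struct {
	mm_context_id_t id;		/* slot handed out by __init_new_context() */
	u16 user_psize;			/* base page size index */
	unsigned long vdso_base;
	/* slice page-size fields etc. omitted in this sketch */
#ifdef CONFIG_PPC_SUBPAGE_PROT
	struct subpage_prot_table spt;	/* embedded: no extra pgd-side space */
#endif
} mm_context_t;

Embedding the table lets pgd allocation use the common kmem_cache scheme again, and ties the table's lifetime to the context: it is set up via subpage_prot_init_new_context() in init_new_context() and torn down via subpage_prot_free() in destroy_context() below.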

/*
 *  MMU context allocation for 64-bit kernels.
 *
 *  Copyright (C) 2004 Anton Blanchard, IBM Corp. <anton@samba.org>
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 *
 */

#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/idr.h>
#include <linux/module.h>

#include <asm/mmu_context.h>

static DEFINE_SPINLOCK(mmu_context_lock);
static DEFINE_IDR(mmu_context_idr);

/*
 * The proto-VSID space has 2^35 - 1 segments available for user mappings.
 * Each segment contains 2^28 bytes.  Each context maps 2^44 bytes,
 * so we can support 2^19 - 1 contexts (19 == 35 + 28 - 44).
 */
#define NO_CONTEXT	0
#define MAX_CONTEXT	((1UL << 19) - 1)
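
/*
 * Checking the arithmetic above: (2^35 - 1) segments of 2^28 bytes each
 * give just under 2^63 bytes of user proto-VSID space; dividing by the
 * 2^44 bytes a single context maps leaves 2^19 contexts, so valid
 * context ids run from 1 up to MAX_CONTEXT == 2^19 - 1.
 */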

int __init_new_context(void)
{
	int index;
	int err;

again:
	/*
	 * Pre-allocate IDR memory outside the lock; the allocation in
	 * idr_get_new_above() below can still race with other callers
	 * and return -EAGAIN, in which case we simply start over.
	 */
	if (!idr_pre_get(&mmu_context_idr, GFP_KERNEL))
		return -ENOMEM;

	spin_lock(&mmu_context_lock);
	err = idr_get_new_above(&mmu_context_idr, NULL, 1, &index);
	spin_unlock(&mmu_context_lock);

	if (err == -EAGAIN)
		goto again;
	else if (err)
		return err;

	if (index > MAX_CONTEXT) {
		/* Context ids are exhausted: give the slot back and fail. */
		spin_lock(&mmu_context_lock);
		idr_remove(&mmu_context_idr, index);
		spin_unlock(&mmu_context_lock);
		return -ENOMEM;
	}

	return index;
}
EXPORT_SYMBOL_GPL(__init_new_context);

int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
	int index;

	index = __init_new_context();
	if (index < 0)
		return index;

	/*
	 * The old code would re-promote on fork, we don't do that when
	 * using slices as it could cause problems promoting slices that
	 * have been forced down to 4K.
	 */
	if (slice_mm_new_context(mm))
		slice_set_user_psize(mm, mmu_virtual_psize);
	subpage_prot_init_new_context(mm);
	mm->context.id = index;

	return 0;
}

void __destroy_context(int context_id)
{
	spin_lock(&mmu_context_lock);
	idr_remove(&mmu_context_idr, context_id);
	spin_unlock(&mmu_context_lock);
}
EXPORT_SYMBOL_GPL(__destroy_context);

void destroy_context(struct mm_struct *mm)
{
	__destroy_context(mm->context.id);
	subpage_prot_free(mm);
	mm->context.id = NO_CONTEXT;
}
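
With the table embedded in mm_context_t, the per-context setup called from init_new_context() above no longer has to allocate anything; it only clears the embedded structure. A rough sketch of what subpage_prot_init_new_context() reduces to under this patch (the spt member follows the description above; the surrounding file is not shown here):

#ifdef CONFIG_PPC_SUBPAGE_PROT
void subpage_prot_init_new_context(struct mm_struct *mm)
{
	struct subpage_prot_table *spt = &mm->context.spt;

	/*
	 * The table lives inside the context, so a fresh context only
	 * needs it zeroed; pages of protection words are allocated
	 * lazily when subpage protection is first requested.
	 */
	memset(spt, 0, sizeof(*spt));
}
#else
/* Without CONFIG_PPC_SUBPAGE_PROT the hooks compile away entirely. */
static inline void subpage_prot_init_new_context(struct mm_struct *mm) { }
static inline void subpage_prot_free(struct mm_struct *mm) { }
#endif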