remarkable-linux/arch/powerpc/include/asm/book3s/64/pgalloc.h

#ifndef _ASM_POWERPC_BOOK3S_64_PGALLOC_H
#define _ASM_POWERPC_BOOK3S_64_PGALLOC_H
/*
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/slab.h>
#include <linux/cpumask.h>
#include <linux/percpu.h>

struct vmemmap_backing {
        struct vmemmap_backing *list;
        unsigned long phys;
        unsigned long virt_addr;
};
extern struct vmemmap_backing *vmemmap_list;

/*
 * Functions that deal with pagetables that could be at any level of
 * the table need to be passed an "index_size" so they know how to
 * handle allocation. For PTE pages (which are linked to a struct
 * page for now, and drawn from the main get_free_pages() pool), the
 * allocation size will be (2^index_size * sizeof(pointer)) and
 * allocations are drawn from the kmem_cache in PGT_CACHE(index_size).
 *
 * The maximum index size needs to be big enough to allow any
 * pagetable sizes we need, but small enough to fit in the low bits of
 * any page table pointer. In other words all pagetables, even tiny
 * ones, must be aligned to allow at least enough low 0 bits to
 * contain this value. This value is also used as a mask, so it must
 * be one less than a power of two.
 */
#define MAX_PGTABLE_INDEX_SIZE 0xf
extern struct kmem_cache *pgtable_cache[];
#define PGT_CACHE(shift) ({                             \
                BUG_ON(!(shift));                       \
                pgtable_cache[(shift) - 1];             \
        })
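
/*
 * Worked example (illustrative only; the value 9 below is an arbitrary
 * index_size chosen for the arithmetic, not a constant defined by this
 * header): with 8-byte page table pointers, index_size = 9 means an
 * allocation of 2^9 * 8 = 4096 bytes, i.e. one 4K page, served from
 * PGT_CACHE(9) == pgtable_cache[8]. MAX_PGTABLE_INDEX_SIZE = 0xf is
 * 2^4 - 1, so every page table must be at least 16-byte aligned: the
 * four low bits of a table pointer are then guaranteed to be zero and
 * can carry the index size, with 0xf doubling as the extraction mask.
 */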
#define PGALLOC_GFP (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO)

extern pte_t *pte_fragment_alloc(struct mm_struct *, unsigned long, int);
extern void pte_fragment_free(unsigned long *, int);
extern void pgtable_free_tlb(struct mmu_gather *tlb, void *table, int shift);
#ifdef CONFIG_SMP
extern void __tlb_remove_table(void *_table);
#endif

static inline pgd_t *radix__pgd_alloc(struct mm_struct *mm)
{
#ifdef CONFIG_PPC_64K_PAGES
        return (pgd_t *)__get_free_page(pgtable_gfp_flags(mm, PGALLOC_GFP));
#else
        struct page *page;

        /*
         * With 4K pages the radix PGD is larger than one page, so this
         * is a costly order-4 allocation; __GFP_REPEAT is retained here
         * (unlike in the order-0 paths) to ask the allocator to retry.
         */
        page = alloc_pages(pgtable_gfp_flags(mm, PGALLOC_GFP | __GFP_REPEAT),
                           4);
        if (!page)
                return NULL;
        return (pgd_t *)page_address(page);
#endif
}

static inline void radix__pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
#ifdef CONFIG_PPC_64K_PAGES
        free_page((unsigned long)pgd);
#else
        free_pages((unsigned long)pgd, 4);
#endif
}

static inline pgd_t *pgd_alloc(struct mm_struct *mm)
{
        if (radix_enabled())
                return radix__pgd_alloc(mm);
        return kmem_cache_alloc(PGT_CACHE(PGD_INDEX_SIZE),
                                pgtable_gfp_flags(mm, GFP_KERNEL));
}

static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
        if (radix_enabled())
                return radix__pgd_free(mm, pgd);
        kmem_cache_free(PGT_CACHE(PGD_INDEX_SIZE), pgd);
}

static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
{
        pgd_set(pgd, __pgtable_ptr_val(pud) | PGD_VAL_BITS);
}

static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
{
        return kmem_cache_alloc(PGT_CACHE(PUD_INDEX_SIZE),
                                pgtable_gfp_flags(mm, GFP_KERNEL));
}

static inline void pud_free(struct mm_struct *mm, pud_t *pud)
{
        kmem_cache_free(PGT_CACHE(PUD_INDEX_SIZE), pud);
}

static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
{
        pud_set(pud, __pgtable_ptr_val(pmd) | PUD_VAL_BITS);
}

static inline void __pud_free_tlb(struct mmu_gather *tlb, pud_t *pud,
                                  unsigned long address)
{
        /*
         * By now all the pud entries should be none entries. So go
         * ahead and flush the page walk cache
         */
        flush_tlb_pgtable(tlb, address);
        pgtable_free_tlb(tlb, pud, PUD_INDEX_SIZE);
}

static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
{
        return kmem_cache_alloc(PGT_CACHE(PMD_CACHE_INDEX),
                                pgtable_gfp_flags(mm, GFP_KERNEL));
}

static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
{
        kmem_cache_free(PGT_CACHE(PMD_CACHE_INDEX), pmd);
}

static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd,
                                  unsigned long address)
{
        /*
         * By now all the pmd entries should be none entries. So go
         * ahead and flush the page walk cache
         */
        flush_tlb_pgtable(tlb, address);
        pgtable_free_tlb(tlb, pmd, PMD_CACHE_INDEX);
}

static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd,
                                       pte_t *pte)
{
        pmd_set(pmd, __pgtable_ptr_val(pte) | PMD_VAL_BITS);
}

static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
                                pgtable_t pte_page)
{
        pmd_set(pmd, __pgtable_ptr_val(pte_page) | PMD_VAL_BITS);
}

static inline pgtable_t pmd_pgtable(pmd_t pmd)
{
        return (pgtable_t)pmd_page_vaddr(pmd);
}

#ifdef CONFIG_PPC_4K_PAGES
static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
                                          unsigned long address)
{
        return (pte_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
}

static inline pgtable_t pte_alloc_one(struct mm_struct *mm,
                                      unsigned long address)
{
        struct page *page;
        pte_t *pte;

        pte = (pte_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO | __GFP_ACCOUNT);
        if (!pte)
                return NULL;
        page = virt_to_page(pte);
        /* Register the page as a page-table page (initialises the split PTL). */
        if (!pgtable_page_ctor(page)) {
                __free_page(page);
                return NULL;
        }
        return pte;
}
#else /* if CONFIG_PPC_64K_PAGES */
static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
                                          unsigned long address)
{
        return (pte_t *)pte_fragment_alloc(mm, address, 1);
}

static inline pgtable_t pte_alloc_one(struct mm_struct *mm,
                                      unsigned long address)
{
        return (pgtable_t)pte_fragment_alloc(mm, address, 0);
}
#endif

static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
{
        pte_fragment_free((unsigned long *)pte, 1);
}

static inline void pte_free(struct mm_struct *mm, pgtable_t ptepage)
{
        pte_fragment_free((unsigned long *)ptepage, 0);
}

static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t table,
                                  unsigned long address)
{
        /*
         * By now all the pte entries should be none entries. So go
         * ahead and flush the page walk cache
         */
        flush_tlb_pgtable(tlb, address);
        pgtable_free_tlb(tlb, table, 0);
}

#define check_pgt_cache() do { } while (0)
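
/*
 * Rough sketch of the expected call pattern (illustrative; the generic
 * mm code, not this header, drives these calls, and locking and error
 * handling are omitted). Each level's allocator is paired with the
 * populate helper that links the new table into its parent:
 *
 *      pud = pud_alloc_one(mm, addr);
 *      pgd_populate(mm, pgd, pud);       <- link the new PUD under the PGD
 *      pmd = pmd_alloc_one(mm, addr);
 *      pud_populate(mm, pud, pmd);       <- link the new PMD under the PUD
 *      ptepg = pte_alloc_one(mm, addr);
 *      pmd_populate(mm, pmd, ptepg);     <- link the new PTE page under the PMD
 */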
#endif /* _ASM_POWERPC_BOOK3S_64_PGALLOC_H */