alistair23-linux/arch/arc/mm/highmem.c

/*
* Copyright (C) 2015 Synopsys, Inc. (www.synopsys.com)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
*/
#include <linux/memblock.h>
#include <linux/export.h>
#include <linux/highmem.h>
#include <asm/processor.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
/*
* HIGHMEM API:
*
* The kmap() API provides sleeping semantics, hence "permanent maps".
* It allows mapping LAST_PKMAP pages, using @last_pkmap_nr as the cursor
* for book-keeping.
*
* kmap_atomic() can't sleep (it calls pagefault_disable()), thus it provides
* short-lived "temporary mappings", which historically were implemented as
* fixmaps (compile-time addresses etc.). Their book-keeping is done per CPU.
*
* Both these facts combined (preemption disabled and per-cpu allocation)
* mean the total number of concurrent fixmaps is limited to the max such
* allocations in a single control path. Thus KM_TYPE_NR (another historic
* relic) is a smallish number which caps the max per-cpu fixmaps.
*
* ARC HIGHMEM Details
*
* - the kernel vaddr space from 0x7z to 0x8z (currently used by vmalloc/modules)
*   is now shared between vmalloc and kmap (non-overlapping though)
*
* - Both fixmap/pkmap use a dedicated page table each, hooked up to swapper PGD
* This means each only has 1 PGDIR_SIZE worth of kvaddr mappings, which means
* 2M of kvaddr space for typical config (8K page and 11:8:13 traversal split)
*
* - fixmap anyhow needs a limited number of mappings. So 2M kvaddr == 256 PTE
* slots across NR_CPUS would be more than sufficient (generic code defines
* KM_TYPE_NR as 20).
*
* - pkmap, being preemptible, in theory could do with more than 256 concurrent
*   mappings. However, the generic pkmap code, map_new_virtual(), doesn't
*   traverse the PGD and only works with a single page table,
*   @pkmap_page_table, hence that sets the limit.
*/
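
/*
 * Editor's sketch of the resulting kernel vaddr layout, assuming the
 * typical ARC arrangement where the fixmap and pkmap regions are carved
 * out just below PAGE_OFFSET (see asm/highmem.h for the real values):
 *
 *   0x7z ...                vmalloc / modules
 *   FIXMAP_BASE             kmap_atomic() slots (FIXMAP_SIZE, 1 page table)
 *   PKMAP_BASE              kmap() pages        (PKMAP_SIZE,  1 page table)
 *   PAGE_OFFSET (0x8z)      start of the lowmem linear map
 */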
extern pte_t * pkmap_page_table;
static pte_t * fixmap_page_table;
void *kmap(struct page *page)
{
BUG_ON(in_interrupt());
if (!PageHighMem(page))
return page_address(page);
return kmap_high(page);
}
EXPORT_SYMBOL(kmap);
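
/*
 * Editor's sketch (hypothetical helper, not part of the original file):
 * typical use of the sleeping kmap() API described above. Safe only in
 * process context, since kmap() may block waiting for a free pkmap slot.
 */
static inline u32 example_sum_page(struct page *page)
{
	u32 *p = kmap(page);	/* may sleep; lowmem pages return directly */
	u32 sum = 0;
	int i;

	for (i = 0; i < PAGE_SIZE / sizeof(u32); i++)
		sum += p[i];

	kunmap(page);		/* drop the pkmap reference */
	return sum;
}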
void *kmap_atomic(struct page *page)
{
int idx, cpu_idx;
unsigned long vaddr;
preempt_disable();
pagefault_disable();
if (!PageHighMem(page))
return page_address(page);
cpu_idx = kmap_atomic_idx_push();
idx = cpu_idx + KM_TYPE_NR * smp_processor_id();
vaddr = FIXMAP_ADDR(idx);
set_pte_at(&init_mm, vaddr, fixmap_page_table + idx,
mk_pte(page, kmap_prot));
return (void *)vaddr;
}
EXPORT_SYMBOL(kmap_atomic);
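
/*
 * Editor's note, a worked example of the slot arithmetic above (values
 * assumed): with the generic KM_TYPE_NR of 20, the 3rd nested
 * kmap_atomic() on CPU 1 gets cpu_idx = 2, so idx = 2 + 20 * 1 = 22 and
 * the mapping lands at FIXMAP_ADDR(22), i.e. 22 page-sized slots above
 * FIXMAP_BASE.
 */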
void __kunmap_atomic(void *kv)
{
unsigned long kvaddr = (unsigned long)kv;
if (kvaddr >= FIXMAP_BASE && kvaddr < (FIXMAP_BASE + FIXMAP_SIZE)) {
/*
* Because preemption is disabled, this vaddr can be associated
* with the currently allocated index.
* But with multiple live kmap_atomic() mappings, it still relies on
* the callers to unmap in the right order.
*/
int cpu_idx = kmap_atomic_idx();
int idx = cpu_idx + KM_TYPE_NR * smp_processor_id();
WARN_ON(kvaddr != FIXMAP_ADDR(idx));
pte_clear(&init_mm, kvaddr, fixmap_page_table + idx);
local_flush_tlb_kernel_range(kvaddr, kvaddr + PAGE_SIZE);
kmap_atomic_idx_pop();
}
pagefault_enable();
preempt_enable();
}
EXPORT_SYMBOL(__kunmap_atomic);
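
/*
 * Editor's sketch (hypothetical helper, not in the original file): nested
 * atomic mappings must be torn down in LIFO order, which is exactly what
 * the "unmap in the right order" comment in __kunmap_atomic() relies on.
 */
static inline void example_copy_highpage(struct page *dst, struct page *src)
{
	void *vsrc = kmap_atomic(src);	/* slot N   */
	void *vdst = kmap_atomic(dst);	/* slot N+1 */

	memcpy(vdst, vsrc, PAGE_SIZE);

	kunmap_atomic(vdst);		/* last mapped, first unmapped */
	kunmap_atomic(vsrc);
}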
static noinline pte_t * __init alloc_kmap_pgtable(unsigned long kvaddr)
{
pgd_t *pgd_k;
pud_t *pud_k;
pmd_t *pmd_k;
pte_t *pte_k;
pgd_k = pgd_offset_k(kvaddr);
pud_k = pud_offset(pgd_k, kvaddr);
pmd_k = pmd_offset(pud_k, kvaddr);
pte_k = (pte_t *)memblock_alloc_low(PAGE_SIZE, PAGE_SIZE);
if (!pte_k)
panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
__func__, PAGE_SIZE, PAGE_SIZE);
pmd_populate_kernel(&init_mm, pmd_k, pte_k);
return pte_k;
}
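
/*
 * Editor's note on the sizing: with the 11:8:13 traversal split mentioned
 * in the header comment, one PGD entry covers 8 bits of PTE index over
 * 8K pages, i.e. 256 * 8 KiB = 2 MiB of kvaddr per page table. That is
 * why a single table per region suffices, and why kmap_init() below
 * asserts that KM_TYPE_NR and LAST_PKMAP both fit in PTRS_PER_PTE.
 */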
void __init kmap_init(void)
{
/* Due to recursive include hell, we can't do this in processor.h */
BUILD_BUG_ON(PAGE_OFFSET < (VMALLOC_END + FIXMAP_SIZE + PKMAP_SIZE));
BUILD_BUG_ON(KM_TYPE_NR > PTRS_PER_PTE);
pkmap_page_table = alloc_kmap_pgtable(PKMAP_BASE);
BUILD_BUG_ON(LAST_PKMAP > PTRS_PER_PTE);
fixmap_page_table = alloc_kmap_pgtable(FIXMAP_BASE);
}