alistair23-linux/arch/mips/mm/init.c

/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994 - 2000 Ralf Baechle
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 * Kevin D. Kissell, kevink@mips.com and Carsten Langgaard, carstenl@mips.com
 * Copyright (C) 2000 MIPS Technologies, Inc. All rights reserved.
 */

#include <linux/bug.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/pagemap.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/memblock.h>
#include <linux/highmem.h>
#include <linux/swap.h>
#include <linux/proc_fs.h>
#include <linux/pfn.h>
#include <linux/hardirq.h>
#include <linux/gfp.h>
#include <linux/kcore.h>
#include <linux/initrd.h>
#include <asm/bootinfo.h>
#include <asm/cachectl.h>
#include <asm/cpu.h>
#include <asm/dma.h>
#include <asm/kmap_types.h>
#include <asm/maar.h>
#include <asm/mmu_context.h>
#include <asm/sections.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/fixmap.h>

/*
 * We have up to 8 empty zeroed pages so we can map one of the right colour
 * when needed. This is necessary only on R4000 / R4400 SC and MC versions
 * where we have to avoid VCED / VECI exceptions for good performance at
 * any price. Since page is never written to after the initialization we
 * don't have to care about aliases on other CPUs.
 */
unsigned long empty_zero_page, zero_page_mask;
EXPORT_SYMBOL_GPL(empty_zero_page);
EXPORT_SYMBOL(zero_page_mask);

/*
 * Not static inline because used by IP27 special magic initialization code
 */
void setup_zero_pages(void)
{
        unsigned int order, i;
        struct page *page;

        if (cpu_has_vce)
                order = 3;
        else
                order = 0;

        empty_zero_page = __get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
        if (!empty_zero_page)
                panic("Oh boy, that early out of memory?");

        page = virt_to_page((void *)empty_zero_page);
        split_page(page, order);
        for (i = 0; i < (1 << order); i++, page++)
                mark_page_reserved(page);

        zero_page_mask = ((PAGE_SIZE << order) - 1) & PAGE_MASK;
}
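
/*
 * Map @page with protection @prot at a fixmap address whose cache colour
 * matches the user address @addr, using a wired TLB entry. This keeps
 * kernel accesses coherent with the user mapping on CPUs whose dcaches
 * can alias. The mapping must be torn down with kunmap_coherent().
 */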
static void *__kmap_pgprot(struct page *page, unsigned long addr, pgprot_t prot)
{
        enum fixed_addresses idx;
        unsigned int uninitialized_var(old_mmid);
        unsigned long vaddr, flags, entrylo;
        unsigned long old_ctx;
        pte_t pte;
        int tlbidx;

        BUG_ON(Page_dcache_dirty(page));

        preempt_disable();
        pagefault_disable();
        idx = (addr >> PAGE_SHIFT) & (FIX_N_COLOURS - 1);
        idx += in_interrupt() ? FIX_N_COLOURS : 0;
        vaddr = __fix_to_virt(FIX_CMAP_END - idx);
        pte = mk_pte(page, prot);
#if defined(CONFIG_XPA)
        entrylo = pte_to_entrylo(pte.pte_high);
#elif defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)
        entrylo = pte.pte_high;
#else
        entrylo = pte_to_entrylo(pte_val(pte));
#endif

        local_irq_save(flags);
        old_ctx = read_c0_entryhi();
        write_c0_entryhi(vaddr & (PAGE_MASK << 1));
        write_c0_entrylo0(entrylo);
        write_c0_entrylo1(entrylo);
        if (cpu_has_mmid) {
                old_mmid = read_c0_memorymapid();
                write_c0_memorymapid(MMID_KERNEL_WIRED);
        }
#ifdef CONFIG_XPA
        if (cpu_has_xpa) {
                entrylo = (pte.pte_low & _PFNX_MASK);
                writex_c0_entrylo0(entrylo);
                writex_c0_entrylo1(entrylo);
        }
#endif
        tlbidx = num_wired_entries();
        write_c0_wired(tlbidx + 1);
        write_c0_index(tlbidx);
        mtc0_tlbw_hazard();
        tlb_write_indexed();
        tlbw_use_hazard();
        write_c0_entryhi(old_ctx);
        if (cpu_has_mmid)
                write_c0_memorymapid(old_mmid);
        local_irq_restore(flags);

        return (void*) vaddr;
}

void *kmap_coherent(struct page *page, unsigned long addr)
{
        return __kmap_pgprot(page, addr, PAGE_KERNEL);
}

void *kmap_noncoherent(struct page *page, unsigned long addr)
{
        return __kmap_pgprot(page, addr, PAGE_KERNEL_NC);
}
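
/*
 * Undo the wired TLB entry installed by the most recent kmap_coherent()
 * or kmap_noncoherent() call and restore the previous wired count.
 */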
void kunmap_coherent(void)
{
        unsigned int wired;
        unsigned long flags, old_ctx;

        local_irq_save(flags);
        old_ctx = read_c0_entryhi();
        wired = num_wired_entries() - 1;
        write_c0_wired(wired);
        write_c0_index(wired);
        write_c0_entryhi(UNIQUE_ENTRYHI(wired));
        write_c0_entrylo0(0);
        write_c0_entrylo1(0);
        mtc0_tlbw_hazard();
        tlb_write_indexed();
        tlbw_use_hazard();
        write_c0_entryhi(old_ctx);
        local_irq_restore(flags);
        pagefault_enable();
        preempt_enable();
}
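
/*
 * Copy a user highmem page. If the dcache can alias and the source page
 * is mapped and not dcache-dirty, read it through a colour-matched
 * kmap_coherent() mapping so we observe the same data as userspace.
 */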
void copy_user_highpage(struct page *to, struct page *from,
        unsigned long vaddr, struct vm_area_struct *vma)
{
        void *vfrom, *vto;

        vto = kmap_atomic(to);
        if (cpu_has_dc_aliases &&
            page_mapcount(from) && !Page_dcache_dirty(from)) {
                vfrom = kmap_coherent(from, vaddr);
                copy_page(vto, vfrom);
                kunmap_coherent();
        } else {
                vfrom = kmap_atomic(from);
                copy_page(vto, vfrom);
                kunmap_atomic(vfrom);
        }
        if ((!cpu_has_ic_fills_f_dc) ||
            pages_do_alias((unsigned long)vto, vaddr & PAGE_MASK))
                flush_data_cache_page((unsigned long)vto);
        kunmap_atomic(vto);
        /* Make sure this page is cleared on other CPU's too before using it */
        smp_wmb();
}
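
/*
 * copy_to_user_page() and copy_from_user_page() access a user page on
 * behalf of the kernel (e.g. for ptrace). With dcache aliasing they go
 * through kmap_coherent() while the page is mapped and clean; otherwise
 * they use the kernel mapping and mark the page dcache-dirty.
 */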
void copy_to_user_page(struct vm_area_struct *vma,
        struct page *page, unsigned long vaddr, void *dst, const void *src,
        unsigned long len)
{
        if (cpu_has_dc_aliases &&
            page_mapcount(page) && !Page_dcache_dirty(page)) {
                void *vto = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
                memcpy(vto, src, len);
                kunmap_coherent();
        } else {
                memcpy(dst, src, len);
                if (cpu_has_dc_aliases)
                        SetPageDcacheDirty(page);
        }
        if (vma->vm_flags & VM_EXEC)
                flush_cache_page(vma, vaddr, page_to_pfn(page));
}

void copy_from_user_page(struct vm_area_struct *vma,
        struct page *page, unsigned long vaddr, void *dst, const void *src,
        unsigned long len)
{
        if (cpu_has_dc_aliases &&
            page_mapcount(page) && !Page_dcache_dirty(page)) {
                void *vfrom = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
                memcpy(dst, vfrom, len);
                kunmap_coherent();
        } else {
                memcpy(dst, src, len);
                if (cpu_has_dc_aliases)
                        SetPageDcacheDirty(page);
        }
}
EXPORT_SYMBOL_GPL(copy_from_user_page);
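
/*
 * Populate the page tables that cover the fixmap/kmap virtual range from
 * @start to @end, allocating PTE pages from memblock as needed. Does
 * nothing unless CONFIG_HIGHMEM is enabled.
 */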
void __init fixrange_init(unsigned long start, unsigned long end,
        pgd_t *pgd_base)
{
#ifdef CONFIG_HIGHMEM
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;
        int i, j, k;
        unsigned long vaddr;

        vaddr = start;
        i = __pgd_offset(vaddr);
        j = __pud_offset(vaddr);
        k = __pmd_offset(vaddr);
        pgd = pgd_base + i;

        for ( ; (i < PTRS_PER_PGD) && (vaddr < end); pgd++, i++) {
                pud = (pud_t *)pgd;
                for ( ; (j < PTRS_PER_PUD) && (vaddr < end); pud++, j++) {
                        pmd = (pmd_t *)pud;
                        for (; (k < PTRS_PER_PMD) && (vaddr < end); pmd++, k++) {
                                if (pmd_none(*pmd)) {
                                        pte = (pte_t *) memblock_alloc_low(PAGE_SIZE,
                                                                           PAGE_SIZE);
                                        if (!pte)
                                                panic("%s: Failed to allocate %lu bytes align=%lx\n",
                                                      __func__, PAGE_SIZE,
                                                      PAGE_SIZE);

                                        set_pmd(pmd, __pmd((unsigned long)pte));
                                        BUG_ON(pte != pte_offset_kernel(pmd, 0));
                                }
                                vaddr += PMD_SIZE;
                        }
                        k = 0;
                }
                j = 0;
        }
#endif
}
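
/*
 * Default (weak) MAAR setup: permit speculation for every usable RAM
 * region in boot_mem_map, with each region rounded inwards to 64K
 * boundaries. Platforms may override this with their own configuration.
 */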
unsigned __weak platform_maar_init(unsigned num_pairs)
{
        struct maar_config cfg[BOOT_MEM_MAP_MAX];
        unsigned i, num_configured, num_cfg = 0;

        for (i = 0; i < boot_mem_map.nr_map; i++) {
                switch (boot_mem_map.map[i].type) {
                case BOOT_MEM_RAM:
                case BOOT_MEM_INIT_RAM:
                        break;
                default:
                        continue;
                }

                /* Round lower up */
                cfg[num_cfg].lower = boot_mem_map.map[i].addr;
                cfg[num_cfg].lower = (cfg[num_cfg].lower + 0xffff) & ~0xffff;

                /* Round upper down */
                cfg[num_cfg].upper = boot_mem_map.map[i].addr +
                                        boot_mem_map.map[i].size;
                cfg[num_cfg].upper = (cfg[num_cfg].upper & ~0xffff) - 1;

                cfg[num_cfg].attrs = MIPS_MAAR_S;
                num_cfg++;
        }

        num_configured = maar_config(cfg, num_cfg, num_pairs);
        if (num_configured < num_cfg)
                pr_warn("Not enough MAAR pairs (%u) for all bootmem regions (%u)\n",
                        num_pairs, num_cfg);

        return num_configured;
}
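
/*
 * Probe the number of MAAR pairs and program them. The boot CPU records
 * the configuration it writes so that secondary CPUs can replay it; any
 * remaining MAARs are disabled and the resulting layout is printed once.
 */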
void maar_init(void)
{
        unsigned num_maars, used, i;
        phys_addr_t lower, upper, attr;
        static struct {
                struct maar_config cfgs[3];
                unsigned used;
        } recorded = { { { 0 } }, 0 };

        if (!cpu_has_maar)
                return;

        /* Detect the number of MAARs */
        write_c0_maari(~0);
        back_to_back_c0_hazard();
        num_maars = read_c0_maari() + 1;

        /* MAARs should be in pairs */
        WARN_ON(num_maars % 2);

        /* Set MAARs using values we recorded already */
        if (recorded.used) {
                used = maar_config(recorded.cfgs, recorded.used, num_maars / 2);
                BUG_ON(used != recorded.used);
        } else {
                /* Configure the required MAARs */
                used = platform_maar_init(num_maars / 2);
        }

        /* Disable any further MAARs */
        for (i = (used * 2); i < num_maars; i++) {
                write_c0_maari(i);
                back_to_back_c0_hazard();
                write_c0_maar(0);
                back_to_back_c0_hazard();
        }

        if (recorded.used)
                return;

        pr_info("MAAR configuration:\n");
        for (i = 0; i < num_maars; i += 2) {
                write_c0_maari(i);
                back_to_back_c0_hazard();
                upper = read_c0_maar();

                write_c0_maari(i + 1);
                back_to_back_c0_hazard();
                lower = read_c0_maar();

                attr = lower & upper;
                lower = (lower & MIPS_MAAR_ADDR) << 4;
                upper = ((upper & MIPS_MAAR_ADDR) << 4) | 0xffff;

                pr_info(" [%d]: ", i / 2);
                if (!(attr & MIPS_MAAR_VL)) {
                        pr_cont("disabled\n");
                        continue;
                }

                pr_cont("%pa-%pa", &lower, &upper);

                if (attr & MIPS_MAAR_S)
                        pr_cont(" speculate");

                pr_cont("\n");

                /* Record the setup for use on secondary CPUs */
                if (used <= ARRAY_SIZE(recorded.cfgs)) {
                        recorded.cfgs[recorded.used].lower = lower;
                        recorded.cfgs[recorded.used].upper = upper;
                        recorded.cfgs[recorded.used].attrs = attr;
                        recorded.used++;
                }
        }
}

#ifndef CONFIG_NEED_MULTIPLE_NODES
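/*
 * Return 1 if page frame number @pagenr lies within a usable RAM region
 * described by boot_mem_map, 0 otherwise.
 */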
int page_is_ram(unsigned long pagenr)
{
        int i;

        for (i = 0; i < boot_mem_map.nr_map; i++) {
                unsigned long addr, end;

                switch (boot_mem_map.map[i].type) {
                case BOOT_MEM_RAM:
                case BOOT_MEM_INIT_RAM:
                        break;
                default:
                        /* not usable memory */
                        continue;
                }

                addr = PFN_UP(boot_mem_map.map[i].addr);
                end = PFN_DOWN(boot_mem_map.map[i].addr +
                               boot_mem_map.map[i].size);

                if (pagenr >= addr && pagenr < end)
                        return 1;
        }

        return 0;
}
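
/*
 * Build the kernel page tables and hand the per-zone PFN limits to the
 * core VM. Highmem is ignored when the dcache can alias, since such
 * configurations cannot support it (see the warning below).
 */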
void __init paging_init(void)
{
        unsigned long max_zone_pfns[MAX_NR_ZONES];

        pagetable_init();

#ifdef CONFIG_HIGHMEM
        kmap_init();
#endif
#ifdef CONFIG_ZONE_DMA
        max_zone_pfns[ZONE_DMA] = MAX_DMA_PFN;
#endif
#ifdef CONFIG_ZONE_DMA32
        max_zone_pfns[ZONE_DMA32] = MAX_DMA32_PFN;
#endif
        max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
#ifdef CONFIG_HIGHMEM
        max_zone_pfns[ZONE_HIGHMEM] = highend_pfn;

        if (cpu_has_dc_aliases && max_low_pfn != highend_pfn) {
                printk(KERN_WARNING "This processor doesn't support highmem."
                       " %ldk highmem ignored\n",
                       (highend_pfn - max_low_pfn) << (PAGE_SHIFT - 10));
                max_zone_pfns[ZONE_HIGHMEM] = max_low_pfn;
        }
#endif

        free_area_init_nodes(max_zone_pfns);
}

#ifdef CONFIG_64BIT
static struct kcore_list kcore_kseg0;
#endif
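
/*
 * Release usable highmem pages to the buddy allocator. Skipped entirely
 * when the dcache can alias, since highmem is unsupported in that case
 * (see paging_init()).
 */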
static inline void mem_init_free_highmem(void)
{
#ifdef CONFIG_HIGHMEM
        unsigned long tmp;

        if (cpu_has_dc_aliases)
                return;

        for (tmp = highstart_pfn; tmp < highend_pfn; tmp++) {
                struct page *page = pfn_to_page(tmp);

                if (!page_is_ram(tmp))
                        SetPageReserved(page);
                else
                        free_highmem_page(page);
        }
#endif
}
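
/*
 * Late boot memory initialisation: set up max_mapnr and high_memory,
 * configure the MAARs, release all memblock memory to the buddy
 * allocator, set up the zero pages, free highmem and, on 64-bit kernels,
 * expose KSEG0 through /proc/kcore.
 */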
void __init mem_init(void)
{
#ifdef CONFIG_HIGHMEM
#ifdef CONFIG_DISCONTIGMEM
#error "CONFIG_HIGHMEM and CONFIG_DISCONTIGMEM dont work together yet"
#endif
        max_mapnr = highend_pfn ? highend_pfn : max_low_pfn;
#else
        max_mapnr = max_low_pfn;
#endif
        high_memory = (void *) __va(max_low_pfn << PAGE_SHIFT);

        maar_init();
        memblock_free_all();
        setup_zero_pages();     /* Setup zeroed pages. */
        mem_init_free_highmem();
        mem_init_print_info(NULL);

#ifdef CONFIG_64BIT
        if ((unsigned long) &_text > (unsigned long) CKSEG0)
                /* The -4 is a hack so that user tools don't have to handle
                   the overflow. */
                kclist_add(&kcore_kseg0, (void *) CKSEG0,
                                0x80000000 - 4, KCORE_TEXT);
#endif
}

#endif /* !CONFIG_NEED_MULTIPLE_NODES */
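
/*
 * Poison and free the init pages spanning physical [begin, end), handing
 * them back to the buddy allocator.
 */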
void free_init_pages(const char *what, unsigned long begin, unsigned long end)
{
        unsigned long pfn;

        for (pfn = PFN_UP(begin); pfn < PFN_DOWN(end); pfn++) {
                struct page *page = pfn_to_page(pfn);
                void *addr = phys_to_virt(PFN_PHYS(pfn));

                memset(addr, POISON_FREE_INITMEM, PAGE_SIZE);
                free_reserved_page(page);
        }
        printk(KERN_INFO "Freeing %s: %ldk freed\n", what, (end - begin) >> 10);
}

#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
        free_reserved_area((void *)start, (void *)end, POISON_FREE_INITMEM,
                           "initrd");
}
#endif

void (*free_init_pages_eva)(void *begin, void *end) = NULL;

void __ref free_initmem(void)
{
        prom_free_prom_memory();
        /*
         * Let the platform define a specific function to free the
         * init section since EVA may have used any possible mapping
         * between virtual and physical addresses.
         */
        if (free_init_pages_eva)
                free_init_pages_eva((void *)&__init_begin, (void *)&__init_end);
        else
                free_initmem_default(POISON_FREE_INITMEM);
}

#ifndef CONFIG_MIPS_PGD_C0_CONTEXT
unsigned long pgd_current[NR_CPUS];
#endif

/*
 * Align swapper_pg_dir in to 64K, allows its address to be loaded
 * with a single LUI instruction in the TLB handlers. If we used
 * __aligned(64K), its size would get rounded up to the alignment
 * size, and waste space. So we place it in its own section and align
 * it in the linker script.
 */
pgd_t swapper_pg_dir[PTRS_PER_PGD] __section(.bss..swapper_pg_dir);
#ifndef __PAGETABLE_PUD_FOLDED
pud_t invalid_pud_table[PTRS_PER_PUD] __page_aligned_bss;
#endif
#ifndef __PAGETABLE_PMD_FOLDED
pmd_t invalid_pmd_table[PTRS_PER_PMD] __page_aligned_bss;
EXPORT_SYMBOL_GPL(invalid_pmd_table);
#endif
pte_t invalid_pte_table[PTRS_PER_PTE] __page_aligned_bss;
EXPORT_SYMBOL(invalid_pte_table);