From be0c37c985eddc46d0d67543898c086f60460e2e Mon Sep 17 00:00:00 2001 From: "Steven J. Hill" Date: Thu, 26 Feb 2015 18:16:37 -0600 Subject: [PATCH 01/33] MIPS: Rearrange PTE bits into fixed positions. This patch rearranges the PTE bits into fixed positions for R2 and later cores. In the past, the TLB handling code did runtime checking of RI/XI and adjusted the shifts and rotates in order to fit the largest PFN value into the PTE. The checking now occurs when building the TLB handler, thus eliminating those checks. These new arrangements also define the largest possible PFN value that can fit in the PTE. HUGE page support is only available for 64-bit cores. Layouts of the PTE bits are now: 64-bit, R1 or earlier: CCC D V G [S H] M A W R P 32-bit, R1 or earlier: CCC D V G M A W R P 64-bit, R2 or later: CCC D V G RI/R XI [S H] M A W P 32-bit, R2 or later: CCC D V G RI/R XI M A W P [ralf@linux-mips.org: Fix another build error *rant* *rant*] Signed-off-by: Steven J. Hill Cc: linux-mips@linux-mips.org Patchwork: https://patchwork.linux-mips.org/patch/9353/ Signed-off-by: Ralf Baechle --- arch/mips/include/asm/pgtable-bits.h | 83 ++++++++++++++++++---------- arch/mips/include/asm/pgtable.h | 38 ++++++------- arch/mips/mm/tlbex.c | 4 +- 3 files changed, 76 insertions(+), 49 deletions(-) diff --git a/arch/mips/include/asm/pgtable-bits.h b/arch/mips/include/asm/pgtable-bits.h index 91747c282bb3..8e432a8ec4fe 100644 --- a/arch/mips/include/asm/pgtable-bits.h +++ b/arch/mips/include/asm/pgtable-bits.h @@ -95,11 +95,7 @@ #else /* - * When using the RI/XI bit support, we have 13 bits of flags below - * the physical address. The RI/XI bits are placed such that a SRL 5 - * can strip off the software bits, then a ROTR 2 can move the RI/XI - * into bits [63:62]. This also limits physical address to 56 bits, - * which is more than we need right now. + * Below are the "Normal" R4K cases */ /* @@ -107,38 +103,59 @@ */ #define _PAGE_PRESENT_SHIFT 0 #define _PAGE_PRESENT (1 << _PAGE_PRESENT_SHIFT) -#define _PAGE_READ_SHIFT (cpu_has_rixi ? 
_PAGE_PRESENT_SHIFT : _PAGE_PRESENT_SHIFT + 1) -#define _PAGE_READ ({BUG_ON(cpu_has_rixi); 1 << _PAGE_READ_SHIFT; }) +/* R2 or later cores check for RI/XI support to determine _PAGE_READ */ +#ifdef CONFIG_CPU_MIPSR2 +#define _PAGE_WRITE_SHIFT (_PAGE_PRESENT_SHIFT + 1) +#define _PAGE_WRITE (1 << _PAGE_WRITE_SHIFT) +#else +#define _PAGE_READ_SHIFT (_PAGE_PRESENT_SHIFT + 1) +#define _PAGE_READ (1 << _PAGE_READ_SHIFT) #define _PAGE_WRITE_SHIFT (_PAGE_READ_SHIFT + 1) #define _PAGE_WRITE (1 << _PAGE_WRITE_SHIFT) +#endif #define _PAGE_ACCESSED_SHIFT (_PAGE_WRITE_SHIFT + 1) #define _PAGE_ACCESSED (1 << _PAGE_ACCESSED_SHIFT) #define _PAGE_MODIFIED_SHIFT (_PAGE_ACCESSED_SHIFT + 1) #define _PAGE_MODIFIED (1 << _PAGE_MODIFIED_SHIFT) -#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT -/* huge tlb page */ +#if defined(CONFIG_64BIT) && defined(CONFIG_MIPS_HUGE_TLB_SUPPORT) +/* Huge TLB page */ #define _PAGE_HUGE_SHIFT (_PAGE_MODIFIED_SHIFT + 1) #define _PAGE_HUGE (1 << _PAGE_HUGE_SHIFT) #define _PAGE_SPLITTING_SHIFT (_PAGE_HUGE_SHIFT + 1) #define _PAGE_SPLITTING (1 << _PAGE_SPLITTING_SHIFT) + +/* Only R2 or newer cores have the XI bit */ +#ifdef CONFIG_CPU_MIPSR2 +#define _PAGE_NO_EXEC_SHIFT (_PAGE_SPLITTING_SHIFT + 1) #else -#define _PAGE_HUGE_SHIFT (_PAGE_MODIFIED_SHIFT) -#define _PAGE_HUGE ({BUG(); 1; }) /* Dummy value */ -#define _PAGE_SPLITTING_SHIFT (_PAGE_HUGE_SHIFT) -#define _PAGE_SPLITTING ({BUG(); 1; }) /* Dummy value */ +#define _PAGE_GLOBAL_SHIFT (_PAGE_SPLITTING_SHIFT + 1) +#define _PAGE_GLOBAL (1 << _PAGE_GLOBAL_SHIFT) +#endif /* CONFIG_CPU_MIPSR2 */ + +#endif /* CONFIG_64BIT && CONFIG_MIPS_HUGE_TLB_SUPPORT */ + +#ifdef CONFIG_CPU_MIPSR2 +/* XI - page cannot be executed */ +#ifndef _PAGE_NO_EXEC_SHIFT +#define _PAGE_NO_EXEC_SHIFT (_PAGE_MODIFIED_SHIFT + 1) #endif +#define _PAGE_NO_EXEC (cpu_has_rixi ? (1 << _PAGE_NO_EXEC_SHIFT) : 0) -/* Page cannot be executed */ -#define _PAGE_NO_EXEC_SHIFT (cpu_has_rixi ? _PAGE_SPLITTING_SHIFT + 1 : _PAGE_SPLITTING_SHIFT) -#define _PAGE_NO_EXEC ({BUG_ON(!cpu_has_rixi); 1 << _PAGE_NO_EXEC_SHIFT; }) - -/* Page cannot be read */ -#define _PAGE_NO_READ_SHIFT (cpu_has_rixi ? _PAGE_NO_EXEC_SHIFT + 1 : _PAGE_NO_EXEC_SHIFT) -#define _PAGE_NO_READ ({BUG_ON(!cpu_has_rixi); 1 << _PAGE_NO_READ_SHIFT; }) +/* RI - page cannot be read */ +#define _PAGE_READ_SHIFT (_PAGE_NO_EXEC_SHIFT + 1) +#define _PAGE_READ (cpu_has_rixi ? 0 : (1 << _PAGE_READ_SHIFT)) +#define _PAGE_NO_READ_SHIFT _PAGE_READ_SHIFT +#define _PAGE_NO_READ (cpu_has_rixi ? 
(1 << _PAGE_READ_SHIFT) : 0) #define _PAGE_GLOBAL_SHIFT (_PAGE_NO_READ_SHIFT + 1) #define _PAGE_GLOBAL (1 << _PAGE_GLOBAL_SHIFT) + +#else /* !CONFIG_CPU_MIPSR2 */ +#define _PAGE_GLOBAL_SHIFT (_PAGE_MODIFIED_SHIFT + 1) +#define _PAGE_GLOBAL (1 << _PAGE_GLOBAL_SHIFT) +#endif /* CONFIG_CPU_MIPSR2 */ + #define _PAGE_VALID_SHIFT (_PAGE_GLOBAL_SHIFT + 1) #define _PAGE_VALID (1 << _PAGE_VALID_SHIFT) #define _PAGE_DIRTY_SHIFT (_PAGE_VALID_SHIFT + 1) @@ -150,18 +167,26 @@ #endif /* defined(CONFIG_PHYS_ADDR_T_64BIT && defined(CONFIG_CPU_MIPS32) */ +#ifndef _PAGE_NO_EXEC +#define _PAGE_NO_EXEC 0 +#endif +#ifndef _PAGE_NO_READ +#define _PAGE_NO_READ 0 +#endif + #define _PAGE_SILENT_READ _PAGE_VALID #define _PAGE_SILENT_WRITE _PAGE_DIRTY #define _PFN_MASK (~((1 << (_PFN_SHIFT)) - 1)) -#ifndef _PAGE_NO_READ -#define _PAGE_NO_READ ({BUG(); 0; }) -#define _PAGE_NO_READ_SHIFT ({BUG(); 0; }) -#endif -#ifndef _PAGE_NO_EXEC -#define _PAGE_NO_EXEC ({BUG(); 0; }) -#endif +/* + * The final layouts of the PTE bits are: + * + * 64-bit, R1 or earlier: CCC D V G [S H] M A W R P + * 32-bit, R1 or earlier: CCC D V G M A W R P + * 64-bit, R2 or later: CCC D V G RI/R XI [S H] M A W P + * 32-bit, R2 or later: CCC D V G RI/R XI M A W P + */ #ifndef __ASSEMBLY__ @@ -171,6 +196,7 @@ */ static inline uint64_t pte_to_entrylo(unsigned long pte_val) { +#ifdef CONFIG_CPU_MIPSR2 if (cpu_has_rixi) { int sa; #ifdef CONFIG_32BIT @@ -186,6 +212,7 @@ static inline uint64_t pte_to_entrylo(unsigned long pte_val) return (pte_val >> _PAGE_GLOBAL_SHIFT) | ((pte_val & (_PAGE_NO_EXEC | _PAGE_NO_READ)) << sa); } +#endif return pte_val >> _PAGE_GLOBAL_SHIFT; } @@ -245,7 +272,7 @@ static inline uint64_t pte_to_entrylo(unsigned long pte_val) #define _CACHE_UNCACHED_ACCELERATED (7<<_CACHE_SHIFT) #endif -#define __READABLE (_PAGE_SILENT_READ | _PAGE_ACCESSED | (cpu_has_rixi ? 0 : _PAGE_READ)) +#define __READABLE (_PAGE_SILENT_READ | _PAGE_READ | _PAGE_ACCESSED) #define __WRITEABLE (_PAGE_SILENT_WRITE | _PAGE_WRITE | _PAGE_MODIFIED) #define _PAGE_CHG_MASK (_PAGE_ACCESSED | _PAGE_MODIFIED | \ diff --git a/arch/mips/include/asm/pgtable.h b/arch/mips/include/asm/pgtable.h index bef782c4a44b..e1fec0237ce2 100644 --- a/arch/mips/include/asm/pgtable.h +++ b/arch/mips/include/asm/pgtable.h @@ -24,17 +24,17 @@ struct mm_struct; struct vm_area_struct; #define PAGE_NONE __pgprot(_PAGE_PRESENT | _CACHE_CACHABLE_NONCOHERENT) -#define PAGE_SHARED __pgprot(_PAGE_PRESENT | _PAGE_WRITE | (cpu_has_rixi ? 0 : _PAGE_READ) | \ +#define PAGE_SHARED __pgprot(_PAGE_PRESENT | _PAGE_WRITE | _PAGE_READ | \ _page_cachable_default) -#define PAGE_COPY __pgprot(_PAGE_PRESENT | (cpu_has_rixi ? 0 : _PAGE_READ) | \ - (cpu_has_rixi ? _PAGE_NO_EXEC : 0) | _page_cachable_default) -#define PAGE_READONLY __pgprot(_PAGE_PRESENT | (cpu_has_rixi ? 0 : _PAGE_READ) | \ +#define PAGE_COPY __pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_NO_EXEC | \ + _page_cachable_default) +#define PAGE_READONLY __pgprot(_PAGE_PRESENT | _PAGE_READ | \ _page_cachable_default) #define PAGE_KERNEL __pgprot(_PAGE_PRESENT | __READABLE | __WRITEABLE | \ _PAGE_GLOBAL | _page_cachable_default) #define PAGE_KERNEL_NC __pgprot(_PAGE_PRESENT | __READABLE | __WRITEABLE | \ _PAGE_GLOBAL | _CACHE_CACHABLE_NONCOHERENT) -#define PAGE_USERIO __pgprot(_PAGE_PRESENT | (cpu_has_rixi ? 
0 : _PAGE_READ) | _PAGE_WRITE | \ +#define PAGE_USERIO __pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \ _page_cachable_default) #define PAGE_KERNEL_UNCACHED __pgprot(_PAGE_PRESENT | __READABLE | \ __WRITEABLE | _PAGE_GLOBAL | _CACHE_UNCACHED) @@ -332,13 +332,13 @@ static inline pte_t pte_mkdirty(pte_t pte) static inline pte_t pte_mkyoung(pte_t pte) { pte_val(pte) |= _PAGE_ACCESSED; - if (cpu_has_rixi) { - if (!(pte_val(pte) & _PAGE_NO_READ)) - pte_val(pte) |= _PAGE_SILENT_READ; - } else { - if (pte_val(pte) & _PAGE_READ) - pte_val(pte) |= _PAGE_SILENT_READ; - } +#ifdef CONFIG_CPU_MIPSR2 + if (!(pte_val(pte) & _PAGE_NO_READ)) + pte_val(pte) |= _PAGE_SILENT_READ; + else +#endif + if (pte_val(pte) & _PAGE_READ) + pte_val(pte) |= _PAGE_SILENT_READ; return pte; } @@ -534,13 +534,13 @@ static inline pmd_t pmd_mkyoung(pmd_t pmd) { pmd_val(pmd) |= _PAGE_ACCESSED; - if (cpu_has_rixi) { - if (!(pmd_val(pmd) & _PAGE_NO_READ)) - pmd_val(pmd) |= _PAGE_SILENT_READ; - } else { - if (pmd_val(pmd) & _PAGE_READ) - pmd_val(pmd) |= _PAGE_SILENT_READ; - } +#ifdef CONFIG_CPU_MIPSR2 + if (!(pmd_val(pmd) & _PAGE_NO_READ)) + pmd_val(pmd) |= _PAGE_SILENT_READ; + else +#endif + if (pmd_val(pmd) & _PAGE_READ) + pmd_val(pmd) |= _PAGE_SILENT_READ; return pmd; } diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c index d75ff73a2012..20d985901e44 100644 --- a/arch/mips/mm/tlbex.c +++ b/arch/mips/mm/tlbex.c @@ -231,14 +231,14 @@ static void output_pgtable_bits_defines(void) pr_define("_PAGE_HUGE_SHIFT %d\n", _PAGE_HUGE_SHIFT); pr_define("_PAGE_SPLITTING_SHIFT %d\n", _PAGE_SPLITTING_SHIFT); #endif +#ifdef CONFIG_CPU_MIPSR2 if (cpu_has_rixi) { #ifdef _PAGE_NO_EXEC_SHIFT pr_define("_PAGE_NO_EXEC_SHIFT %d\n", _PAGE_NO_EXEC_SHIFT); -#endif -#ifdef _PAGE_NO_READ_SHIFT pr_define("_PAGE_NO_READ_SHIFT %d\n", _PAGE_NO_READ_SHIFT); #endif } +#endif pr_define("_PAGE_GLOBAL_SHIFT %d\n", _PAGE_GLOBAL_SHIFT); pr_define("_PAGE_VALID_SHIFT %d\n", _PAGE_VALID_SHIFT); pr_define("_PAGE_DIRTY_SHIFT %d\n", _PAGE_DIRTY_SHIFT); From c5b367835cfc7a8ef53b9670a409ffcc95194344 Mon Sep 17 00:00:00 2001 From: "Steven J. Hill" Date: Thu, 26 Feb 2015 18:16:38 -0600 Subject: [PATCH 02/33] MIPS: Add support for XPA. Add support for extended physical addressing (XPA) so that 32-bit platforms can access 40 or more bits of physical address space. NOTE: 1) XPA and EVA are not the same and cannot be used simultaneously. 2) If you configure your kernel for XPA, the PTEs and all address sizes become 64-bit. 3) Your platform MUST have working HIGHMEM support. Signed-off-by: Steven J. 
Hill Cc: linux-mips@linux-mips.org Patchwork: https://patchwork.linux-mips.org/patch/9355/ Signed-off-by: Ralf Baechle --- arch/mips/Kconfig | 35 +++++++++++ arch/mips/include/asm/cpu-features.h | 3 + arch/mips/include/asm/cpu.h | 1 + arch/mips/include/asm/pgtable-32.h | 15 +++-- arch/mips/include/asm/pgtable-bits.h | 13 +++- arch/mips/include/asm/pgtable.h | 36 +++++------ arch/mips/kernel/cpu-probe.c | 4 ++ arch/mips/kernel/proc.c | 1 + arch/mips/mm/init.c | 7 ++- arch/mips/mm/tlb-r4k.c | 12 ++++ arch/mips/mm/tlbex.c | 90 ++++++++++++++++++++++++---- 11 files changed, 173 insertions(+), 44 deletions(-) diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig index c7a16904cd03..69a3b0fab926 100644 --- a/arch/mips/Kconfig +++ b/arch/mips/Kconfig @@ -377,6 +377,7 @@ config MIPS_MALTA select SYS_HAS_CPU_MIPS32_R1 select SYS_HAS_CPU_MIPS32_R2 select SYS_HAS_CPU_MIPS32_R3_5 + select SYS_HAS_CPU_MIPS32_R5 select SYS_HAS_CPU_MIPS32_R6 select SYS_HAS_CPU_MIPS64_R1 select SYS_HAS_CPU_MIPS64_R2 @@ -386,6 +387,7 @@ config MIPS_MALTA select SYS_SUPPORTS_32BIT_KERNEL select SYS_SUPPORTS_64BIT_KERNEL select SYS_SUPPORTS_BIG_ENDIAN + select SYS_SUPPORTS_HIGHMEM select SYS_SUPPORTS_LITTLE_ENDIAN select SYS_SUPPORTS_MICROMIPS select SYS_SUPPORTS_MIPS_CMP @@ -1596,6 +1598,33 @@ config CPU_MIPS32_3_5_EVA One of its primary benefits is an increase in the maximum size of lowmem (up to 3GB). If unsure, say 'N' here. +config CPU_MIPS32_R5_FEATURES + bool "MIPS32 Release 5 Features" + depends on SYS_HAS_CPU_MIPS32_R5 + depends on CPU_MIPS32_R2 + help + Choose this option to build a kernel for release 2 or later of the + MIPS32 architecture including features from release 5 such as + support for Extended Physical Addressing (XPA). + +config CPU_MIPS32_R5_XPA + bool "Extended Physical Addressing (XPA)" + depends on CPU_MIPS32_R5_FEATURES + depends on !EVA + depends on !PAGE_SIZE_4KB + depends on SYS_SUPPORTS_HIGHMEM + select XPA + select HIGHMEM + select ARCH_PHYS_ADDR_T_64BIT + default n + help + Choose this option if you want to enable Extended Physical + Addressing (XPA) on your MIPS32 core (such as the P5600 series). + The benefit is physical addressing of 40 bits or more. Note that + this has the side effect of turning on 64-bit addressing, which + in turn makes the PTEs 64 bits in size. If unsure, say 'N' here. 
+ if CPU_LOONGSON2F config CPU_NOP_WORKAROUNDS bool @@ -1699,6 +1728,9 @@ config SYS_HAS_CPU_MIPS32_R2 config SYS_HAS_CPU_MIPS32_R3_5 bool +config SYS_HAS_CPU_MIPS32_R5 + bool + config SYS_HAS_CPU_MIPS32_R6 bool @@ -1836,6 +1868,9 @@ config CPU_MIPSR6 config EVA bool +config XPA + bool + config SYS_SUPPORTS_32BIT_KERNEL bool config SYS_SUPPORTS_64BIT_KERNEL diff --git a/arch/mips/include/asm/cpu-features.h b/arch/mips/include/asm/cpu-features.h index 0d8208de9a3f..a324751b02ff 100644 --- a/arch/mips/include/asm/cpu-features.h +++ b/arch/mips/include/asm/cpu-features.h @@ -139,6 +139,9 @@ # endif #endif +#ifndef cpu_has_xpa +#define cpu_has_xpa (cpu_data[0].options & MIPS_CPU_XPA) +#endif #ifndef cpu_has_vtag_icache #define cpu_has_vtag_icache (cpu_data[0].icache.flags & MIPS_CACHE_VTAG) #endif diff --git a/arch/mips/include/asm/cpu.h b/arch/mips/include/asm/cpu.h index 15687234d70a..e492c740bb94 100644 --- a/arch/mips/include/asm/cpu.h +++ b/arch/mips/include/asm/cpu.h @@ -377,6 +377,7 @@ enum cpu_type_enum { #define MIPS_CPU_MAAR 0x400000000ull /* MAAR(I) registers are present */ #define MIPS_CPU_FRE 0x800000000ull /* FRE & UFE bits implemented */ #define MIPS_CPU_RW_LLB 0x1000000000ull /* LLADDR/LLB writes are allowed */ +#define MIPS_CPU_XPA 0x2000000000ull /* CPU supports Extended Physical Addressing */ /* * CPU ASE encodings diff --git a/arch/mips/include/asm/pgtable-32.h b/arch/mips/include/asm/pgtable-32.h index a6be006b6f75..7d56686c0e62 100644 --- a/arch/mips/include/asm/pgtable-32.h +++ b/arch/mips/include/asm/pgtable-32.h @@ -105,13 +105,16 @@ static inline void pmd_clear(pmd_t *pmdp) #if defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32) #define pte_page(x) pfn_to_page(pte_pfn(x)) -#define pte_pfn(x) ((unsigned long)((x).pte_high >> 6)) +#define pte_pfn(x) (((unsigned long)((x).pte_high >> _PFN_SHIFT)) | (unsigned long)((x).pte_low << _PAGE_PRESENT_SHIFT)) static inline pte_t pfn_pte(unsigned long pfn, pgprot_t prot) { pte_t pte; - pte.pte_high = (pfn << 6) | (pgprot_val(prot) & 0x3f); - pte.pte_low = pgprot_val(prot); + + pte.pte_low = (pfn >> _PAGE_PRESENT_SHIFT) | + (pgprot_val(prot) & ~_PFNX_MASK); + pte.pte_high = (pfn << _PFN_SHIFT) | + (pgprot_val(prot) & ~_PFN_MASK); return pte; } @@ -166,9 +169,9 @@ pfn_pte(unsigned long pfn, pgprot_t prot) #if defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32) /* Swap entries must have VALID and GLOBAL bits cleared. 
*/ -#define __swp_type(x) (((x).val >> 2) & 0x1f) -#define __swp_offset(x) ((x).val >> 7) -#define __swp_entry(type,offset) ((swp_entry_t) { ((type) << 2) | ((offset) << 7) }) +#define __swp_type(x) (((x).val >> 4) & 0x1f) +#define __swp_offset(x) ((x).val >> 9) +#define __swp_entry(type,offset) ((swp_entry_t) { ((type) << 4) | ((offset) << 9) }) #define __pte_to_swp_entry(pte) ((swp_entry_t) { (pte).pte_high }) #define __swp_entry_to_pte(x) ((pte_t) { 0, (x).val }) diff --git a/arch/mips/include/asm/pgtable-bits.h b/arch/mips/include/asm/pgtable-bits.h index 8e432a8ec4fe..18ae5ddef118 100644 --- a/arch/mips/include/asm/pgtable-bits.h +++ b/arch/mips/include/asm/pgtable-bits.h @@ -37,7 +37,11 @@ /* * The following bits are implemented by the TLB hardware */ -#define _PAGE_GLOBAL_SHIFT 0 +#define _PAGE_NO_EXEC_SHIFT 0 +#define _PAGE_NO_EXEC (1 << _PAGE_NO_EXEC_SHIFT) +#define _PAGE_NO_READ_SHIFT (_PAGE_NO_EXEC_SHIFT + 1) +#define _PAGE_NO_READ (1 << _PAGE_NO_READ_SHIFT) +#define _PAGE_GLOBAL_SHIFT (_PAGE_NO_READ_SHIFT + 1) #define _PAGE_GLOBAL (1 << _PAGE_GLOBAL_SHIFT) #define _PAGE_VALID_SHIFT (_PAGE_GLOBAL_SHIFT + 1) #define _PAGE_VALID (1 << _PAGE_VALID_SHIFT) @@ -49,7 +53,7 @@ /* * The following bits are implemented in software */ -#define _PAGE_PRESENT_SHIFT (_CACHE_SHIFT + 3) +#define _PAGE_PRESENT_SHIFT (24) #define _PAGE_PRESENT (1 << _PAGE_PRESENT_SHIFT) #define _PAGE_READ_SHIFT (_PAGE_PRESENT_SHIFT + 1) #define _PAGE_READ (1 << _PAGE_READ_SHIFT) @@ -62,6 +66,11 @@ #define _PFN_SHIFT (PAGE_SHIFT - 12 + _CACHE_SHIFT + 3) +/* + * Bits for extended EntryLo0/EntryLo1 registers + */ +#define _PFNX_MASK 0xffffff + #elif defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX) /* diff --git a/arch/mips/include/asm/pgtable.h b/arch/mips/include/asm/pgtable.h index e1fec0237ce2..bffd46ca3694 100644 --- a/arch/mips/include/asm/pgtable.h +++ b/arch/mips/include/asm/pgtable.h @@ -133,7 +133,7 @@ extern void set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep, #if defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32) -#define pte_none(pte) (!(((pte).pte_low | (pte).pte_high) & ~_PAGE_GLOBAL)) +#define pte_none(pte) (!(((pte).pte_high) & ~_PAGE_GLOBAL)) #define pte_present(pte) ((pte).pte_low & _PAGE_PRESENT) static inline void set_pte(pte_t *ptep, pte_t pte) @@ -142,16 +142,14 @@ static inline void set_pte(pte_t *ptep, pte_t pte) smp_wmb(); ptep->pte_low = pte.pte_low; - if (pte.pte_low & _PAGE_GLOBAL) { + if (pte.pte_high & _PAGE_GLOBAL) { pte_t *buddy = ptep_buddy(ptep); /* * Make sure the buddy is global too (if it's !none, * it better already be global) */ - if (pte_none(*buddy)) { - buddy->pte_low |= _PAGE_GLOBAL; + if (pte_none(*buddy)) buddy->pte_high |= _PAGE_GLOBAL; - } } } @@ -161,8 +159,8 @@ static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *pt htw_stop(); /* Preserve global status for the pair */ - if (ptep_buddy(ptep)->pte_low & _PAGE_GLOBAL) - null.pte_low = null.pte_high = _PAGE_GLOBAL; + if (ptep_buddy(ptep)->pte_high & _PAGE_GLOBAL) + null.pte_high = _PAGE_GLOBAL; set_pte_at(mm, addr, ptep, null); htw_start(); @@ -242,21 +240,21 @@ static inline int pte_young(pte_t pte) { return pte.pte_low & _PAGE_ACCESSED; } static inline pte_t pte_wrprotect(pte_t pte) { - pte.pte_low &= ~(_PAGE_WRITE | _PAGE_SILENT_WRITE); + pte.pte_low &= ~_PAGE_WRITE; pte.pte_high &= ~_PAGE_SILENT_WRITE; return pte; } static inline pte_t pte_mkclean(pte_t pte) { - pte.pte_low &= ~(_PAGE_MODIFIED | _PAGE_SILENT_WRITE); + pte.pte_low &= ~_PAGE_MODIFIED; 
pte.pte_high &= ~_PAGE_SILENT_WRITE; return pte; } static inline pte_t pte_mkold(pte_t pte) { - pte.pte_low &= ~(_PAGE_ACCESSED | _PAGE_SILENT_READ); + pte.pte_low &= ~_PAGE_ACCESSED; pte.pte_high &= ~_PAGE_SILENT_READ; return pte; } @@ -264,30 +262,24 @@ static inline pte_t pte_mkold(pte_t pte) static inline pte_t pte_mkwrite(pte_t pte) { pte.pte_low |= _PAGE_WRITE; - if (pte.pte_low & _PAGE_MODIFIED) { - pte.pte_low |= _PAGE_SILENT_WRITE; + if (pte.pte_low & _PAGE_MODIFIED) pte.pte_high |= _PAGE_SILENT_WRITE; - } return pte; } static inline pte_t pte_mkdirty(pte_t pte) { pte.pte_low |= _PAGE_MODIFIED; - if (pte.pte_low & _PAGE_WRITE) { - pte.pte_low |= _PAGE_SILENT_WRITE; + if (pte.pte_low & _PAGE_WRITE) pte.pte_high |= _PAGE_SILENT_WRITE; - } return pte; } static inline pte_t pte_mkyoung(pte_t pte) { pte.pte_low |= _PAGE_ACCESSED; - if (pte.pte_low & _PAGE_READ) { - pte.pte_low |= _PAGE_SILENT_READ; + if (pte.pte_low & _PAGE_READ) pte.pte_high |= _PAGE_SILENT_READ; - } return pte; } #else @@ -391,10 +383,10 @@ static inline pgprot_t pgprot_writecombine(pgprot_t _prot) #if defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32) static inline pte_t pte_modify(pte_t pte, pgprot_t newprot) { - pte.pte_low &= _PAGE_CHG_MASK; + pte.pte_low &= (_PAGE_MODIFIED | _PAGE_ACCESSED | _PFNX_MASK); pte.pte_high &= (_PFN_MASK | _CACHE_MASK); - pte.pte_low |= pgprot_val(newprot); - pte.pte_high |= pgprot_val(newprot) & ~(_PFN_MASK | _CACHE_MASK); + pte.pte_low |= pgprot_val(newprot) & ~_PFNX_MASK; + pte.pte_high |= pgprot_val(newprot) & ~_PFN_MASK; return pte; } #else diff --git a/arch/mips/kernel/cpu-probe.c b/arch/mips/kernel/cpu-probe.c index 48dfb9de853d..ac96817d1f99 100644 --- a/arch/mips/kernel/cpu-probe.c +++ b/arch/mips/kernel/cpu-probe.c @@ -516,6 +516,10 @@ static inline unsigned int decode_config5(struct cpuinfo_mips *c) c->options |= MIPS_CPU_MAAR; if (config5 & MIPS_CONF5_LLB) c->options |= MIPS_CPU_RW_LLB; +#ifdef CONFIG_XPA + if (config5 & MIPS_CONF5_MVH) + c->options |= MIPS_CPU_XPA; +#endif return config5 & MIPS_CONF_M; } diff --git a/arch/mips/kernel/proc.c b/arch/mips/kernel/proc.c index 130af7d26a9c..298b2b773d12 100644 --- a/arch/mips/kernel/proc.c +++ b/arch/mips/kernel/proc.c @@ -120,6 +120,7 @@ static int show_cpuinfo(struct seq_file *m, void *v) if (cpu_has_msa) seq_printf(m, "%s", " msa"); if (cpu_has_eva) seq_printf(m, "%s", " eva"); if (cpu_has_htw) seq_printf(m, "%s", " htw"); + if (cpu_has_xpa) seq_printf(m, "%s", " xpa"); seq_printf(m, "\n"); if (cpu_has_mmips) { diff --git a/arch/mips/mm/init.c b/arch/mips/mm/init.c index 448cde372af0..faa5c9822ecc 100644 --- a/arch/mips/mm/init.c +++ b/arch/mips/mm/init.c @@ -96,7 +96,7 @@ static void *__kmap_pgprot(struct page *page, unsigned long addr, pgprot_t prot) vaddr = __fix_to_virt(FIX_CMAP_END - idx); pte = mk_pte(page, prot); #if defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32) - entrylo = pte.pte_high; + entrylo = pte_to_entrylo(pte.pte_high); #else entrylo = pte_to_entrylo(pte_val(pte)); #endif @@ -106,6 +106,11 @@ static void *__kmap_pgprot(struct page *page, unsigned long addr, pgprot_t prot) write_c0_entryhi(vaddr & (PAGE_MASK << 1)); write_c0_entrylo0(entrylo); write_c0_entrylo1(entrylo); +#ifdef CONFIG_XPA + entrylo = (pte.pte_low & _PFNX_MASK); + writex_c0_entrylo0(entrylo); + writex_c0_entrylo1(entrylo); +#endif tlbidx = read_c0_wired(); write_c0_wired(tlbidx + 1); write_c0_index(tlbidx); diff --git a/arch/mips/mm/tlb-r4k.c b/arch/mips/mm/tlb-r4k.c index b2afa49beab0..c2500f4cb1d1 100644 --- 
a/arch/mips/mm/tlb-r4k.c +++ b/arch/mips/mm/tlb-r4k.c @@ -333,9 +333,17 @@ void __update_tlb(struct vm_area_struct * vma, unsigned long address, pte_t pte) ptep = pte_offset_map(pmdp, address); #if defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32) +#ifdef CONFIG_XPA + write_c0_entrylo0(pte_to_entrylo(ptep->pte_high)); + writex_c0_entrylo0(ptep->pte_low & _PFNX_MASK); + ptep++; + write_c0_entrylo1(pte_to_entrylo(ptep->pte_high)); + writex_c0_entrylo1(ptep->pte_low & _PFNX_MASK); +#else write_c0_entrylo0(ptep->pte_high); ptep++; write_c0_entrylo1(ptep->pte_high); +#endif #else write_c0_entrylo0(pte_to_entrylo(pte_val(*ptep++))); write_c0_entrylo1(pte_to_entrylo(pte_val(*ptep))); @@ -355,6 +363,9 @@ void __update_tlb(struct vm_area_struct * vma, unsigned long address, pte_t pte) void add_wired_entry(unsigned long entrylo0, unsigned long entrylo1, unsigned long entryhi, unsigned long pagemask) { +#ifdef CONFIG_XPA + panic("Broken for XPA kernels"); +#else unsigned long flags; unsigned long wired; unsigned long old_pagemask; @@ -383,6 +394,7 @@ void add_wired_entry(unsigned long entrylo0, unsigned long entrylo1, write_c0_pagemask(old_pagemask); local_flush_tlb_all(); local_irq_restore(flags); +#endif } #ifdef CONFIG_TRANSPARENT_HUGEPAGE diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c index 20d985901e44..7709920e0cef 100644 --- a/arch/mips/mm/tlbex.c +++ b/arch/mips/mm/tlbex.c @@ -35,6 +35,17 @@ #include #include +static int __cpuinitdata mips_xpa_disabled; + +static int __init xpa_disable(char *s) +{ + mips_xpa_disabled = 1; + + return 1; +} + +__setup("noxpa", xpa_disable); + /* * TLB load/store/modify handlers. * @@ -1027,12 +1038,27 @@ static void build_update_entries(u32 **p, unsigned int tmp, unsigned int ptep) } else { int pte_off_even = sizeof(pte_t) / 2; int pte_off_odd = pte_off_even + sizeof(pte_t); +#ifdef CONFIG_XPA + const int scratch = 1; /* Our extra working register */ - /* The pte entries are pre-shifted */ - uasm_i_lw(p, tmp, pte_off_even, ptep); /* get even pte */ - UASM_i_MTC0(p, tmp, C0_ENTRYLO0); /* load it */ - uasm_i_lw(p, ptep, pte_off_odd, ptep); /* get odd pte */ - UASM_i_MTC0(p, ptep, C0_ENTRYLO1); /* load it */ + uasm_i_addu(p, scratch, 0, ptep); +#endif + uasm_i_lw(p, tmp, pte_off_even, ptep); /* even pte */ + uasm_i_lw(p, ptep, pte_off_odd, ptep); /* odd pte */ + UASM_i_ROTR(p, tmp, tmp, ilog2(_PAGE_GLOBAL)); + UASM_i_ROTR(p, ptep, ptep, ilog2(_PAGE_GLOBAL)); + UASM_i_MTC0(p, tmp, C0_ENTRYLO0); + UASM_i_MTC0(p, ptep, C0_ENTRYLO1); +#ifdef CONFIG_XPA + uasm_i_lw(p, tmp, 0, scratch); + uasm_i_lw(p, ptep, sizeof(pte_t), scratch); + uasm_i_lui(p, scratch, 0xff); + uasm_i_ori(p, scratch, scratch, 0xffff); + uasm_i_and(p, tmp, scratch, tmp); + uasm_i_and(p, ptep, scratch, ptep); + uasm_i_mthc0(p, tmp, C0_ENTRYLO0); + uasm_i_mthc0(p, ptep, C0_ENTRYLO1); +#endif } #else UASM_i_LW(p, tmp, 0, ptep); /* get even pte */ @@ -1533,8 +1559,14 @@ iPTE_SW(u32 **p, struct uasm_reloc **r, unsigned int pte, unsigned int ptr, { #ifdef CONFIG_PHYS_ADDR_T_64BIT unsigned int hwmode = mode & (_PAGE_VALID | _PAGE_DIRTY); -#endif + if (!cpu_has_64bits) { + const int scratch = 1; /* Our extra working register */ + + uasm_i_lui(p, scratch, (mode >> 16)); + uasm_i_or(p, pte, pte, scratch); + } else +#endif uasm_i_ori(p, pte, pte, mode); #ifdef CONFIG_SMP # ifdef CONFIG_PHYS_ADDR_T_64BIT @@ -1598,15 +1630,17 @@ build_pte_present(u32 **p, struct uasm_reloc **r, uasm_il_bbit0(p, r, pte, ilog2(_PAGE_PRESENT), lid); uasm_i_nop(p); } else { - uasm_i_andi(p, t, pte, 
_PAGE_PRESENT); + uasm_i_srl(p, t, pte, _PAGE_PRESENT_SHIFT); + uasm_i_andi(p, t, t, 1); uasm_il_beqz(p, r, t, lid); if (pte == t) /* You lose the SMP race :-(*/ iPTE_LW(p, pte, ptr); } } else { - uasm_i_andi(p, t, pte, _PAGE_PRESENT | _PAGE_READ); - uasm_i_xori(p, t, t, _PAGE_PRESENT | _PAGE_READ); + uasm_i_srl(p, t, pte, _PAGE_PRESENT_SHIFT); + uasm_i_andi(p, t, t, 3); + uasm_i_xori(p, t, t, 3); uasm_il_bnez(p, r, t, lid); if (pte == t) /* You lose the SMP race :-(*/ @@ -1635,8 +1669,9 @@ build_pte_writable(u32 **p, struct uasm_reloc **r, { int t = scratch >= 0 ? scratch : pte; - uasm_i_andi(p, t, pte, _PAGE_PRESENT | _PAGE_WRITE); - uasm_i_xori(p, t, t, _PAGE_PRESENT | _PAGE_WRITE); + uasm_i_srl(p, t, pte, _PAGE_PRESENT_SHIFT); + uasm_i_andi(p, t, t, 5); + uasm_i_xori(p, t, t, 5); uasm_il_bnez(p, r, t, lid); if (pte == t) /* You lose the SMP race :-(*/ @@ -1672,7 +1707,8 @@ build_pte_modifiable(u32 **p, struct uasm_reloc **r, uasm_i_nop(p); } else { int t = scratch >= 0 ? scratch : pte; - uasm_i_andi(p, t, pte, _PAGE_WRITE); + uasm_i_srl(p, t, pte, _PAGE_WRITE_SHIFT); + uasm_i_andi(p, t, t, 1); uasm_il_beqz(p, r, t, lid); if (pte == t) /* You lose the SMP race :-(*/ @@ -2285,6 +2321,11 @@ static void config_htw_params(void) pwsize = ilog2(PTRS_PER_PGD) << MIPS_PWSIZE_GDW_SHIFT; pwsize |= ilog2(PTRS_PER_PTE) << MIPS_PWSIZE_PTW_SHIFT; + + /* If XPA has been enabled, PTEs are 64-bit in size. */ + if (read_c0_pagegrain() & PG_ELPA) + pwsize |= 1; + write_c0_pwsize(pwsize); /* Make sure everything is set before we enable the HTW */ @@ -2298,6 +2339,28 @@ static void config_htw_params(void) print_htw_config(); } +static void config_xpa_params(void) +{ +#ifdef CONFIG_XPA + unsigned int pagegrain; + + if (mips_xpa_disabled) { + pr_info("Extended Physical Addressing (XPA) disabled\n"); + return; + } + + pagegrain = read_c0_pagegrain(); + write_c0_pagegrain(pagegrain | PG_ELPA); + back_to_back_c0_hazard(); + pagegrain = read_c0_pagegrain(); + + if (pagegrain & PG_ELPA) + pr_info("Extended Physical Addressing (XPA) enabled\n"); + else + panic("Extended Physical Addressing (XPA) disabled"); +#endif +} + void build_tlb_refill_handler(void) { /* @@ -2362,8 +2425,9 @@ void build_tlb_refill_handler(void) } if (cpu_has_local_ebase) build_r4000_tlb_refill_handler(); + if (cpu_has_xpa) + config_xpa_params(); if (cpu_has_htw) config_htw_params(); - } } From 2ec459f2a77b808c1e5a3616c88b613d3f720c8d Mon Sep 17 00:00:00 2001 From: Nicolas Schichan Date: Thu, 12 Mar 2015 17:00:58 +0100 Subject: [PATCH 03/33] MIPS: BCM63xx: Move bcm63xx_gpio_init() to bcm63xx_register_devices(). When called from prom init code, bcm63xx_gpio_init() will fail, as it calls gpiochip_add(), which relies on a working kmalloc() to allocate the gpio_desc array, and kmalloc() is not usable yet at prom init time. Move bcm63xx_gpio_init() to bcm63xx_register_devices() (an arch_initcall) where kmalloc works. 
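For illustration, the constraint is ordinary initcall ordering: prom_init() runs before the slab allocator is brought up, while arch_initcall() functions run well after it, so kmalloc()-backed registration is safe there. A minimal sketch of the pattern, using hypothetical example_* names rather than the bcm63xx code:

#include <linux/init.h>
#include <linux/slab.h>

/*
 * Sketch only: work that needs kmalloc() (such as gpiochip_add()
 * allocating its gpio_desc array) must run from an initcall, not
 * from prom/early setup code.
 */
static int __init example_register_devices(void)
{
	void *state = kmalloc(64, GFP_KERNEL);	/* safe: slab is up */

	if (!state)
		return -ENOMEM;
	kfree(state);
	return 0;	/* a real driver would register its gpiochip here */
}
arch_initcall(example_register_devices);
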
Fixes: 14e85c0e69d5 ("gpio: remove gpio_descs global array") Signed-off-by: Nicolas Schichan Cc: linux-mips@linux-mips.org Cc: linux-kernel@vger.kernel.org Cc: Alexandre Courbot Patchwork: https://patchwork.linux-mips.org/patch/9530/ Signed-off-by: Ralf Baechle --- arch/mips/bcm63xx/prom.c | 4 ---- arch/mips/bcm63xx/setup.c | 4 ++++ 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/arch/mips/bcm63xx/prom.c b/arch/mips/bcm63xx/prom.c index e1f27d653f60..7019e2967009 100644 --- a/arch/mips/bcm63xx/prom.c +++ b/arch/mips/bcm63xx/prom.c @@ -17,7 +17,6 @@ #include #include #include -#include void __init prom_init(void) { @@ -53,9 +52,6 @@ void __init prom_init(void) reg &= ~mask; bcm_perf_writel(reg, PERF_CKCTL_REG); - /* register gpiochip */ - bcm63xx_gpio_init(); - /* do low level board init */ board_prom_init(); diff --git a/arch/mips/bcm63xx/setup.c b/arch/mips/bcm63xx/setup.c index 6660c7ddf87b..240fb4ffa55c 100644 --- a/arch/mips/bcm63xx/setup.c +++ b/arch/mips/bcm63xx/setup.c @@ -20,6 +20,7 @@ #include #include #include +#include void bcm63xx_machine_halt(void) { @@ -160,6 +161,9 @@ void __init plat_mem_setup(void) int __init bcm63xx_register_devices(void) { + /* register gpiochip */ + bcm63xx_gpio_init(); + return board_register_devices(); } From 0add9c2f1cff9f3f1f2eb7e9babefa872a9d14b9 Mon Sep 17 00:00:00 2001 From: Huacai Chen Date: Thu, 12 Mar 2015 11:51:06 +0800 Subject: [PATCH 04/33] MIPS: Loongson-3: Add IRQF_NO_SUSPEND to Cascade irqaction HPET irq is routed to i8259 and then to MIPS CPU irq (cascade). After commit a3e6c1eff5 (MIPS: IRQ: Fix disable_irq on CPU IRQs), without IRQF_NO_SUSPEND in cascade_irqaction, HPET interrupts will be lost during suspend. As a result, the machine cannot be woken up. Signed-off-by: Huacai Chen Cc: Cc: Steven J. Hill Cc: linux-mips@linux-mips.org Cc: Fuxin Zhang Cc: Zhangjin Wu Patchwork: https://patchwork.linux-mips.org/patch/9528/ Signed-off-by: Ralf Baechle --- arch/mips/loongson/loongson-3/irq.c | 1 + 1 file changed, 1 insertion(+) diff --git a/arch/mips/loongson/loongson-3/irq.c b/arch/mips/loongson/loongson-3/irq.c index 21221edda7a9..0f75b6b3d218 100644 --- a/arch/mips/loongson/loongson-3/irq.c +++ b/arch/mips/loongson/loongson-3/irq.c @@ -44,6 +44,7 @@ void mach_irq_dispatch(unsigned int pending) static struct irqaction cascade_irqaction = { .handler = no_action, + .flags = IRQF_NO_SUSPEND, .name = "cascade", }; From a8667d706dfa394ef9fe5f9013dee92d40a096e8 Mon Sep 17 00:00:00 2001 From: Aaro Koskinen Date: Wed, 4 Mar 2015 23:08:49 +0200 Subject: [PATCH 05/33] MIPS: OCTEON: dma-octeon: fix OHCI USB config check CONFIG_USB_OCTEON_OHCI is deprecated and no longer needed to use OHCI on OCTEON II. Instead, CONFIG_USB_OHCI_HCD_PLATFORM should be used. Signed-off-by: Aaro Koskinen Cc: Aleksey Makarov Cc: David Daney Cc: linux-mips@linux-mips.org Patchwork: https://patchwork.linux-mips.org/patch/9421/ Signed-off-by: Ralf Baechle --- arch/mips/cavium-octeon/dma-octeon.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/mips/cavium-octeon/dma-octeon.c b/arch/mips/cavium-octeon/dma-octeon.c index 7d8987818ccf..d8960d46417b 100644 --- a/arch/mips/cavium-octeon/dma-octeon.c +++ b/arch/mips/cavium-octeon/dma-octeon.c @@ -306,7 +306,7 @@ void __init plat_swiotlb_setup(void) swiotlbsize = 64 * (1<<20); } #endif -#ifdef CONFIG_USB_OCTEON_OHCI +#ifdef CONFIG_USB_OHCI_HCD_PLATFORM /* OCTEON II ohci is only 32-bit. 
*/ if (OCTEON_IS_OCTEON2() && max_addr >= 0x100000000ul) swiotlbsize = 64 * (1<<20); From 9a49899eb88803dcc0ef437f09912f9a7b7a66fd Mon Sep 17 00:00:00 2001 From: Chandrakala Chavva Date: Fri, 6 Mar 2015 14:02:21 +0300 Subject: [PATCH 06/33] MIPS: OCTEON: Use correct CSR to soft reset Also delete the unused cvmx_reset_octeon(). This fixes reboot for Octeon III boards. Signed-off-by: Chandrakala Chavva Signed-off-by: Aleksey Makarov Cc: linux-mips@linux-mips.org Cc: linux-kernel@vger.kernel.org Cc: David Daney Patchwork: https://patchwork.linux-mips.org/patch/9471/ Signed-off-by: Ralf Baechle --- arch/mips/cavium-octeon/setup.c | 5 ++++- arch/mips/include/asm/octeon/cvmx.h | 8 -------- 2 files changed, 4 insertions(+), 9 deletions(-) diff --git a/arch/mips/cavium-octeon/setup.c b/arch/mips/cavium-octeon/setup.c index a42110e7edbc..a7f40820e567 100644 --- a/arch/mips/cavium-octeon/setup.c +++ b/arch/mips/cavium-octeon/setup.c @@ -413,7 +413,10 @@ static void octeon_restart(char *command) mb(); while (1) - cvmx_write_csr(CVMX_CIU_SOFT_RST, 1); + if (OCTEON_IS_OCTEON3()) + cvmx_write_csr(CVMX_RST_SOFT_RST, 1); + else + cvmx_write_csr(CVMX_CIU_SOFT_RST, 1); } diff --git a/arch/mips/include/asm/octeon/cvmx.h b/arch/mips/include/asm/octeon/cvmx.h index 33db1c806b01..774bb45834cb 100644 --- a/arch/mips/include/asm/octeon/cvmx.h +++ b/arch/mips/include/asm/octeon/cvmx.h @@ -436,14 +436,6 @@ static inline uint64_t cvmx_get_cycle_global(void) /***************************************************************************/ -static inline void cvmx_reset_octeon(void) -{ - union cvmx_ciu_soft_rst ciu_soft_rst; - ciu_soft_rst.u64 = 0; - ciu_soft_rst.s.soft_rst = 1; - cvmx_write_csr(CVMX_CIU_SOFT_RST, ciu_soft_rst.u64); -} - /* Return the number of cores available in the chip */ static inline uint32_t cvmx_octeon_num_cores(void) { From 5b9593f3bccb9904f260f9ad7f184e1d2921bd1e Mon Sep 17 00:00:00 2001 From: Lars Persson Date: Thu, 26 Feb 2015 14:16:02 +0100 Subject: [PATCH 07/33] Revert "MIPS: Remove race window in page fault handling" Revert commit 2a4a8b1e5d9d ("MIPS: Remove race window in page fault handling") because it increased the number of flushed dcache pages and became a performance problem for some workloads. 
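For reference, the flow this revert restores (condensed from the pgtable.h hunk below, not a standalone build): set_pte_at() collapses back to a plain set_pte(), and the dcache flush is again deferred to update_mmu_cache(), so only mappings that actually take a TLB update pay for a flush:

#define set_pte_at(mm, addr, ptep, pteval) set_pte(ptep, pteval)

static inline void update_mmu_cache(struct vm_area_struct *vma,
				    unsigned long address, pte_t *ptep)
{
	pte_t pte = *ptep;

	__update_tlb(vma, address, pte);	/* install the TLB entry */
	__update_cache(vma, address, pte);	/* commit a deferred flush */
}
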
Signed-off-by: Lars Persson Cc: linux-mips@linux-mips.org Cc: paul.burton@imgtec.com Cc: linux-kernel@vger.kernel.org Patchwork: https://patchwork.linux-mips.org/patch/9345/ Signed-off-by: Ralf Baechle --- arch/mips/include/asm/pgtable.h | 9 +++++---- arch/mips/mm/cache.c | 27 ++++++++------------------- 2 files changed, 13 insertions(+), 23 deletions(-) diff --git a/arch/mips/include/asm/pgtable.h b/arch/mips/include/asm/pgtable.h index bffd46ca3694..819af9d057a8 100644 --- a/arch/mips/include/asm/pgtable.h +++ b/arch/mips/include/asm/pgtable.h @@ -127,10 +127,6 @@ do { \ } \ } while(0) - -extern void set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep, - pte_t pteval); - #if defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32) #define pte_none(pte) (!(((pte).pte_high) & ~_PAGE_GLOBAL)) @@ -152,6 +148,7 @@ static inline void set_pte(pte_t *ptep, pte_t pte) buddy->pte_high |= _PAGE_GLOBAL; } } +#define set_pte_at(mm, addr, ptep, pteval) set_pte(ptep, pteval) static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep) { @@ -190,6 +187,7 @@ static inline void set_pte(pte_t *ptep, pte_t pteval) } #endif } +#define set_pte_at(mm, addr, ptep, pteval) set_pte(ptep, pteval) static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep) { @@ -399,12 +397,15 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot) extern void __update_tlb(struct vm_area_struct *vma, unsigned long address, pte_t pte); +extern void __update_cache(struct vm_area_struct *vma, unsigned long address, + pte_t pte); static inline void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep) { pte_t pte = *ptep; __update_tlb(vma, address, pte); + __update_cache(vma, address, pte); } static inline void update_mmu_cache_pmd(struct vm_area_struct *vma, diff --git a/arch/mips/mm/cache.c b/arch/mips/mm/cache.c index 7e3ea7766822..f7b91d3a371d 100644 --- a/arch/mips/mm/cache.c +++ b/arch/mips/mm/cache.c @@ -119,36 +119,25 @@ void __flush_anon_page(struct page *page, unsigned long vmaddr) EXPORT_SYMBOL(__flush_anon_page); -static void mips_flush_dcache_from_pte(pte_t pteval, unsigned long address) +void __update_cache(struct vm_area_struct *vma, unsigned long address, + pte_t pte) { struct page *page; - unsigned long pfn = pte_pfn(pteval); + unsigned long pfn, addr; + int exec = (vma->vm_flags & VM_EXEC) && !cpu_has_ic_fills_f_dc; + pfn = pte_pfn(pte); if (unlikely(!pfn_valid(pfn))) return; - page = pfn_to_page(pfn); if (page_mapping(page) && Page_dcache_dirty(page)) { - unsigned long page_addr = (unsigned long) page_address(page); - - if (!cpu_has_ic_fills_f_dc || - pages_do_alias(page_addr, address & PAGE_MASK)) - flush_data_cache_page(page_addr); + addr = (unsigned long) page_address(page); + if (exec || pages_do_alias(addr, address & PAGE_MASK)) + flush_data_cache_page(addr); ClearPageDcacheDirty(page); } } -void set_pte_at(struct mm_struct *mm, unsigned long addr, - pte_t *ptep, pte_t pteval) -{ - if (cpu_has_dc_aliases || !cpu_has_ic_fills_f_dc) { - if (pte_present(pteval)) - mips_flush_dcache_from_pte(pteval, addr); - } - - set_pte(ptep, pteval); -} - unsigned long _page_cachable_default; EXPORT_SYMBOL(_page_cachable_default); From 4d46a67a3eb827ccf1125959936fd51ba318dabc Mon Sep 17 00:00:00 2001 From: Lars Persson Date: Thu, 26 Feb 2015 14:16:03 +0100 Subject: [PATCH 08/33] MIPS: Fix race condition in lazy cache flushing. 
The lazy cache flushing implemented in the MIPS kernel suffers from a race condition that is exposed by do_set_pte() in mm/memory.c. A pre-condition is a file-system that writes to the page from the CPU in its readpage method and then calls flush_dcache_page(). One example is ubifs. Another pre-condition is that the dcache flush is postponed in __flush_dcache_page(). Upon a page fault for an executable mapping not yet present in the page cache, the following will happen: 1. Write to the page 2. flush_dcache_page 3. flush_icache_page 4. set_pte_at 5. update_mmu_cache (commits the flush of a dcache-dirty page) Between steps 4 and 5 another thread can hit the same page and it will encounter a valid pte. Because the data is still in the L1 dcache, the CPU will fetch stale data from L2 into the icache and execute garbage. This fix moves the commit of the cache flush to step 3 to close the race window. It also reduces the number of flushes on non-executable mappings because we never enter __flush_dcache_page() for non-aliasing CPUs. Regressions can occur in drivers that mistakenly rely on the flush_dcache_page() in get_user_pages() for DMA operations. [ralf@linux-mips.org: Folded in patch 9346 to fix highmem issue.] Signed-off-by: Lars Persson Cc: linux-mips@linux-mips.org Cc: paul.burton@imgtec.com Cc: linux-kernel@vger.kernel.org Patchwork: https://patchwork.linux-mips.org/patch/9346/ Patchwork: https://patchwork.linux-mips.org/patch/9738/ Signed-off-by: Ralf Baechle --- arch/mips/include/asm/cacheflush.h | 38 ++++++++++++++++++------------ arch/mips/mm/cache.c | 12 ++++++++++ 2 files changed, 35 insertions(+), 15 deletions(-) diff --git a/arch/mips/include/asm/cacheflush.h b/arch/mips/include/asm/cacheflush.h index e08381a37f8b..723229f4cf27 100644 --- a/arch/mips/include/asm/cacheflush.h +++ b/arch/mips/include/asm/cacheflush.h @@ -29,6 +29,20 @@ * - flush_icache_all() flush the entire instruction cache * - flush_data_cache_page() flushes a page from the data cache */ + + /* + * This flag is used to indicate that the page pointed to by a pte + * is dirty and requires cleaning before returning it to the user. 
+ */ +#define PG_dcache_dirty PG_arch_1 + +#define Page_dcache_dirty(page) \ + test_bit(PG_dcache_dirty, &(page)->flags) +#define SetPageDcacheDirty(page) \ + set_bit(PG_dcache_dirty, &(page)->flags) +#define ClearPageDcacheDirty(page) \ + clear_bit(PG_dcache_dirty, &(page)->flags) + extern void (*flush_cache_all)(void); extern void (*__flush_cache_all)(void); extern void (*flush_cache_mm)(struct mm_struct *mm); @@ -37,13 +51,15 @@ extern void (*flush_cache_range)(struct vm_area_struct *vma, unsigned long start, unsigned long end); extern void (*flush_cache_page)(struct vm_area_struct *vma, unsigned long page, unsigned long pfn); extern void __flush_dcache_page(struct page *page); +extern void __flush_icache_page(struct vm_area_struct *vma, struct page *page); #define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1 static inline void flush_dcache_page(struct page *page) { - if (cpu_has_dc_aliases || !cpu_has_ic_fills_f_dc) + if (cpu_has_dc_aliases) __flush_dcache_page(page); - + else if (!cpu_has_ic_fills_f_dc) + SetPageDcacheDirty(page); } #define flush_dcache_mmap_lock(mapping) do { } while (0) @@ -61,6 +77,11 @@ static inline void flush_anon_page(struct vm_area_struct *vma, static inline void flush_icache_page(struct vm_area_struct *vma, struct page *page) { + if (!cpu_has_ic_fills_f_dc && (vma->vm_flags & VM_EXEC) && + Page_dcache_dirty(page)) { + __flush_icache_page(vma, page); + ClearPageDcacheDirty(page); + } } extern void (*flush_icache_range)(unsigned long start, unsigned long end); @@ -95,19 +116,6 @@ extern void (*flush_icache_all)(void); extern void (*local_flush_data_cache_page)(void * addr); extern void (*flush_data_cache_page)(unsigned long addr); -/* - * This flag is used to indicate that the page pointed to by a pte - * is dirty and requires cleaning before returning it to the user. - */ -#define PG_dcache_dirty PG_arch_1 - -#define Page_dcache_dirty(page) \ - test_bit(PG_dcache_dirty, &(page)->flags) -#define SetPageDcacheDirty(page) \ - set_bit(PG_dcache_dirty, &(page)->flags) -#define ClearPageDcacheDirty(page) \ - clear_bit(PG_dcache_dirty, &(page)->flags) - /* Run kernel code uncached, useful for cache probing functions. */ unsigned long run_uncached(void *func); diff --git a/arch/mips/mm/cache.c b/arch/mips/mm/cache.c index f7b91d3a371d..77d96db8253c 100644 --- a/arch/mips/mm/cache.c +++ b/arch/mips/mm/cache.c @@ -119,6 +119,18 @@ void __flush_anon_page(struct page *page, unsigned long vmaddr) EXPORT_SYMBOL(__flush_anon_page); +void __flush_icache_page(struct vm_area_struct *vma, struct page *page) +{ + unsigned long addr; + + if (PageHighMem(page)) + return; + + addr = (unsigned long) page_address(page); + flush_data_cache_page(addr); +} +EXPORT_SYMBOL_GPL(__flush_icache_page); + void __update_cache(struct vm_area_struct *vma, unsigned long address, pte_t pte) { From 73bf3c2a500b2db8ac966469591196bf55afb409 Mon Sep 17 00:00:00 2001 From: Alexander Sverdlin Date: Wed, 18 Mar 2015 14:05:21 +0100 Subject: [PATCH 09/33] MIPS: Octeon: Remove udelay() causing huge IRQ latency udelay() in PCI/PCIe read/write callbacks causes 30ms IRQ latency on Octeon platforms because these operations are called from PCI_OP_READ() and PCI_OP_WRITE() under raw_spin_lock_irqsave(). 
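The latency follows from how the PCI core serializes configuration accesses: every read/write runs under a single raw spinlock with interrupts disabled, so the removed udelay(PCI_CONFIG_SPACE_DELAY) of 10000 us stalled interrupts for the whole access. A simplified sketch of that wrapper, modelled on PCI_OP_READ() in drivers/pci/access.c rather than copied from it:

#include <linux/pci.h>
#include <linux/spinlock.h>

static DEFINE_RAW_SPINLOCK(example_pci_lock);	/* stands in for pci_lock */

static int example_pci_bus_read(struct pci_bus *bus, unsigned int devfn,
				int where, int size, u32 *val)
{
	unsigned long flags;
	int ret;

	raw_spin_lock_irqsave(&example_pci_lock, flags);
	/* IRQs are hard-disabled here, so any udelay() inside ->read()
	 * (as the removed Octeon code did) adds directly to IRQ latency. */
	ret = bus->ops->read(bus, devfn, where, size, val);
	raw_spin_unlock_irqrestore(&example_pci_lock, flags);
	return ret;
}
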
Signed-off-by: Alexander Sverdlin Cc: linux-mips@linux-mips.org Cc: David Daney Cc: Rob Herring Cc: Jiri Kosina Cc: Randy Dunlap Cc: Masanari Iida Cc: Bjorn Helgaas Cc: Mathias Patchwork: https://patchwork.linux-mips.org/patch/9576/ Signed-off-by: Ralf Baechle --- arch/mips/include/asm/octeon/pci-octeon.h | 3 --- arch/mips/pci/pci-octeon.c | 6 ------ arch/mips/pci/pcie-octeon.c | 8 -------- 3 files changed, 17 deletions(-) diff --git a/arch/mips/include/asm/octeon/pci-octeon.h b/arch/mips/include/asm/octeon/pci-octeon.h index 64ba56a02843..1884609741a8 100644 --- a/arch/mips/include/asm/octeon/pci-octeon.h +++ b/arch/mips/include/asm/octeon/pci-octeon.h @@ -11,9 +11,6 @@ #include -/* Some PCI cards require delays when accessing config space. */ -#define PCI_CONFIG_SPACE_DELAY 10000 - /* * The physical memory base mapped by BAR1. 256MB at the end of the * first 4GB. diff --git a/arch/mips/pci/pci-octeon.c b/arch/mips/pci/pci-octeon.c index a04af55d89f1..01c604a5ac36 100644 --- a/arch/mips/pci/pci-octeon.c +++ b/arch/mips/pci/pci-octeon.c @@ -271,9 +271,6 @@ static int octeon_read_config(struct pci_bus *bus, unsigned int devfn, pci_addr.s.func = devfn & 0x7; pci_addr.s.reg = reg; -#if PCI_CONFIG_SPACE_DELAY - udelay(PCI_CONFIG_SPACE_DELAY); -#endif switch (size) { case 4: *val = le32_to_cpu(cvmx_read64_uint32(pci_addr.u64)); @@ -308,9 +305,6 @@ static int octeon_write_config(struct pci_bus *bus, unsigned int devfn, pci_addr.s.func = devfn & 0x7; pci_addr.s.reg = reg; -#if PCI_CONFIG_SPACE_DELAY - udelay(PCI_CONFIG_SPACE_DELAY); -#endif switch (size) { case 4: cvmx_write64_uint32(pci_addr.u64, cpu_to_le32(val)); diff --git a/arch/mips/pci/pcie-octeon.c b/arch/mips/pci/pcie-octeon.c index 1bb0b2bf8d6e..99f3db4f0a9b 100644 --- a/arch/mips/pci/pcie-octeon.c +++ b/arch/mips/pci/pcie-octeon.c @@ -1762,14 +1762,6 @@ static int octeon_pcie_write_config(unsigned int pcie_port, struct pci_bus *bus, default: return PCIBIOS_FUNC_NOT_SUPPORTED; } -#if PCI_CONFIG_SPACE_DELAY - /* - * Delay on writes so that devices have time to come up. Some - * bridges need this to allow time for the secondary busses to - * work - */ - udelay(PCI_CONFIG_SPACE_DELAY); -#endif return PCIBIOS_SUCCESSFUL; } From b083518c52ab75a345d668ca7fa41e530df08d51 Mon Sep 17 00:00:00 2001 From: Aaro Koskinen Date: Sun, 22 Mar 2015 17:55:39 +0200 Subject: [PATCH 10/33] MIPS: OCTEON: fix PCI interrupt mapping for D-Link DSR-1000N Fix PCI interrupt mapping for the DSR1000N. This will get the PCI slot interrupts working. The mapping is based on the D-Link GPL tarball. Signed-off-by: Aaro Koskinen Cc: linux-mips@linux-mips.org Patchwork: https://patchwork.linux-mips.org/patch/9593/ Signed-off-by: Ralf Baechle --- arch/mips/pci/pci-octeon.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/arch/mips/pci/pci-octeon.c b/arch/mips/pci/pci-octeon.c index 01c604a5ac36..c258cd406fbb 100644 --- a/arch/mips/pci/pci-octeon.c +++ b/arch/mips/pci/pci-octeon.c @@ -214,6 +214,8 @@ const char *octeon_get_pci_interrupts(void) return "AAABAAAAAAAAAAAAAAAAAAAAAAAAAAAA"; case CVMX_BOARD_TYPE_BBGW_REF: return "AABCD"; + case CVMX_BOARD_TYPE_CUST_DSR1000N: + return "CCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCC"; case CVMX_BOARD_TYPE_THUNDER: case CVMX_BOARD_TYPE_EBH3000: default: From 872cd4c2c617bb3a203ebe18115fd0c697112b87 Mon Sep 17 00:00:00 2001 From: Ganesan Ramalingam Date: Wed, 7 Jan 2015 16:58:26 +0530 Subject: [PATCH 11/33] MIPS: Netlogic: Fix for SATA PHY init Update the SATA PHY initialization. This is needed for SATA detection to succeed in all configurations. 
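The added code (diff below) is a series of read-modify-write accesses to vendor-specific PHY registers; the register numbers come from the patch and their field meanings do not appear to be publicly documented. A hedged sketch of the same sequences, expressed through a hypothetical helper built on the file's existing read_phy_reg()/write_phy_reg() accessors:

/* Hypothetical helper: clear then set bits in one SATA PHY register. */
static void example_phy_update(u64 regbase, u32 reg, u32 port,
			       u8 clear, u8 set)
{
	u8 val = read_phy_reg(regbase, reg, port);

	write_phy_reg(regbase, reg, port, (val & ~clear) | set);
}

static void example_phy_fixups(u64 regbase, u32 port)
{
	write_phy_reg(regbase, 0x800F, port, 0x1f);		/* as in the patch */
	example_phy_update(regbase, 0x0029, port, 0, 0x7 << 1);	/* set bits 3:1 */
	example_phy_update(regbase, 0x0056, port, 1 << 3, 0);	/* clear bit 3 */
	example_phy_update(regbase, 0x0018, port, 0x7 << 0, 0);	/* clear bits 2:0 */
}
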
Signed-off-by: Ganesan Ramalingam Signed-off-by: Jayachandran C Cc: linux-mips@linux-mips.org Patchwork: https://patchwork.linux-mips.org/patch/8886/ Signed-off-by: Ralf Baechle --- arch/mips/netlogic/xlp/ahci-init-xlp2.c | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/arch/mips/netlogic/xlp/ahci-init-xlp2.c b/arch/mips/netlogic/xlp/ahci-init-xlp2.c index c83dbf3689e2..7b066a44e679 100644 --- a/arch/mips/netlogic/xlp/ahci-init-xlp2.c +++ b/arch/mips/netlogic/xlp/ahci-init-xlp2.c @@ -203,6 +203,7 @@ static u8 read_phy_reg(u64 regbase, u32 addr, u32 physel) static void config_sata_phy(u64 regbase) { u32 port, i, reg; + u8 val; for (port = 0; port < 2; port++) { for (i = 0, reg = RXCDRCALFOSC0; reg <= CALDUTY; reg++, i++) @@ -210,6 +211,18 @@ static void config_sata_phy(u64 regbase) for (i = 0, reg = RXDPIF; reg <= PPMDRIFTMAX_HI; reg++, i++) write_phy_reg(regbase, reg, port, sata_phy_config2[i]); + + /* Fix for PHY link up failures at lower temperatures */ + write_phy_reg(regbase, 0x800F, port, 0x1f); + + val = read_phy_reg(regbase, 0x0029, port); + write_phy_reg(regbase, 0x0029, port, val | (0x7 << 1)); + + val = read_phy_reg(regbase, 0x0056, port); + write_phy_reg(regbase, 0x0056, port, val & ~(1 << 3)); + + val = read_phy_reg(regbase, 0x0018, port); + write_phy_reg(regbase, 0x0018, port, val & ~(0x7 << 0)); } } From 60cd7e08e453bc6828ac4b539f949e4acd80f143 Mon Sep 17 00:00:00 2001 From: Markos Chandras Date: Mon, 9 Mar 2015 14:54:49 +0000 Subject: [PATCH 12/33] MIPS: asm: asm-eva: Introduce kernel load/store variants Introduce new macros for kernel load/store variants which will be used to perform regular kernel space load/store operations in EVA mode. Signed-off-by: Markos Chandras Cc: # v3.15+ Cc: linux-mips@linux-mips.org Patchwork: https://patchwork.linux-mips.org/patch/9500/ Signed-off-by: Ralf Baechle --- arch/mips/include/asm/asm-eva.h | 137 ++++++++++++++++++++++---------- 1 file changed, 93 insertions(+), 44 deletions(-) diff --git a/arch/mips/include/asm/asm-eva.h b/arch/mips/include/asm/asm-eva.h index e41c56e375b1..1e38f0e1ea3e 100644 --- a/arch/mips/include/asm/asm-eva.h +++ b/arch/mips/include/asm/asm-eva.h @@ -11,6 +11,36 @@ #define __ASM_ASM_EVA_H #ifndef __ASSEMBLY__ + +/* Kernel variants */ + +#define kernel_cache(op, base) "cache " op ", " base "\n" +#define kernel_ll(reg, addr) "ll " reg ", " addr "\n" +#define kernel_sc(reg, addr) "sc " reg ", " addr "\n" +#define kernel_lw(reg, addr) "lw " reg ", " addr "\n" +#define kernel_lwl(reg, addr) "lwl " reg ", " addr "\n" +#define kernel_lwr(reg, addr) "lwr " reg ", " addr "\n" +#define kernel_lh(reg, addr) "lh " reg ", " addr "\n" +#define kernel_lb(reg, addr) "lb " reg ", " addr "\n" +#define kernel_lbu(reg, addr) "lbu " reg ", " addr "\n" +#define kernel_sw(reg, addr) "sw " reg ", " addr "\n" +#define kernel_swl(reg, addr) "swl " reg ", " addr "\n" +#define kernel_swr(reg, addr) "swr " reg ", " addr "\n" +#define kernel_sh(reg, addr) "sh " reg ", " addr "\n" +#define kernel_sb(reg, addr) "sb " reg ", " addr "\n" + +#ifdef CONFIG_32BIT +/* + * No 'sd' or 'ld' instructions in 32-bit but the code will + * do the correct thing + */ +#define kernel_sd(reg, addr) user_sw(reg, addr) +#define kernel_ld(reg, addr) user_lw(reg, addr) +#else +#define kernel_sd(reg, addr) "sd " reg", " addr "\n" +#define kernel_ld(reg, addr) "ld " reg", " addr "\n" +#endif /* CONFIG_32BIT */ + #ifdef CONFIG_EVA #define __BUILD_EVA_INSN(insn, reg, addr) \ @@ -41,37 +71,60 @@ #else -#define user_cache(op, base) "cache " op ", " base 
"\n" -#define user_ll(reg, addr) "ll " reg ", " addr "\n" -#define user_sc(reg, addr) "sc " reg ", " addr "\n" -#define user_lw(reg, addr) "lw " reg ", " addr "\n" -#define user_lwl(reg, addr) "lwl " reg ", " addr "\n" -#define user_lwr(reg, addr) "lwr " reg ", " addr "\n" -#define user_lh(reg, addr) "lh " reg ", " addr "\n" -#define user_lb(reg, addr) "lb " reg ", " addr "\n" -#define user_lbu(reg, addr) "lbu " reg ", " addr "\n" -#define user_sw(reg, addr) "sw " reg ", " addr "\n" -#define user_swl(reg, addr) "swl " reg ", " addr "\n" -#define user_swr(reg, addr) "swr " reg ", " addr "\n" -#define user_sh(reg, addr) "sh " reg ", " addr "\n" -#define user_sb(reg, addr) "sb " reg ", " addr "\n" +#define user_cache(op, base) kernel_cache(op, base) +#define user_ll(reg, addr) kernel_ll(reg, addr) +#define user_sc(reg, addr) kernel_sc(reg, addr) +#define user_lw(reg, addr) kernel_lw(reg, addr) +#define user_lwl(reg, addr) kernel_lwl(reg, addr) +#define user_lwr(reg, addr) kernel_lwr(reg, addr) +#define user_lh(reg, addr) kernel_lh(reg, addr) +#define user_lb(reg, addr) kernel_lb(reg, addr) +#define user_lbu(reg, addr) kernel_lbu(reg, addr) +#define user_sw(reg, addr) kernel_sw(reg, addr) +#define user_swl(reg, addr) kernel_swl(reg, addr) +#define user_swr(reg, addr) kernel_swr(reg, addr) +#define user_sh(reg, addr) kernel_sh(reg, addr) +#define user_sb(reg, addr) kernel_sb(reg, addr) + +#ifdef CONFIG_32BIT +#define user_sd(reg, addr) kernel_sw(reg, addr) +#define user_ld(reg, addr) kernel_lw(reg, addr) +#else +#define user_sd(reg, addr) kernel_sd(reg, addr) +#define user_ld(reg, addr) kernel_ld(reg, addr) +#endif /* CONFIG_32BIT */ + +#endif /* CONFIG_EVA */ + +#else /* __ASSEMBLY__ */ + +#define kernel_cache(op, base) cache op, base +#define kernel_ll(reg, addr) ll reg, addr +#define kernel_sc(reg, addr) sc reg, addr +#define kernel_lw(reg, addr) lw reg, addr +#define kernel_lwl(reg, addr) lwl reg, addr +#define kernel_lwr(reg, addr) lwr reg, addr +#define kernel_lh(reg, addr) lh reg, addr +#define kernel_lb(reg, addr) lb reg, addr +#define kernel_lbu(reg, addr) lbu reg, addr +#define kernel_sw(reg, addr) sw reg, addr +#define kernel_swl(reg, addr) swl reg, addr +#define kernel_swr(reg, addr) swr reg, addr +#define kernel_sh(reg, addr) sh reg, addr +#define kernel_sb(reg, addr) sb reg, addr #ifdef CONFIG_32BIT /* * No 'sd' or 'ld' instructions in 32-bit but the code will * do the correct thing */ -#define user_sd(reg, addr) user_sw(reg, addr) -#define user_ld(reg, addr) user_lw(reg, addr) +#define kernel_sd(reg, addr) user_sw(reg, addr) +#define kernel_ld(reg, addr) user_lw(reg, addr) #else -#define user_sd(reg, addr) "sd " reg", " addr "\n" -#define user_ld(reg, addr) "ld " reg", " addr "\n" +#define kernel_sd(reg, addr) sd reg, addr +#define kernel_ld(reg, addr) ld reg, addr #endif /* CONFIG_32BIT */ -#endif /* CONFIG_EVA */ - -#else /* __ASSEMBLY__ */ - #ifdef CONFIG_EVA #define __BUILD_EVA_INSN(insn, reg, addr) \ @@ -101,31 +154,27 @@ #define user_sd(reg, addr) user_sw(reg, addr) #else -#define user_cache(op, base) cache op, base -#define user_ll(reg, addr) ll reg, addr -#define user_sc(reg, addr) sc reg, addr -#define user_lw(reg, addr) lw reg, addr -#define user_lwl(reg, addr) lwl reg, addr -#define user_lwr(reg, addr) lwr reg, addr -#define user_lh(reg, addr) lh reg, addr -#define user_lb(reg, addr) lb reg, addr -#define user_lbu(reg, addr) lbu reg, addr -#define user_sw(reg, addr) sw reg, addr -#define user_swl(reg, addr) swl reg, addr -#define user_swr(reg, addr) swr reg, addr 
-#define user_sh(reg, addr) sh reg, addr -#define user_sb(reg, addr) sb reg, addr +#define user_cache(op, base) kernel_cache(op, base) +#define user_ll(reg, addr) kernel_ll(reg, addr) +#define user_sc(reg, addr) kernel_sc(reg, addr) +#define user_lw(reg, addr) kernel_lw(reg, addr) +#define user_lwl(reg, addr) kernel_lwl(reg, addr) +#define user_lwr(reg, addr) kernel_lwr(reg, addr) +#define user_lh(reg, addr) kernel_lh(reg, addr) +#define user_lb(reg, addr) kernel_lb(reg, addr) +#define user_lbu(reg, addr) kernel_lbu(reg, addr) +#define user_sw(reg, addr) kernel_sw(reg, addr) +#define user_swl(reg, addr) kernel_swl(reg, addr) +#define user_swr(reg, addr) kernel_swr(reg, addr) +#define user_sh(reg, addr) kernel_sh(reg, addr) +#define user_sb(reg, addr) kernel_sb(reg, addr) #ifdef CONFIG_32BIT -/* - * No 'sd' or 'ld' instructions in 32-bit but the code will - * do the correct thing - */ -#define user_sd(reg, addr) user_sw(reg, addr) -#define user_ld(reg, addr) user_lw(reg, addr) +#define user_sd(reg, addr) kernel_sw(reg, addr) +#define user_ld(reg, addr) kernel_lw(reg, addr) #else -#define user_sd(reg, addr) sd reg, addr -#define user_ld(reg, addr) ld reg, addr +#define user_sd(reg, addr) kernel_sd(reg, addr) +#define user_ld(reg, addr) kernel_ld(reg, addr) #endif /* CONFIG_32BIT */ #endif /* CONFIG_EVA */ From eeb538950367e3966cbf0237ab1a1dc30e059818 Mon Sep 17 00:00:00 2001 From: Markos Chandras Date: Mon, 9 Mar 2015 14:54:50 +0000 Subject: [PATCH 13/33] MIPS: unaligned: Prevent EVA instructions on kernel unaligned accesses Commit c1771216ab48 ("MIPS: kernel: unaligned: Handle unaligned accesses for EVA") allowed unaligned accesses to be emulated for EVA. However, when emulating regular load/store unaligned accesses, we need to use the appropriate "address space" instructions for that. Previously, an unaligned load/store instruction in kernel space would have used the corresponding EVA instructions to emulate it, which led to segmentation faults because of the address translation that happens with EVA instructions. This is now fixed by using the EVA instruction only when emulating EVA unaligned accesses. 
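The mechanism behind the fix is the token-pasting dispatch set up by the previous patch: each emulation macro gains a type parameter, and type##_lb expands to either kernel_lb or user_lb depending on the caller. A standalone, deliberately simplified demo of that dispatch; the LoadB/LoadBE wrapper names are illustrative only, and the real macros also carry the fault-handling asm operands:

#include <stdio.h>

#define kernel_lb(reg, addr)	"lb " reg ", " addr "\n"  /* plain MIPS load */
#define user_lb(reg, addr)	"lbe " reg ", " addr "\n" /* EVA load variant */

/* Shared emulation body: 'type' selects the accessor family. */
#define _LoadB(reg, addr, type)	type##_lb(reg, addr)

#define LoadB(reg, addr)	_LoadB(reg, addr, kernel)  /* kernel access */
#define LoadBE(reg, addr)	_LoadB(reg, addr, user)    /* EVA user access */

int main(void)
{
	fputs(LoadB("%0", "0(%2)"), stdout);	/* prints: lb %0, 0(%2) */
	fputs(LoadBE("%0", "0(%2)"), stdout);	/* prints: lbe %0, 0(%2) */
	return 0;
}
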
Signed-off-by: Markos Chandras Fixes: c1771216ab48 ("MIPS: kernel: unaligned: Handle unaligned accesses for EVA") Cc: # v3.15+ Cc: linux-mips@linux-mips.org Patchwork: https://patchwork.linux-mips.org/patch/9501/ Signed-off-by: Ralf Baechle --- arch/mips/kernel/unaligned.c | 172 +++++++++++++++++++---------------- 1 file changed, 94 insertions(+), 78 deletions(-) diff --git a/arch/mips/kernel/unaligned.c b/arch/mips/kernel/unaligned.c index bbb69695a0a1..7a5707eea898 100644 --- a/arch/mips/kernel/unaligned.c +++ b/arch/mips/kernel/unaligned.c @@ -109,10 +109,10 @@ static u32 unaligned_action; extern void show_registers(struct pt_regs *regs); #ifdef __BIG_ENDIAN -#define LoadHW(addr, value, res) \ +#define _LoadHW(addr, value, res, type) \ __asm__ __volatile__ (".set\tnoat\n" \ - "1:\t"user_lb("%0", "0(%2)")"\n" \ - "2:\t"user_lbu("$1", "1(%2)")"\n\t" \ + "1:\t"type##_lb("%0", "0(%2)")"\n" \ + "2:\t"type##_lbu("$1", "1(%2)")"\n\t"\ "sll\t%0, 0x8\n\t" \ "or\t%0, $1\n\t" \ "li\t%1, 0\n" \ @@ -130,10 +130,10 @@ extern void show_registers(struct pt_regs *regs); : "r" (addr), "i" (-EFAULT)); #ifndef CONFIG_CPU_MIPSR6 -#define LoadW(addr, value, res) \ +#define _LoadW(addr, value, res, type) \ __asm__ __volatile__ ( \ - "1:\t"user_lwl("%0", "(%2)")"\n" \ - "2:\t"user_lwr("%0", "3(%2)")"\n\t" \ + "1:\t"type##_lwl("%0", "(%2)")"\n" \ + "2:\t"type##_lwr("%0", "3(%2)")"\n\t"\ "li\t%1, 0\n" \ "3:\n\t" \ ".insn\n\t" \ @@ -149,18 +149,18 @@ extern void show_registers(struct pt_regs *regs); : "r" (addr), "i" (-EFAULT)); #else /* MIPSR6 has no lwl instruction */ -#define LoadW(addr, value, res) \ +#define _LoadW(addr, value, res, type) \ __asm__ __volatile__ ( \ ".set\tpush\n" \ ".set\tnoat\n\t" \ - "1:"user_lb("%0", "0(%2)")"\n\t" \ - "2:"user_lbu("$1", "1(%2)")"\n\t" \ + "1:"type##_lb("%0", "0(%2)")"\n\t" \ + "2:"type##_lbu("$1", "1(%2)")"\n\t" \ "sll\t%0, 0x8\n\t" \ "or\t%0, $1\n\t" \ - "3:"user_lbu("$1", "2(%2)")"\n\t" \ + "3:"type##_lbu("$1", "2(%2)")"\n\t" \ "sll\t%0, 0x8\n\t" \ "or\t%0, $1\n\t" \ - "4:"user_lbu("$1", "3(%2)")"\n\t" \ + "4:"type##_lbu("$1", "3(%2)")"\n\t" \ "sll\t%0, 0x8\n\t" \ "or\t%0, $1\n\t" \ "li\t%1, 0\n" \ @@ -181,11 +181,11 @@ extern void show_registers(struct pt_regs *regs); : "r" (addr), "i" (-EFAULT)); #endif /* CONFIG_CPU_MIPSR6 */ -#define LoadHWU(addr, value, res) \ +#define _LoadHWU(addr, value, res, type) \ __asm__ __volatile__ ( \ ".set\tnoat\n" \ - "1:\t"user_lbu("%0", "0(%2)")"\n" \ - "2:\t"user_lbu("$1", "1(%2)")"\n\t" \ + "1:\t"type##_lbu("%0", "0(%2)")"\n" \ + "2:\t"type##_lbu("$1", "1(%2)")"\n\t"\ "sll\t%0, 0x8\n\t" \ "or\t%0, $1\n\t" \ "li\t%1, 0\n" \ @@ -204,10 +204,10 @@ extern void show_registers(struct pt_regs *regs); : "r" (addr), "i" (-EFAULT)); #ifndef CONFIG_CPU_MIPSR6 -#define LoadWU(addr, value, res) \ +#define _LoadWU(addr, value, res, type) \ __asm__ __volatile__ ( \ - "1:\t"user_lwl("%0", "(%2)")"\n" \ - "2:\t"user_lwr("%0", "3(%2)")"\n\t" \ + "1:\t"type##_lwl("%0", "(%2)")"\n" \ + "2:\t"type##_lwr("%0", "3(%2)")"\n\t"\ "dsll\t%0, %0, 32\n\t" \ "dsrl\t%0, %0, 32\n\t" \ "li\t%1, 0\n" \ @@ -224,7 +224,7 @@ extern void show_registers(struct pt_regs *regs); : "=&r" (value), "=r" (res) \ : "r" (addr), "i" (-EFAULT)); -#define LoadDW(addr, value, res) \ +#define _LoadDW(addr, value, res) \ __asm__ __volatile__ ( \ "1:\tldl\t%0, (%2)\n" \ "2:\tldr\t%0, 7(%2)\n\t" \ @@ -243,18 +243,18 @@ extern void show_registers(struct pt_regs *regs); : "r" (addr), "i" (-EFAULT)); #else /* MIPSR6 has not lwl and ldl instructions */ -#define LoadWU(addr, value, res) \ 
+#define _LoadWU(addr, value, res, type) \ __asm__ __volatile__ ( \ ".set\tpush\n\t" \ ".set\tnoat\n\t" \ - "1:"user_lbu("%0", "0(%2)")"\n\t" \ - "2:"user_lbu("$1", "1(%2)")"\n\t" \ + "1:"type##_lbu("%0", "0(%2)")"\n\t" \ + "2:"type##_lbu("$1", "1(%2)")"\n\t" \ "sll\t%0, 0x8\n\t" \ "or\t%0, $1\n\t" \ - "3:"user_lbu("$1", "2(%2)")"\n\t" \ + "3:"type##_lbu("$1", "2(%2)")"\n\t" \ "sll\t%0, 0x8\n\t" \ "or\t%0, $1\n\t" \ - "4:"user_lbu("$1", "3(%2)")"\n\t" \ + "4:"type##_lbu("$1", "3(%2)")"\n\t" \ "sll\t%0, 0x8\n\t" \ "or\t%0, $1\n\t" \ "li\t%1, 0\n" \ @@ -274,7 +274,7 @@ extern void show_registers(struct pt_regs *regs); : "=&r" (value), "=r" (res) \ : "r" (addr), "i" (-EFAULT)); -#define LoadDW(addr, value, res) \ +#define _LoadDW(addr, value, res) \ __asm__ __volatile__ ( \ ".set\tpush\n\t" \ ".set\tnoat\n\t" \ @@ -323,12 +323,12 @@ extern void show_registers(struct pt_regs *regs); #endif /* CONFIG_CPU_MIPSR6 */ -#define StoreHW(addr, value, res) \ +#define _StoreHW(addr, value, res, type) \ __asm__ __volatile__ ( \ ".set\tnoat\n" \ - "1:\t"user_sb("%1", "1(%2)")"\n" \ + "1:\t"type##_sb("%1", "1(%2)")"\n" \ "srl\t$1, %1, 0x8\n" \ - "2:\t"user_sb("$1", "0(%2)")"\n" \ + "2:\t"type##_sb("$1", "0(%2)")"\n" \ ".set\tat\n\t" \ "li\t%0, 0\n" \ "3:\n\t" \ @@ -345,10 +345,10 @@ extern void show_registers(struct pt_regs *regs); : "r" (value), "r" (addr), "i" (-EFAULT)); #ifndef CONFIG_CPU_MIPSR6 -#define StoreW(addr, value, res) \ +#define _StoreW(addr, value, res, type) \ __asm__ __volatile__ ( \ - "1:\t"user_swl("%1", "(%2)")"\n" \ - "2:\t"user_swr("%1", "3(%2)")"\n\t" \ + "1:\t"type##_swl("%1", "(%2)")"\n" \ + "2:\t"type##_swr("%1", "3(%2)")"\n\t"\ "li\t%0, 0\n" \ "3:\n\t" \ ".insn\n\t" \ @@ -363,7 +363,7 @@ extern void show_registers(struct pt_regs *regs); : "=r" (res) \ : "r" (value), "r" (addr), "i" (-EFAULT)); -#define StoreDW(addr, value, res) \ +#define _StoreDW(addr, value, res) \ __asm__ __volatile__ ( \ "1:\tsdl\t%1,(%2)\n" \ "2:\tsdr\t%1, 7(%2)\n\t" \ @@ -382,17 +382,17 @@ extern void show_registers(struct pt_regs *regs); : "r" (value), "r" (addr), "i" (-EFAULT)); #else /* MIPSR6 has no swl and sdl instructions */ -#define StoreW(addr, value, res) \ +#define _StoreW(addr, value, res, type) \ __asm__ __volatile__ ( \ ".set\tpush\n\t" \ ".set\tnoat\n\t" \ - "1:"user_sb("%1", "3(%2)")"\n\t" \ + "1:"type##_sb("%1", "3(%2)")"\n\t" \ "srl\t$1, %1, 0x8\n\t" \ - "2:"user_sb("$1", "2(%2)")"\n\t" \ + "2:"type##_sb("$1", "2(%2)")"\n\t" \ "srl\t$1, $1, 0x8\n\t" \ - "3:"user_sb("$1", "1(%2)")"\n\t" \ + "3:"type##_sb("$1", "1(%2)")"\n\t" \ "srl\t$1, $1, 0x8\n\t" \ - "4:"user_sb("$1", "0(%2)")"\n\t" \ + "4:"type##_sb("$1", "0(%2)")"\n\t" \ ".set\tpop\n\t" \ "li\t%0, 0\n" \ "10:\n\t" \ @@ -456,10 +456,10 @@ extern void show_registers(struct pt_regs *regs); #else /* __BIG_ENDIAN */ -#define LoadHW(addr, value, res) \ +#define _LoadHW(addr, value, res, type) \ __asm__ __volatile__ (".set\tnoat\n" \ - "1:\t"user_lb("%0", "1(%2)")"\n" \ - "2:\t"user_lbu("$1", "0(%2)")"\n\t" \ + "1:\t"type##_lb("%0", "1(%2)")"\n" \ + "2:\t"type##_lbu("$1", "0(%2)")"\n\t"\ "sll\t%0, 0x8\n\t" \ "or\t%0, $1\n\t" \ "li\t%1, 0\n" \ @@ -477,10 +477,10 @@ extern void show_registers(struct pt_regs *regs); : "r" (addr), "i" (-EFAULT)); #ifndef CONFIG_CPU_MIPSR6 -#define LoadW(addr, value, res) \ +#define _LoadW(addr, value, res, type) \ __asm__ __volatile__ ( \ - "1:\t"user_lwl("%0", "3(%2)")"\n" \ - "2:\t"user_lwr("%0", "(%2)")"\n\t" \ + "1:\t"type##_lwl("%0", "3(%2)")"\n" \ + "2:\t"type##_lwr("%0", "(%2)")"\n\t"\ "li\t%1, 0\n" \ 
"3:\n\t" \ ".insn\n\t" \ @@ -496,18 +496,18 @@ extern void show_registers(struct pt_regs *regs); : "r" (addr), "i" (-EFAULT)); #else /* MIPSR6 has no lwl instruction */ -#define LoadW(addr, value, res) \ +#define _LoadW(addr, value, res, type) \ __asm__ __volatile__ ( \ ".set\tpush\n" \ ".set\tnoat\n\t" \ - "1:"user_lb("%0", "3(%2)")"\n\t" \ - "2:"user_lbu("$1", "2(%2)")"\n\t" \ + "1:"type##_lb("%0", "3(%2)")"\n\t" \ + "2:"type##_lbu("$1", "2(%2)")"\n\t" \ "sll\t%0, 0x8\n\t" \ "or\t%0, $1\n\t" \ - "3:"user_lbu("$1", "1(%2)")"\n\t" \ + "3:"type##_lbu("$1", "1(%2)")"\n\t" \ "sll\t%0, 0x8\n\t" \ "or\t%0, $1\n\t" \ - "4:"user_lbu("$1", "0(%2)")"\n\t" \ + "4:"type##_lbu("$1", "0(%2)")"\n\t" \ "sll\t%0, 0x8\n\t" \ "or\t%0, $1\n\t" \ "li\t%1, 0\n" \ @@ -529,11 +529,11 @@ extern void show_registers(struct pt_regs *regs); #endif /* CONFIG_CPU_MIPSR6 */ -#define LoadHWU(addr, value, res) \ +#define _LoadHWU(addr, value, res, type) \ __asm__ __volatile__ ( \ ".set\tnoat\n" \ - "1:\t"user_lbu("%0", "1(%2)")"\n" \ - "2:\t"user_lbu("$1", "0(%2)")"\n\t" \ + "1:\t"type##_lbu("%0", "1(%2)")"\n" \ + "2:\t"type##_lbu("$1", "0(%2)")"\n\t"\ "sll\t%0, 0x8\n\t" \ "or\t%0, $1\n\t" \ "li\t%1, 0\n" \ @@ -552,10 +552,10 @@ extern void show_registers(struct pt_regs *regs); : "r" (addr), "i" (-EFAULT)); #ifndef CONFIG_CPU_MIPSR6 -#define LoadWU(addr, value, res) \ +#define _LoadWU(addr, value, res, type) \ __asm__ __volatile__ ( \ - "1:\t"user_lwl("%0", "3(%2)")"\n" \ - "2:\t"user_lwr("%0", "(%2)")"\n\t" \ + "1:\t"type##_lwl("%0", "3(%2)")"\n" \ + "2:\t"type##_lwr("%0", "(%2)")"\n\t"\ "dsll\t%0, %0, 32\n\t" \ "dsrl\t%0, %0, 32\n\t" \ "li\t%1, 0\n" \ @@ -572,7 +572,7 @@ extern void show_registers(struct pt_regs *regs); : "=&r" (value), "=r" (res) \ : "r" (addr), "i" (-EFAULT)); -#define LoadDW(addr, value, res) \ +#define _LoadDW(addr, value, res) \ __asm__ __volatile__ ( \ "1:\tldl\t%0, 7(%2)\n" \ "2:\tldr\t%0, (%2)\n\t" \ @@ -591,18 +591,18 @@ extern void show_registers(struct pt_regs *regs); : "r" (addr), "i" (-EFAULT)); #else /* MIPSR6 has not lwl and ldl instructions */ -#define LoadWU(addr, value, res) \ +#define _LoadWU(addr, value, res, type) \ __asm__ __volatile__ ( \ ".set\tpush\n\t" \ ".set\tnoat\n\t" \ - "1:"user_lbu("%0", "3(%2)")"\n\t" \ - "2:"user_lbu("$1", "2(%2)")"\n\t" \ + "1:"type##_lbu("%0", "3(%2)")"\n\t" \ + "2:"type##_lbu("$1", "2(%2)")"\n\t" \ "sll\t%0, 0x8\n\t" \ "or\t%0, $1\n\t" \ - "3:"user_lbu("$1", "1(%2)")"\n\t" \ + "3:"type##_lbu("$1", "1(%2)")"\n\t" \ "sll\t%0, 0x8\n\t" \ "or\t%0, $1\n\t" \ - "4:"user_lbu("$1", "0(%2)")"\n\t" \ + "4:"type##_lbu("$1", "0(%2)")"\n\t" \ "sll\t%0, 0x8\n\t" \ "or\t%0, $1\n\t" \ "li\t%1, 0\n" \ @@ -622,7 +622,7 @@ extern void show_registers(struct pt_regs *regs); : "=&r" (value), "=r" (res) \ : "r" (addr), "i" (-EFAULT)); -#define LoadDW(addr, value, res) \ +#define _LoadDW(addr, value, res) \ __asm__ __volatile__ ( \ ".set\tpush\n\t" \ ".set\tnoat\n\t" \ @@ -670,12 +670,12 @@ extern void show_registers(struct pt_regs *regs); : "r" (addr), "i" (-EFAULT)); #endif /* CONFIG_CPU_MIPSR6 */ -#define StoreHW(addr, value, res) \ +#define _StoreHW(addr, value, res, type) \ __asm__ __volatile__ ( \ ".set\tnoat\n" \ - "1:\t"user_sb("%1", "0(%2)")"\n" \ + "1:\t"type##_sb("%1", "0(%2)")"\n" \ "srl\t$1,%1, 0x8\n" \ - "2:\t"user_sb("$1", "1(%2)")"\n" \ + "2:\t"type##_sb("$1", "1(%2)")"\n" \ ".set\tat\n\t" \ "li\t%0, 0\n" \ "3:\n\t" \ @@ -691,10 +691,10 @@ extern void show_registers(struct pt_regs *regs); : "=r" (res) \ : "r" (value), "r" (addr), "i" (-EFAULT)); #ifndef 
CONFIG_CPU_MIPSR6 -#define StoreW(addr, value, res) \ +#define _StoreW(addr, value, res, type) \ __asm__ __volatile__ ( \ - "1:\t"user_swl("%1", "3(%2)")"\n" \ - "2:\t"user_swr("%1", "(%2)")"\n\t" \ + "1:\t"type##_swl("%1", "3(%2)")"\n" \ + "2:\t"type##_swr("%1", "(%2)")"\n\t"\ "li\t%0, 0\n" \ "3:\n\t" \ ".insn\n\t" \ @@ -709,7 +709,7 @@ extern void show_registers(struct pt_regs *regs); : "=r" (res) \ : "r" (value), "r" (addr), "i" (-EFAULT)); -#define StoreDW(addr, value, res) \ +#define _StoreDW(addr, value, res) \ __asm__ __volatile__ ( \ "1:\tsdl\t%1, 7(%2)\n" \ "2:\tsdr\t%1, (%2)\n\t" \ @@ -728,17 +728,17 @@ extern void show_registers(struct pt_regs *regs); : "r" (value), "r" (addr), "i" (-EFAULT)); #else /* MIPSR6 has no swl and sdl instructions */ -#define StoreW(addr, value, res) \ +#define _StoreW(addr, value, res, type) \ __asm__ __volatile__ ( \ ".set\tpush\n\t" \ ".set\tnoat\n\t" \ - "1:"user_sb("%1", "0(%2)")"\n\t" \ + "1:"type##_sb("%1", "0(%2)")"\n\t" \ "srl\t$1, %1, 0x8\n\t" \ - "2:"user_sb("$1", "1(%2)")"\n\t" \ + "2:"type##_sb("$1", "1(%2)")"\n\t" \ "srl\t$1, $1, 0x8\n\t" \ - "3:"user_sb("$1", "2(%2)")"\n\t" \ + "3:"type##_sb("$1", "2(%2)")"\n\t" \ "srl\t$1, $1, 0x8\n\t" \ - "4:"user_sb("$1", "3(%2)")"\n\t" \ + "4:"type##_sb("$1", "3(%2)")"\n\t" \ ".set\tpop\n\t" \ "li\t%0, 0\n" \ "10:\n\t" \ @@ -757,7 +757,7 @@ extern void show_registers(struct pt_regs *regs); : "r" (value), "r" (addr), "i" (-EFAULT) \ : "memory"); -#define StoreDW(addr, value, res) \ +#define _StoreDW(addr, value, res) \ __asm__ __volatile__ ( \ ".set\tpush\n\t" \ ".set\tnoat\n\t" \ @@ -801,6 +801,22 @@ extern void show_registers(struct pt_regs *regs); #endif /* CONFIG_CPU_MIPSR6 */ #endif +#define LoadHWU(addr, value, res) _LoadHWU(addr, value, res, kernel) +#define LoadHWUE(addr, value, res) _LoadHWU(addr, value, res, user) +#define LoadWU(addr, value, res) _LoadWU(addr, value, res, kernel) +#define LoadWUE(addr, value, res) _LoadWU(addr, value, res, user) +#define LoadHW(addr, value, res) _LoadHW(addr, value, res, kernel) +#define LoadHWE(addr, value, res) _LoadHW(addr, value, res, user) +#define LoadW(addr, value, res) _LoadW(addr, value, res, kernel) +#define LoadWE(addr, value, res) _LoadW(addr, value, res, user) +#define LoadDW(addr, value, res) _LoadDW(addr, value, res) + +#define StoreHW(addr, value, res) _StoreHW(addr, value, res, kernel) +#define StoreHWE(addr, value, res) _StoreHW(addr, value, res, user) +#define StoreW(addr, value, res) _StoreW(addr, value, res, kernel) +#define StoreWE(addr, value, res) _StoreW(addr, value, res, user) +#define StoreDW(addr, value, res) _StoreDW(addr, value, res) + static void emulate_load_store_insn(struct pt_regs *regs, void __user *addr, unsigned int __user *pc) { @@ -872,7 +888,7 @@ static void emulate_load_store_insn(struct pt_regs *regs, set_fs(seg); goto sigbus; } - LoadHW(addr, value, res); + LoadHWE(addr, value, res); if (res) { set_fs(seg); goto fault; @@ -885,7 +901,7 @@ static void emulate_load_store_insn(struct pt_regs *regs, set_fs(seg); goto sigbus; } - LoadW(addr, value, res); + LoadWE(addr, value, res); if (res) { set_fs(seg); goto fault; @@ -898,7 +914,7 @@ static void emulate_load_store_insn(struct pt_regs *regs, set_fs(seg); goto sigbus; } - LoadHWU(addr, value, res); + LoadHWUE(addr, value, res); if (res) { set_fs(seg); goto fault; @@ -913,7 +929,7 @@ static void emulate_load_store_insn(struct pt_regs *regs, } compute_return_epc(regs); value = regs->regs[insn.spec3_format.rt]; - StoreHW(addr, value, res); + StoreHWE(addr, value, res); 
if (res) { set_fs(seg); goto fault; @@ -926,7 +942,7 @@ static void emulate_load_store_insn(struct pt_regs *regs, } compute_return_epc(regs); value = regs->regs[insn.spec3_format.rt]; - StoreW(addr, value, res); + StoreWE(addr, value, res); if (res) { set_fs(seg); goto fault; From 3563c32d6532ece53c9dd8905a8e41983ef9952f Mon Sep 17 00:00:00 2001 From: Markos Chandras Date: Mon, 9 Mar 2015 14:54:51 +0000 Subject: [PATCH 14/33] MIPS: unaligned: Surround load/store macros in do {} while statements It's best to surround such complex macros with do {} while statements so they can appear as independent logical blocks when used within other control blocks. Signed-off-by: Markos Chandras Cc: # v3.15+ Cc: linux-mips@linux-mips.org Patchwork: https://patchwork.linux-mips.org/patch/9502/ Signed-off-by: Ralf Baechle --- arch/mips/kernel/unaligned.c | 116 +++++++++++++++++++++++++++-------- 1 file changed, 90 insertions(+), 26 deletions(-) diff --git a/arch/mips/kernel/unaligned.c b/arch/mips/kernel/unaligned.c index 7a5707eea898..ab475903175f 100644 --- a/arch/mips/kernel/unaligned.c +++ b/arch/mips/kernel/unaligned.c @@ -110,6 +110,7 @@ extern void show_registers(struct pt_regs *regs); #ifdef __BIG_ENDIAN #define _LoadHW(addr, value, res, type) \ +do { \ __asm__ __volatile__ (".set\tnoat\n" \ "1:\t"type##_lb("%0", "0(%2)")"\n" \ "2:\t"type##_lbu("$1", "1(%2)")"\n\t"\ @@ -127,10 +128,12 @@ extern void show_registers(struct pt_regs *regs); STR(PTR)"\t2b, 4b\n\t" \ ".previous" \ : "=&r" (value), "=r" (res) \ - : "r" (addr), "i" (-EFAULT)); + : "r" (addr), "i" (-EFAULT)); \ +} while(0) #ifndef CONFIG_CPU_MIPSR6 #define _LoadW(addr, value, res, type) \ +do { \ __asm__ __volatile__ ( \ "1:\t"type##_lwl("%0", "(%2)")"\n" \ "2:\t"type##_lwr("%0", "3(%2)")"\n\t"\ @@ -146,10 +149,13 @@ extern void show_registers(struct pt_regs *regs); STR(PTR)"\t2b, 4b\n\t" \ ".previous" \ : "=&r" (value), "=r" (res) \ - : "r" (addr), "i" (-EFAULT)); + : "r" (addr), "i" (-EFAULT)); \ +} while(0) + #else /* MIPSR6 has no lwl instruction */ #define _LoadW(addr, value, res, type) \ +do { \ __asm__ __volatile__ ( \ ".set\tpush\n" \ ".set\tnoat\n\t" \ @@ -178,10 +184,13 @@ extern void show_registers(struct pt_regs *regs); STR(PTR)"\t4b, 11b\n\t" \ ".previous" \ : "=&r" (value), "=r" (res) \ - : "r" (addr), "i" (-EFAULT)); + : "r" (addr), "i" (-EFAULT)); \ +} while(0) + #endif /* CONFIG_CPU_MIPSR6 */ #define _LoadHWU(addr, value, res, type) \ +do { \ __asm__ __volatile__ ( \ ".set\tnoat\n" \ "1:\t"type##_lbu("%0", "0(%2)")"\n" \ @@ -201,10 +210,12 @@ extern void show_registers(struct pt_regs *regs); STR(PTR)"\t2b, 4b\n\t" \ ".previous" \ : "=&r" (value), "=r" (res) \ - : "r" (addr), "i" (-EFAULT)); + : "r" (addr), "i" (-EFAULT)); \ +} while(0) #ifndef CONFIG_CPU_MIPSR6 #define _LoadWU(addr, value, res, type) \ +do { \ __asm__ __volatile__ ( \ "1:\t"type##_lwl("%0", "(%2)")"\n" \ "2:\t"type##_lwr("%0", "3(%2)")"\n\t"\ @@ -222,9 +233,11 @@ extern void show_registers(struct pt_regs *regs); STR(PTR)"\t2b, 4b\n\t" \ ".previous" \ : "=&r" (value), "=r" (res) \ - : "r" (addr), "i" (-EFAULT)); + : "r" (addr), "i" (-EFAULT)); \ +} while(0) #define _LoadDW(addr, value, res) \ +do { \ __asm__ __volatile__ ( \ "1:\tldl\t%0, (%2)\n" \ "2:\tldr\t%0, 7(%2)\n\t" \ @@ -240,10 +253,13 @@ extern void show_registers(struct pt_regs *regs); STR(PTR)"\t2b, 4b\n\t" \ ".previous" \ : "=&r" (value), "=r" (res) \ - : "r" (addr), "i" (-EFAULT)); + : "r" (addr), "i" (-EFAULT)); \ +} while(0) + #else /* MIPSR6 has not lwl and ldl instructions */ #define 
_LoadWU(addr, value, res, type) \ +do { \ __asm__ __volatile__ ( \ ".set\tpush\n\t" \ ".set\tnoat\n\t" \ @@ -272,9 +288,11 @@ extern void show_registers(struct pt_regs *regs); STR(PTR)"\t4b, 11b\n\t" \ ".previous" \ : "=&r" (value), "=r" (res) \ - : "r" (addr), "i" (-EFAULT)); + : "r" (addr), "i" (-EFAULT)); \ +} while(0) #define _LoadDW(addr, value, res) \ +do { \ __asm__ __volatile__ ( \ ".set\tpush\n\t" \ ".set\tnoat\n\t" \ @@ -319,11 +337,14 @@ extern void show_registers(struct pt_regs *regs); STR(PTR)"\t8b, 11b\n\t" \ ".previous" \ : "=&r" (value), "=r" (res) \ - : "r" (addr), "i" (-EFAULT)); + : "r" (addr), "i" (-EFAULT)); \ +} while(0) + #endif /* CONFIG_CPU_MIPSR6 */ #define _StoreHW(addr, value, res, type) \ +do { \ __asm__ __volatile__ ( \ ".set\tnoat\n" \ "1:\t"type##_sb("%1", "1(%2)")"\n" \ @@ -342,10 +363,12 @@ extern void show_registers(struct pt_regs *regs); STR(PTR)"\t2b, 4b\n\t" \ ".previous" \ : "=r" (res) \ - : "r" (value), "r" (addr), "i" (-EFAULT)); + : "r" (value), "r" (addr), "i" (-EFAULT));\ +} while(0) #ifndef CONFIG_CPU_MIPSR6 #define _StoreW(addr, value, res, type) \ +do { \ __asm__ __volatile__ ( \ "1:\t"type##_swl("%1", "(%2)")"\n" \ "2:\t"type##_swr("%1", "3(%2)")"\n\t"\ @@ -361,9 +384,11 @@ extern void show_registers(struct pt_regs *regs); STR(PTR)"\t2b, 4b\n\t" \ ".previous" \ : "=r" (res) \ - : "r" (value), "r" (addr), "i" (-EFAULT)); + : "r" (value), "r" (addr), "i" (-EFAULT)); \ +} while(0) #define _StoreDW(addr, value, res) \ +do { \ __asm__ __volatile__ ( \ "1:\tsdl\t%1,(%2)\n" \ "2:\tsdr\t%1, 7(%2)\n\t" \ @@ -379,10 +404,13 @@ extern void show_registers(struct pt_regs *regs); STR(PTR)"\t2b, 4b\n\t" \ ".previous" \ : "=r" (res) \ - : "r" (value), "r" (addr), "i" (-EFAULT)); + : "r" (value), "r" (addr), "i" (-EFAULT)); \ +} while(0) + #else /* MIPSR6 has no swl and sdl instructions */ #define _StoreW(addr, value, res, type) \ +do { \ __asm__ __volatile__ ( \ ".set\tpush\n\t" \ ".set\tnoat\n\t" \ @@ -409,9 +437,11 @@ extern void show_registers(struct pt_regs *regs); ".previous" \ : "=&r" (res) \ : "r" (value), "r" (addr), "i" (-EFAULT) \ - : "memory"); + : "memory"); \ +} while(0) #define StoreDW(addr, value, res) \ +do { \ __asm__ __volatile__ ( \ ".set\tpush\n\t" \ ".set\tnoat\n\t" \ @@ -451,12 +481,15 @@ extern void show_registers(struct pt_regs *regs); ".previous" \ : "=&r" (res) \ : "r" (value), "r" (addr), "i" (-EFAULT) \ - : "memory"); + : "memory"); \ +} while(0) + #endif /* CONFIG_CPU_MIPSR6 */ #else /* __BIG_ENDIAN */ #define _LoadHW(addr, value, res, type) \ +do { \ __asm__ __volatile__ (".set\tnoat\n" \ "1:\t"type##_lb("%0", "1(%2)")"\n" \ "2:\t"type##_lbu("$1", "0(%2)")"\n\t"\ @@ -474,10 +507,12 @@ extern void show_registers(struct pt_regs *regs); STR(PTR)"\t2b, 4b\n\t" \ ".previous" \ : "=&r" (value), "=r" (res) \ - : "r" (addr), "i" (-EFAULT)); + : "r" (addr), "i" (-EFAULT)); \ +} while(0) #ifndef CONFIG_CPU_MIPSR6 #define _LoadW(addr, value, res, type) \ +do { \ __asm__ __volatile__ ( \ "1:\t"type##_lwl("%0", "3(%2)")"\n" \ "2:\t"type##_lwr("%0", "(%2)")"\n\t"\ @@ -493,10 +528,13 @@ extern void show_registers(struct pt_regs *regs); STR(PTR)"\t2b, 4b\n\t" \ ".previous" \ : "=&r" (value), "=r" (res) \ - : "r" (addr), "i" (-EFAULT)); + : "r" (addr), "i" (-EFAULT)); \ +} while(0) + #else /* MIPSR6 has no lwl instruction */ #define _LoadW(addr, value, res, type) \ +do { \ __asm__ __volatile__ ( \ ".set\tpush\n" \ ".set\tnoat\n\t" \ @@ -525,11 +563,14 @@ extern void show_registers(struct pt_regs *regs); STR(PTR)"\t4b, 11b\n\t" \ ".previous" \ : 
"=&r" (value), "=r" (res) \ - : "r" (addr), "i" (-EFAULT)); + : "r" (addr), "i" (-EFAULT)); \ +} while(0) + #endif /* CONFIG_CPU_MIPSR6 */ #define _LoadHWU(addr, value, res, type) \ +do { \ __asm__ __volatile__ ( \ ".set\tnoat\n" \ "1:\t"type##_lbu("%0", "1(%2)")"\n" \ @@ -549,10 +590,12 @@ extern void show_registers(struct pt_regs *regs); STR(PTR)"\t2b, 4b\n\t" \ ".previous" \ : "=&r" (value), "=r" (res) \ - : "r" (addr), "i" (-EFAULT)); + : "r" (addr), "i" (-EFAULT)); \ +} while(0) #ifndef CONFIG_CPU_MIPSR6 #define _LoadWU(addr, value, res, type) \ +do { \ __asm__ __volatile__ ( \ "1:\t"type##_lwl("%0", "3(%2)")"\n" \ "2:\t"type##_lwr("%0", "(%2)")"\n\t"\ @@ -570,9 +613,11 @@ extern void show_registers(struct pt_regs *regs); STR(PTR)"\t2b, 4b\n\t" \ ".previous" \ : "=&r" (value), "=r" (res) \ - : "r" (addr), "i" (-EFAULT)); + : "r" (addr), "i" (-EFAULT)); \ +} while(0) #define _LoadDW(addr, value, res) \ +do { \ __asm__ __volatile__ ( \ "1:\tldl\t%0, 7(%2)\n" \ "2:\tldr\t%0, (%2)\n\t" \ @@ -588,10 +633,13 @@ extern void show_registers(struct pt_regs *regs); STR(PTR)"\t2b, 4b\n\t" \ ".previous" \ : "=&r" (value), "=r" (res) \ - : "r" (addr), "i" (-EFAULT)); + : "r" (addr), "i" (-EFAULT)); \ +} while(0) + #else /* MIPSR6 has not lwl and ldl instructions */ #define _LoadWU(addr, value, res, type) \ +do { \ __asm__ __volatile__ ( \ ".set\tpush\n\t" \ ".set\tnoat\n\t" \ @@ -620,9 +668,11 @@ extern void show_registers(struct pt_regs *regs); STR(PTR)"\t4b, 11b\n\t" \ ".previous" \ : "=&r" (value), "=r" (res) \ - : "r" (addr), "i" (-EFAULT)); + : "r" (addr), "i" (-EFAULT)); \ +} while(0) #define _LoadDW(addr, value, res) \ +do { \ __asm__ __volatile__ ( \ ".set\tpush\n\t" \ ".set\tnoat\n\t" \ @@ -667,10 +717,12 @@ extern void show_registers(struct pt_regs *regs); STR(PTR)"\t8b, 11b\n\t" \ ".previous" \ : "=&r" (value), "=r" (res) \ - : "r" (addr), "i" (-EFAULT)); + : "r" (addr), "i" (-EFAULT)); \ +} while(0) #endif /* CONFIG_CPU_MIPSR6 */ #define _StoreHW(addr, value, res, type) \ +do { \ __asm__ __volatile__ ( \ ".set\tnoat\n" \ "1:\t"type##_sb("%1", "0(%2)")"\n" \ @@ -689,9 +741,12 @@ extern void show_registers(struct pt_regs *regs); STR(PTR)"\t2b, 4b\n\t" \ ".previous" \ : "=r" (res) \ - : "r" (value), "r" (addr), "i" (-EFAULT)); + : "r" (value), "r" (addr), "i" (-EFAULT));\ +} while(0) + #ifndef CONFIG_CPU_MIPSR6 #define _StoreW(addr, value, res, type) \ +do { \ __asm__ __volatile__ ( \ "1:\t"type##_swl("%1", "3(%2)")"\n" \ "2:\t"type##_swr("%1", "(%2)")"\n\t"\ @@ -707,9 +762,11 @@ extern void show_registers(struct pt_regs *regs); STR(PTR)"\t2b, 4b\n\t" \ ".previous" \ : "=r" (res) \ - : "r" (value), "r" (addr), "i" (-EFAULT)); + : "r" (value), "r" (addr), "i" (-EFAULT)); \ +} while(0) #define _StoreDW(addr, value, res) \ +do { \ __asm__ __volatile__ ( \ "1:\tsdl\t%1, 7(%2)\n" \ "2:\tsdr\t%1, (%2)\n\t" \ @@ -725,10 +782,13 @@ extern void show_registers(struct pt_regs *regs); STR(PTR)"\t2b, 4b\n\t" \ ".previous" \ : "=r" (res) \ - : "r" (value), "r" (addr), "i" (-EFAULT)); + : "r" (value), "r" (addr), "i" (-EFAULT)); \ +} while(0) + #else /* MIPSR6 has no swl and sdl instructions */ #define _StoreW(addr, value, res, type) \ +do { \ __asm__ __volatile__ ( \ ".set\tpush\n\t" \ ".set\tnoat\n\t" \ @@ -755,9 +815,11 @@ extern void show_registers(struct pt_regs *regs); ".previous" \ : "=&r" (res) \ : "r" (value), "r" (addr), "i" (-EFAULT) \ - : "memory"); + : "memory"); \ +} while(0) #define _StoreDW(addr, value, res) \ +do { \ __asm__ __volatile__ ( \ ".set\tpush\n\t" \ ".set\tnoat\n\t" \ @@ 
-797,7 +859,9 @@ extern void show_registers(struct pt_regs *regs); ".previous" \ : "=&r" (res) \ : "r" (value), "r" (addr), "i" (-EFAULT) \ - : "memory"); + : "memory"); \ +} while(0) + #endif /* CONFIG_CPU_MIPSR6 */ #endif From 6eae35485b26f9e51ab896eb8a936bed9908fdf6 Mon Sep 17 00:00:00 2001 From: Markos Chandras Date: Mon, 9 Mar 2015 14:54:52 +0000 Subject: [PATCH 15/33] MIPS: unaligned: Fix regular load/store instruction emulation for EVA When emulating a regular lh/lw/lhu/sh/sw we need to use the appropriate instruction if we are in EVA mode. This is necessary for userspace applications which trigger alignment exceptions. In such case, the userspace load/store instruction needs to be emulated with the correct eva/non-eva instruction by the kernel emulator. Signed-off-by: Markos Chandras Fixes: c1771216ab48 ("MIPS: kernel: unaligned: Handle unaligned accesses for EVA") Cc: # v3.15+ Cc: linux-mips@linux-mips.org Patchwork: https://patchwork.linux-mips.org/patch/9503/ Signed-off-by: Ralf Baechle --- arch/mips/kernel/unaligned.c | 52 ++++++++++++++++++++++++++++++++---- 1 file changed, 47 insertions(+), 5 deletions(-) diff --git a/arch/mips/kernel/unaligned.c b/arch/mips/kernel/unaligned.c index ab475903175f..7659da224fcd 100644 --- a/arch/mips/kernel/unaligned.c +++ b/arch/mips/kernel/unaligned.c @@ -1023,7 +1023,15 @@ static void emulate_load_store_insn(struct pt_regs *regs, if (!access_ok(VERIFY_READ, addr, 2)) goto sigbus; - LoadHW(addr, value, res); + if (config_enabled(CONFIG_EVA)) { + if (segment_eq(get_fs(), get_ds())) + LoadHW(addr, value, res); + else + LoadHWE(addr, value, res); + } else { + LoadHW(addr, value, res); + } + if (res) goto fault; compute_return_epc(regs); @@ -1034,7 +1042,15 @@ static void emulate_load_store_insn(struct pt_regs *regs, if (!access_ok(VERIFY_READ, addr, 4)) goto sigbus; - LoadW(addr, value, res); + if (config_enabled(CONFIG_EVA)) { + if (segment_eq(get_fs(), get_ds())) + LoadW(addr, value, res); + else + LoadWE(addr, value, res); + } else { + LoadW(addr, value, res); + } + if (res) goto fault; compute_return_epc(regs); @@ -1045,7 +1061,15 @@ static void emulate_load_store_insn(struct pt_regs *regs, if (!access_ok(VERIFY_READ, addr, 2)) goto sigbus; - LoadHWU(addr, value, res); + if (config_enabled(CONFIG_EVA)) { + if (segment_eq(get_fs(), get_ds())) + LoadHWU(addr, value, res); + else + LoadHWUE(addr, value, res); + } else { + LoadHWU(addr, value, res); + } + if (res) goto fault; compute_return_epc(regs); @@ -1104,7 +1128,16 @@ static void emulate_load_store_insn(struct pt_regs *regs, compute_return_epc(regs); value = regs->regs[insn.i_format.rt]; - StoreHW(addr, value, res); + + if (config_enabled(CONFIG_EVA)) { + if (segment_eq(get_fs(), get_ds())) + StoreHW(addr, value, res); + else + StoreHWE(addr, value, res); + } else { + StoreHW(addr, value, res); + } + if (res) goto fault; break; @@ -1115,7 +1148,16 @@ static void emulate_load_store_insn(struct pt_regs *regs, compute_return_epc(regs); value = regs->regs[insn.i_format.rt]; - StoreW(addr, value, res); + + if (config_enabled(CONFIG_EVA)) { + if (segment_eq(get_fs(), get_ds())) + StoreW(addr, value, res); + else + StoreWE(addr, value, res); + } else { + StoreW(addr, value, res); + } + if (res) goto fault; break; From 07edf0d46c07568d08feee95bbaa38c71b084150 Mon Sep 17 00:00:00 2001 From: Markos Chandras Date: Tue, 10 Mar 2015 12:30:56 +0000 Subject: [PATCH 16/33] MIPS: Kconfig: Fix typo for the r2-to-r6 emulator kernel parameter Commit b0a668fb2038 ("MIPS: kernel: mips-r2-to-r6-emul: Add R2 
emulator for MIPS R6") added the mips r2-to-r6 emulator so an R2 userland can be executed on R6 kernels. This needed both build time and runtime support. The runtime support needed the "mipsr2emu" kernel parameter instead of the "mipsr2emul" listed in the Kconfig help message. Signed-off-by: Markos Chandras Fixes: b0a668fb2038 ("MIPS: kernel: mips-r2-to-r6-emul: Add R2 emulator for MIPS R6") Cc: linux-mips@linux-mips.org Cc: Markos Chandras Patchwork: https://patchwork.linux-mips.org/patch/9504/ Signed-off-by: Ralf Baechle --- arch/mips/Kconfig | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig index 69a3b0fab926..7222592ec334 100644 --- a/arch/mips/Kconfig +++ b/arch/mips/Kconfig @@ -2107,7 +2107,7 @@ config MIPSR2_TO_R6_EMULATOR help Choose this option if you want to run non-R6 MIPS userland code. Even if you say 'Y' here, the emulator will still be disabled by - default. You can enable it using the 'mipsr2emul' kernel option. + default. You can enable it using the 'mipsr2emu' kernel option. The only reason this is a build-time option is to save ~14K from the final kernel image. comment "MIPS R2-to-R6 emulator is only available for UP kernels" From f6b39ae6f4d6ee835bb16e452086121aa010f1a7 Mon Sep 17 00:00:00 2001 From: Markos Chandras Date: Tue, 3 Mar 2015 18:48:47 +0000 Subject: [PATCH 17/33] MIPS: r4kcache: Use correct base register for MIPS R6 cache flushes Commit 934c79231c1b("MIPS: asm: r4kcache: Add MIPS R6 cache unroll functions") added support for MIPS R6 cache flushes but it used the wrong base address register to perform the flushes so the same lines were flushed over and over. Moreover, replace the "addiu" instructions with LONG_ADDIU so the correct base address is calculated for 64-bit cores. Signed-off-by: Markos Chandras Fixes: 934c79231c1b("MIPS: asm: r4kcache: Add MIPS R6 cache unroll functions") Cc: linux-mips@linux-mips.org Reviewed-by: Maciej W. 
Rozycki Patchwork: https://patchwork.linux-mips.org/patch/9384/ Signed-off-by: Ralf Baechle --- arch/mips/include/asm/r4kcache.h | 89 ++++++++++++++++---------------- 1 file changed, 45 insertions(+), 44 deletions(-) diff --git a/arch/mips/include/asm/r4kcache.h b/arch/mips/include/asm/r4kcache.h index 1b22d2da88a1..38902bf97adc 100644 --- a/arch/mips/include/asm/r4kcache.h +++ b/arch/mips/include/asm/r4kcache.h @@ -12,6 +12,8 @@ #ifndef _ASM_R4KCACHE_H #define _ASM_R4KCACHE_H +#include + #include #include #include @@ -344,7 +346,7 @@ static inline void invalidate_tcache_page(unsigned long addr) " cache %1, 0x0a0(%0); cache %1, 0x0b0(%0)\n" \ " cache %1, 0x0c0(%0); cache %1, 0x0d0(%0)\n" \ " cache %1, 0x0e0(%0); cache %1, 0x0f0(%0)\n" \ - " addiu $1, $0, 0x100 \n" \ + " "__stringify(LONG_ADDIU)" $1, %0, 0x100 \n" \ " cache %1, 0x000($1); cache %1, 0x010($1)\n" \ " cache %1, 0x020($1); cache %1, 0x030($1)\n" \ " cache %1, 0x040($1); cache %1, 0x050($1)\n" \ @@ -368,17 +370,17 @@ static inline void invalidate_tcache_page(unsigned long addr) " cache %1, 0x040(%0); cache %1, 0x060(%0)\n" \ " cache %1, 0x080(%0); cache %1, 0x0a0(%0)\n" \ " cache %1, 0x0c0(%0); cache %1, 0x0e0(%0)\n" \ - " addiu $1, %0, 0x100\n" \ + " "__stringify(LONG_ADDIU)" $1, %0, 0x100 \n" \ " cache %1, 0x000($1); cache %1, 0x020($1)\n" \ " cache %1, 0x040($1); cache %1, 0x060($1)\n" \ " cache %1, 0x080($1); cache %1, 0x0a0($1)\n" \ " cache %1, 0x0c0($1); cache %1, 0x0e0($1)\n" \ - " addiu $1, $1, 0x100\n" \ + " "__stringify(LONG_ADDIU)" $1, $1, 0x100 \n" \ " cache %1, 0x000($1); cache %1, 0x020($1)\n" \ " cache %1, 0x040($1); cache %1, 0x060($1)\n" \ " cache %1, 0x080($1); cache %1, 0x0a0($1)\n" \ " cache %1, 0x0c0($1); cache %1, 0x0e0($1)\n" \ - " addiu $1, $1, 0x100\n" \ + " "__stringify(LONG_ADDIU)" $1, $1, 0x100\n" \ " cache %1, 0x000($1); cache %1, 0x020($1)\n" \ " cache %1, 0x040($1); cache %1, 0x060($1)\n" \ " cache %1, 0x080($1); cache %1, 0x0a0($1)\n" \ @@ -396,25 +398,25 @@ static inline void invalidate_tcache_page(unsigned long addr) " .set noat\n" \ " cache %1, 0x000(%0); cache %1, 0x040(%0)\n" \ " cache %1, 0x080(%0); cache %1, 0x0c0(%0)\n" \ - " addiu $1, %0, 0x100\n" \ + " "__stringify(LONG_ADDIU)" $1, %0, 0x100 \n" \ " cache %1, 0x000($1); cache %1, 0x040($1)\n" \ " cache %1, 0x080($1); cache %1, 0x0c0($1)\n" \ - " addiu $1, %0, 0x100\n" \ + " "__stringify(LONG_ADDIU)" $1, $1, 0x100 \n" \ " cache %1, 0x000($1); cache %1, 0x040($1)\n" \ " cache %1, 0x080($1); cache %1, 0x0c0($1)\n" \ - " addiu $1, %0, 0x100\n" \ + " "__stringify(LONG_ADDIU)" $1, $1, 0x100 \n" \ " cache %1, 0x000($1); cache %1, 0x040($1)\n" \ " cache %1, 0x080($1); cache %1, 0x0c0($1)\n" \ - " addiu $1, %0, 0x100\n" \ + " "__stringify(LONG_ADDIU)" $1, $1, 0x100 \n" \ " cache %1, 0x000($1); cache %1, 0x040($1)\n" \ " cache %1, 0x080($1); cache %1, 0x0c0($1)\n" \ - " addiu $1, %0, 0x100\n" \ + " "__stringify(LONG_ADDIU)" $1, $1, 0x100 \n" \ " cache %1, 0x000($1); cache %1, 0x040($1)\n" \ " cache %1, 0x080($1); cache %1, 0x0c0($1)\n" \ - " addiu $1, %0, 0x100\n" \ + " "__stringify(LONG_ADDIU)" $1, $1, 0x100 \n" \ " cache %1, 0x000($1); cache %1, 0x040($1)\n" \ " cache %1, 0x080($1); cache %1, 0x0c0($1)\n" \ - " addiu $1, %0, 0x100\n" \ + " "__stringify(LONG_ADDIU)" $1, $1, 0x100 \n" \ " cache %1, 0x000($1); cache %1, 0x040($1)\n" \ " cache %1, 0x080($1); cache %1, 0x0c0($1)\n" \ " .set pop\n" \ @@ -429,39 +431,38 @@ static inline void invalidate_tcache_page(unsigned long addr) " .set mips64r6\n" \ " .set noat\n" \ " cache %1, 0x000(%0); cache 
%1, 0x080(%0)\n" \ - " addiu $1, %0, 0x100\n" \ - " cache %1, 0x000(%0); cache %1, 0x080(%0)\n" \ - " addiu $1, %0, 0x100\n" \ - " cache %1, 0x000(%0); cache %1, 0x080(%0)\n" \ - " addiu $1, %0, 0x100\n" \ - " cache %1, 0x000(%0); cache %1, 0x080(%0)\n" \ - " addiu $1, %0, 0x100\n" \ - " cache %1, 0x000(%0); cache %1, 0x080(%0)\n" \ - " addiu $1, %0, 0x100\n" \ - " cache %1, 0x000(%0); cache %1, 0x080(%0)\n" \ - " addiu $1, %0, 0x100\n" \ - " cache %1, 0x000(%0); cache %1, 0x080(%0)\n" \ - " addiu $1, %0, 0x100\n" \ - " cache %1, 0x000(%0); cache %1, 0x080(%0)\n" \ - " addiu $1, %0, 0x100\n" \ - " cache %1, 0x000(%0); cache %1, 0x080(%0)\n" \ - " addiu $1, %0, 0x100\n" \ - " cache %1, 0x000(%0); cache %1, 0x080(%0)\n" \ - " addiu $1, %0, 0x100\n" \ - " cache %1, 0x000(%0); cache %1, 0x080(%0)\n" \ - " addiu $1, %0, 0x100\n" \ - " cache %1, 0x000(%0); cache %1, 0x080(%0)\n" \ - " addiu $1, %0, 0x100\n" \ - " cache %1, 0x000(%0); cache %1, 0x080(%0)\n" \ - " addiu $1, %0, 0x100\n" \ - " cache %1, 0x000(%0); cache %1, 0x080(%0)\n" \ - " addiu $1, %0, 0x100\n" \ - " cache %1, 0x000(%0); cache %1, 0x080(%0)\n" \ - " addiu $1, %0, 0x100\n" \ - " cache %1, 0x000(%0); cache %1, 0x080(%0)\n" \ - " addiu $1, %0, 0x100\n" \ - " cache %1, 0x000(%0); cache %1, 0x080(%0)\n" \ - " addiu $1, %0, 0x100\n" \ + " "__stringify(LONG_ADDIU)" $1, %0, 0x100 \n" \ + " cache %1, 0x000($1); cache %1, 0x080($1)\n" \ + " "__stringify(LONG_ADDIU)" $1, $1, 0x100 \n" \ + " cache %1, 0x000($1); cache %1, 0x080($1)\n" \ + " "__stringify(LONG_ADDIU)" $1, $1, 0x100 \n" \ + " cache %1, 0x000($1); cache %1, 0x080($1)\n" \ + " "__stringify(LONG_ADDIU)" $1, $1, 0x100 \n" \ + " cache %1, 0x000($1); cache %1, 0x080($1)\n" \ + " "__stringify(LONG_ADDIU)" $1, $1, 0x100 \n" \ + " cache %1, 0x000($1); cache %1, 0x080($1)\n" \ + " "__stringify(LONG_ADDIU)" $1, $1, 0x100 \n" \ + " cache %1, 0x000($1); cache %1, 0x080($1)\n" \ + " "__stringify(LONG_ADDIU)" $1, $1, 0x100 \n" \ + " cache %1, 0x000($1); cache %1, 0x080($1)\n" \ + " "__stringify(LONG_ADDIU)" $1, $1, 0x100 \n" \ + " cache %1, 0x000($1); cache %1, 0x080($1)\n" \ + " "__stringify(LONG_ADDIU)" $1, $1, 0x100 \n" \ + " cache %1, 0x000($1); cache %1, 0x080($1)\n" \ + " "__stringify(LONG_ADDIU)" $1, $1, 0x100 \n" \ + " cache %1, 0x000($1); cache %1, 0x080($1)\n" \ + " "__stringify(LONG_ADDIU)" $1, $1, 0x100 \n" \ + " cache %1, 0x000($1); cache %1, 0x080($1)\n" \ + " "__stringify(LONG_ADDIU)" $1, $1, 0x100 \n" \ + " cache %1, 0x000($1); cache %1, 0x080($1)\n" \ + " "__stringify(LONG_ADDIU)" $1, $1, 0x100 \n" \ + " cache %1, 0x000($1); cache %1, 0x080($1)\n" \ + " "__stringify(LONG_ADDIU)" $1, $1, 0x100 \n" \ + " cache %1, 0x000($1); cache %1, 0x080($1)\n" \ + " "__stringify(LONG_ADDIU)" $1, $1, 0x100 \n" \ + " cache %1, 0x000($1); cache %1, 0x080($1)\n" \ + " "__stringify(LONG_ADDIU)" $1, $1, 0x100 \n" \ + " cache %1, 0x000($1); cache %1, 0x080($1)\n" \ " .set pop\n" \ : \ : "r" (base), \ From 518222161d4a2d3f3b2700098563b62383f83878 Mon Sep 17 00:00:00 2001 From: Markos Chandras Date: Tue, 3 Mar 2015 18:48:48 +0000 Subject: [PATCH 18/33] MIPS: asm: spinlock: Fix addiu instruction for R10000_LLSC_WAR case Commit 5753762cbd1c("MIPS: asm: spinlock: Replace "sub" instruction with "addiu") replaced the "sub" instruction with addiu but it did not update the immediate value in the R10000_LLSC_WAR case. 
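The ll/sc loop in arch_read_unlock() maintains a reader count; a minimal
C11 sketch of the same protocol (assuming a plain counter, not the
kernel's arch_rwlock_t layout) shows why the immediate has to be -1:

#include <stdatomic.h>
#include <stdio.h>

static atomic_int lock_count; /* number of active readers */

static void read_lock_sketch(void)
{
	atomic_fetch_add(&lock_count, 1); /* lock takes the count up */
}

static void read_unlock_sketch(void)
{
	atomic_fetch_sub(&lock_count, 1); /* unlock must take it back down */
}

int main(void)
{
	read_lock_sketch();
	read_unlock_sketch();
	printf("readers: %d\n", atomic_load(&lock_count)); /* back to 0 */
	return 0;
}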
Signed-off-by: Markos Chandras
Fixes: 5753762cbd1c ("MIPS: asm: spinlock: Replace "sub" instruction with "addiu"")
Cc: linux-mips@linux-mips.org
Patchwork: https://patchwork.linux-mips.org/patch/9385/
Signed-off-by: Ralf Baechle
---
 arch/mips/include/asm/spinlock.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/arch/mips/include/asm/spinlock.h b/arch/mips/include/asm/spinlock.h
index b4548690ade9..1fca2e0793dc 100644
--- a/arch/mips/include/asm/spinlock.h
+++ b/arch/mips/include/asm/spinlock.h
@@ -263,7 +263,7 @@ static inline void arch_read_unlock(arch_rwlock_t *rw)
 	if (R10000_LLSC_WAR) {
 		__asm__ __volatile__(
 		"1:	ll	%1, %2	# arch_read_unlock	\n"
-		"	addiu	%1, 1	\n"
+		"	addiu	%1, -1	\n"
 		"	sc	%1, %0	\n"
 		"	beqzl	%1, 1b	\n"
 		: "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp)

From aebac99384f7a6d83a3dcd42bf2481eed2670083 Mon Sep 17 00:00:00 2001
From: Markos Chandras
Date: Tue, 3 Mar 2015 18:48:49 +0000
Subject: [PATCH 19/33] MIPS: kernel: entry.S: Set correct ISA level for
 mips_ihb

Commit 6ebb496ffc7e ("MIPS: kernel: entry.S: Add MIPS R6 related
definitions") added the MIPSR6 definition but it did not update the ISA
level of the actual assembly code, so a pre-MIPSR6 jr.hb instruction was
generated instead. Fix this by using the MIPS_ISA_LEVEL_RAW macro.

Signed-off-by: Markos Chandras
Fixes: 6ebb496ffc7e ("MIPS: kernel: entry.S: Add MIPS R6 related definitions")
Cc: linux-mips@linux-mips.org
Patchwork: https://patchwork.linux-mips.org/patch/9386/
Signed-off-by: Ralf Baechle
---
 arch/mips/kernel/entry.S | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/arch/mips/kernel/entry.S b/arch/mips/kernel/entry.S
index af41ba6db960..7791840cf22c 100644
--- a/arch/mips/kernel/entry.S
+++ b/arch/mips/kernel/entry.S
@@ -10,6 +10,7 @@
 #include
 #include
+#include
 #include
 #include
 #include
@@ -185,7 +186,7 @@ syscall_exit_work:
 * For C code use the inline version named instruction_hazard().
 */
 LEAF(mips_ihb)
-	.set	mips32r2
+	.set	MIPS_ISA_LEVEL_RAW
 	jr.hb	ra
 	nop
 	END(mips_ihb)

From 9cdf30bd3bac697fc533988f44a117434a858f69 Mon Sep 17 00:00:00 2001
From: Ralf Baechle
Date: Wed, 25 Mar 2015 13:14:16 +0100
Subject: [PATCH 20/33] MIPS: Fix cpu_has_mips_r2_exec_hazard.

Returns a non-zero value if the current processor implementation
requires an IHB instruction to deal with an instruction hazard as per
MIPS R2 architecture specification, zero otherwise.

For a discussion, see http://patchwork.linux-mips.org/patch/9539/.

Signed-off-by: Ralf Baechle
---
 arch/mips/include/asm/cpu-features.h | 33 +++++++++++++++++++++++++++-
 1 file changed, 32 insertions(+), 1 deletion(-)

diff --git a/arch/mips/include/asm/cpu-features.h b/arch/mips/include/asm/cpu-features.h
index a324751b02ff..49c7a29a1f9e 100644
--- a/arch/mips/include/asm/cpu-features.h
+++ b/arch/mips/include/asm/cpu-features.h
@@ -238,8 +238,39 @@
 /* MIPSR2 and MIPSR6 have a lot of similarities */
 #define cpu_has_mips_r2_r6	(cpu_has_mips_r2 | cpu_has_mips_r6)

+/*
+ * cpu_has_mips_r2_exec_hazard - return if IHB is required on current processor
+ *
+ * Returns non-zero value if the current processor implementation requires
+ * an IHB instruction to deal with an instruction hazard as per MIPS R2
+ * architecture specification, zero otherwise.
+ */
 #ifndef cpu_has_mips_r2_exec_hazard
-#define cpu_has_mips_r2_exec_hazard (cpu_has_mips_r2 | cpu_has_mips_r6)
+#define cpu_has_mips_r2_exec_hazard \
+({ \
+	int __res; \
+ \
+	switch (current_cpu_type()) { \
+	case CPU_M14KC: \
+	case CPU_74K: \
+	case CPU_1074K: \
+	case CPU_PROAPTIV: \
+	case CPU_P5600: \
+	case CPU_M5150: \
+	case CPU_QEMU_GENERIC: \
+	case CPU_CAVIUM_OCTEON: \
+	case CPU_CAVIUM_OCTEON_PLUS: \
+	case CPU_CAVIUM_OCTEON2: \
+	case CPU_CAVIUM_OCTEON3: \
+		__res = 0; \
+		break; \
+ \
+	default: \
+		__res = 1; \
+	} \
+ \
+	__res; \
+})
 #endif

 /*

From f05ff43355e6997c18f82ddcee370a6e5f8643ce Mon Sep 17 00:00:00 2001
From: Ralf Baechle
Date: Wed, 25 Mar 2015 13:21:51 +0100
Subject: [PATCH 21/33] MIPS: Octeon: Delete override of
 cpu_has_mips_r2_exec_hazard.

This is no longer needed with the fixed, new and improved definition of
cpu_has_mips_r2_exec_hazard in <asm/cpu-features.h>.

For a discussion, see http://patchwork.linux-mips.org/patch/9539/.

Signed-off-by: Ralf Baechle
---
 arch/mips/include/asm/mach-cavium-octeon/cpu-feature-overrides.h | 1 -
 1 file changed, 1 deletion(-)

diff --git a/arch/mips/include/asm/mach-cavium-octeon/cpu-feature-overrides.h b/arch/mips/include/asm/mach-cavium-octeon/cpu-feature-overrides.h
index fa1f3cfbae8d..d68e685cde60 100644
--- a/arch/mips/include/asm/mach-cavium-octeon/cpu-feature-overrides.h
+++ b/arch/mips/include/asm/mach-cavium-octeon/cpu-feature-overrides.h
@@ -50,7 +50,6 @@
 #define cpu_has_mips32r2	0
 #define cpu_has_mips64r1	0
 #define cpu_has_mips64r2	1
-#define cpu_has_mips_r2_exec_hazard 0
 #define cpu_has_dsp		0
 #define cpu_has_dsp2		0
 #define cpu_has_mipsmt		0

From 9eaffa84a8a46adab065c983401fc9d5949c958f Mon Sep 17 00:00:00 2001
From: Ralf Baechle
Date: Wed, 25 Mar 2015 13:18:27 +0100
Subject: [PATCH 22/33] Revert "MIPS: Avoid pipeline stalls on some MIPS32R2
 cores."

For a discussion, see http://patchwork.linux-mips.org/patch/9539/.

This reverts commit 625c0a21700bdb90844d926a1508a17a77e369c9.
---
 arch/mips/mm/tlbex.c | 21 ++------------------
 1 file changed, 2 insertions(+), 19 deletions(-)

diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c
index 7709920e0cef..971b1ee51234 100644
--- a/arch/mips/mm/tlbex.c
+++ b/arch/mips/mm/tlbex.c
@@ -512,26 +512,9 @@ static void build_tlb_write_entry(u32 **p, struct uasm_label **l,
 	case tlb_indexed: tlbw = uasm_i_tlbwi; break;
 	}

-	if (cpu_has_mips_r2_exec_hazard) {
-		/*
-		 * The architecture spec says an ehb is required here,
-		 * but a number of cores do not have the hazard and
-		 * using an ehb causes an expensive pipeline stall.
-		 */
-		switch (current_cpu_type()) {
-		case CPU_M14KC:
-		case CPU_74K:
-		case CPU_1074K:
-		case CPU_PROAPTIV:
-		case CPU_P5600:
-		case CPU_M5150:
-		case CPU_QEMU_GENERIC:
-			break;
-
-		default:
+	if (cpu_has_mips_r2_r6) {
+		if (cpu_has_mips_r2_exec_hazard)
 			uasm_i_ehb(p);
-			break;
-		}
 		tlbw(p);
 		return;
 	}

From f7f8aea4b97c4d48e42f02cb37026bee445f239f Mon Sep 17 00:00:00 2001
From: Markos Chandras
Date: Fri, 27 Feb 2015 07:51:32 +0000
Subject: [PATCH 23/33] MIPS: Malta: Detect and fix bad memsize values

memsize denotes the amount of RAM we can access from kseg{0,1} and that
should be up to 256M. In case the bootloader reports a value higher than
that (perhaps reporting all the available RAM) it's best if we fix it
ourselves and just warn the user about that. This is usually a problem
with the bootloader and/or its environment.

[ralf@linux-mips.org: Remove useless parens as suggested by Sergei.
Reformat long pr_warn statement to fit into 80 column limit.]
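A userspace sketch of the clamp being added (the 512M starting value is
assumed purely for illustration):

#include <stdio.h>

int main(void)
{
	unsigned long memsize = 0x20000000;  /* bootloader claims 512M */
	unsigned long limit = 256 << 20;     /* kseg0/1-addressable RAM */

	if (memsize > limit) {
		printf("Unsupported memsize value (0x%lx) detected! "
		       "Using 0x10000000 (256M) instead\n", memsize);
		memsize = limit;
	}
	printf("memsize = 0x%lx\n", memsize);
	return 0;
}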
Signed-off-by: Markos Chandras
Cc: # v3.15+
Cc: linux-mips@linux-mips.org
Patchwork: https://patchwork.linux-mips.org/patch/9362/
Signed-off-by: Ralf Baechle
---
 arch/mips/mti-malta/malta-memory.c | 6 ++++++
 1 file changed, 6 insertions(+)

diff --git a/arch/mips/mti-malta/malta-memory.c b/arch/mips/mti-malta/malta-memory.c
index 8fddd2cdbff7..efe366d618b1 100644
--- a/arch/mips/mti-malta/malta-memory.c
+++ b/arch/mips/mti-malta/malta-memory.c
@@ -53,6 +53,12 @@ fw_memblock_t * __init fw_getmdesc(int eva)
 		pr_warn("memsize not set in YAMON, set to default (32Mb)\n");
 		physical_memsize = 0x02000000;
 	} else {
+		if (memsize > (256 << 20)) { /* memsize should be capped to 256M */
+			pr_warn("Unsupported memsize value (0x%lx) detected! "
+				"Using 0x10000000 (256M) instead\n",
+				memsize);
+			memsize = 256 << 20;
+		}
 		/* If ememsize is set, then set physical_memsize to that */
 		physical_memsize = ememsize ? : memsize;
 	}

From 179fa46fb666c8f2aa2bbb1f3114d5d826d59d3d Mon Sep 17 00:00:00 2001
From: Adrien Schildknecht
Date: Wed, 25 Mar 2015 16:31:42 +0100
Subject: [PATCH 24/33] SSB: fix Kconfig dependencies

Commit 21400f252a97 ("MIPS: BCM47XX: Make ssb init NVRAM instead of
bcm47xx polling it") introduced a dependency on SSB_SFLASH but did not
add it to the Kconfig:

drivers/ssb/driver_mipscore.c:216:36: error: 'struct ssb_mipscore' has no member named 'sflash'
  struct ssb_sflash *sflash = &mcore->sflash;
                                    ^
drivers/ssb/driver_mipscore.c:249:12: error: dereferencing pointer to incomplete type
  if (sflash->present) {
            ^

Signed-off-by: Adrien Schildknecht
Cc: m@bues.ch
Cc: zajec5@gmail.com
Cc: linux-mips@linux-mips.org
Cc: linux-kernel@vger.kernel.org
Patchwork: https://patchwork.linux-mips.org/patch/9598/
Signed-off-by: Ralf Baechle
---
 drivers/ssb/Kconfig | 1 +
 1 file changed, 1 insertion(+)

diff --git a/drivers/ssb/Kconfig b/drivers/ssb/Kconfig
index 75b3603906c1..f0d22cdb51cd 100644
--- a/drivers/ssb/Kconfig
+++ b/drivers/ssb/Kconfig
@@ -130,6 +130,7 @@ config SSB_DRIVER_MIPS
 	bool "SSB Broadcom MIPS core driver"
 	depends on SSB && MIPS
 	select SSB_SERIAL
+	select SSB_SFLASH
 	help
 	  Driver for the Sonics Silicon Backplane attached
 	  Broadcom MIPS core.

From 93a7de8819a661d06eb11f4de3d6888b9a842b30 Mon Sep 17 00:00:00 2001
From: John Crispin
Date: Mon, 23 Feb 2015 06:17:32 +0100
Subject: [PATCH 25/33] MIPS: ralink: Fix bad config symbol in PCI makefile.

A wrong symbol is referenced by commit 187c26ddf0b2 ("MIPS: ralink: add
rt2880 pci driver"). Fix this by changing it to the correct symbol.
Signed-off-by: John Crispin
Reported-by: Paul Bolle
Cc: linux-mips@linux-mips.org
Patchwork: https://patchwork.linux-mips.org/patch/9298/
Signed-off-by: Ralf Baechle
---
 arch/mips/pci/Makefile | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/arch/mips/pci/Makefile b/arch/mips/pci/Makefile
index 300591c6278d..2eda01e6e08f 100644
--- a/arch/mips/pci/Makefile
+++ b/arch/mips/pci/Makefile
@@ -43,7 +43,7 @@ obj-$(CONFIG_SIBYTE_BCM1x80)	+= pci-bcm1480.o pci-bcm1480ht.o
 obj-$(CONFIG_SNI_RM)		+= fixup-sni.o ops-sni.o
 obj-$(CONFIG_LANTIQ)		+= fixup-lantiq.o
 obj-$(CONFIG_PCI_LANTIQ)	+= pci-lantiq.o ops-lantiq.o
-obj-$(CONFIG_SOC_RT2880)	+= pci-rt2880.o
+obj-$(CONFIG_SOC_RT288X)	+= pci-rt2880.o
 obj-$(CONFIG_SOC_RT3883)	+= pci-rt3883.o
 obj-$(CONFIG_TANBAC_TB0219)	+= fixup-tb0219.o
 obj-$(CONFIG_TANBAC_TB0226)	+= fixup-tb0226.o

From a7b7aad383c5dd9221a06e378197350dd27c1163 Mon Sep 17 00:00:00 2001
From: John Crispin
Date: Mon, 23 Feb 2015 06:17:33 +0100
Subject: [PATCH 26/33] MIPS: ralink: add missing symbol for RALINK_ILL_ACC

A driver was added in commit 5433acd81e87 ("MIPS: ralink: add illegal
access driver") without the Kconfig section being added. Fix this by
adding the symbol to the Kconfig file.

Signed-off-by: John Crispin
Reported-by: Paul Bolle
Cc: linux-mips@linux-mips.org
Patchwork: https://patchwork.linux-mips.org/patch/9299/
Signed-off-by: Ralf Baechle
---
 arch/mips/ralink/Kconfig | 5 +++++
 1 file changed, 5 insertions(+)

diff --git a/arch/mips/ralink/Kconfig b/arch/mips/ralink/Kconfig
index b1c52ca580f9..e9bc8c96174e 100644
--- a/arch/mips/ralink/Kconfig
+++ b/arch/mips/ralink/Kconfig
@@ -7,6 +7,11 @@ config CLKEVT_RT3352
 	select CLKSRC_OF
 	select CLKSRC_MMIO

+config RALINK_ILL_ACC
+	bool
+	depends on SOC_RT305X
+	default y
+
 choice
 	prompt "Ralink SoC selection"
 	default SOC_RT305X

From f8483988cadd7dd22de928db29ed3bcbe02faf78 Mon Sep 17 00:00:00 2001
From: James Hogan
Date: Wed, 25 Feb 2015 13:08:05 +0000
Subject: [PATCH 27/33] MIPS: lose_fpu(): Disable FPU when MSA enabled

The lose_fpu() function only disables the FPU in CP0_Status.CU1 if the
FPU is in use and MSA isn't enabled. This isn't necessarily a problem
because KSTK_STATUS(current), the version of CP0_Status stored on the
kernel stack on entry from user mode, does always get updated and gets
restored when returning to user mode, but I don't think it was intended,
and it is inconsistent with the case of only the FPU being in use.
Sometimes leaving the FPU enabled may also mask kernel bugs where FPU
operations are executed when the FPU might not be enabled. So let's
disable the FPU in the MSA case too.

Fixes: 33c771ba5c5d ("MIPS: save/disable MSA in lose_fpu")
Signed-off-by: James Hogan
Cc: Ralf Baechle
Cc: Paul Burton
Cc: linux-mips@linux-mips.org
Patchwork: https://patchwork.linux-mips.org/patch/9323/
Signed-off-by: Ralf Baechle
---
 arch/mips/include/asm/fpu.h | 1 +
 1 file changed, 1 insertion(+)

diff --git a/arch/mips/include/asm/fpu.h b/arch/mips/include/asm/fpu.h
index dd083e999b08..9f26b079cc6a 100644
--- a/arch/mips/include/asm/fpu.h
+++ b/arch/mips/include/asm/fpu.h
@@ -170,6 +170,7 @@ static inline void lose_fpu(int save)
 		}
 		disable_msa();
 		clear_thread_flag(TIF_USEDMSA);
+		__disable_fpu();
 	} else if (is_fpu_owner()) {
 		if (save)
 			_save_fp(current);

From 90db024f140d0d6ad960cc5f090e3c8ed890ca55 Mon Sep 17 00:00:00 2001
From: Niklas Cassel
Date: Thu, 15 Jan 2015 16:41:13 +0100
Subject: [PATCH 28/33] MIPS: smp-cps: cpu_set FPU mask if FPU present

If we have an FPU, enroll ourselves in the FPU-full mask,
matching the MT_SMP and CMP implementations of smp_setup.

Signed-off-by: Niklas Cassel
Cc: paul.burton@imgtec.com
Cc: linux-mips@linux-mips.org
Patchwork: https://patchwork.linux-mips.org/patch/8948/
Signed-off-by: Ralf Baechle
---
 arch/mips/kernel/smp-cps.c | 6 ++++++
 1 file changed, 6 insertions(+)

diff --git a/arch/mips/kernel/smp-cps.c b/arch/mips/kernel/smp-cps.c
index bed7590e475f..d5589bedd0a4 100644
--- a/arch/mips/kernel/smp-cps.c
+++ b/arch/mips/kernel/smp-cps.c
@@ -88,6 +88,12 @@ static void __init cps_smp_setup(void)

 	/* Make core 0 coherent with everything */
 	write_gcr_cl_coherence(0xff);
+
+#ifdef CONFIG_MIPS_MT_FPAFF
+	/* If we have an FPU, enroll ourselves in the FPU-full mask */
+	if (cpu_has_fpu)
+		cpu_set(0, mt_fpu_cpumask);
+#endif /* CONFIG_MIPS_MT_FPAFF */
 }

 static void __init cps_prepare_cpus(unsigned int max_cpus)

From a843d00d038b11267279e3b5388222320f9ddc1d Mon Sep 17 00:00:00 2001
From: Huacai Chen
Date: Sun, 29 Mar 2015 10:54:05 +0800
Subject: [PATCH 29/33] MIPS: Hibernate: flush TLB entries earlier

We found that a TLB mismatch not only happens after kernel resume, but
also during snapshot restore. So move the flush to the beginning of
swsusp_arch_resume().

Signed-off-by: Huacai Chen
Cc: Steven J. Hill
Cc: linux-mips@linux-mips.org
Cc: Fuxin Zhang
Cc: Zhangjin Wu
Cc: stable@vger.kernel.org
Patchwork: https://patchwork.linux-mips.org/patch/9621/
Signed-off-by: Ralf Baechle
---
 arch/mips/power/hibernate.S | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/arch/mips/power/hibernate.S b/arch/mips/power/hibernate.S
index 32a7c828f073..e7567c8a9e79 100644
--- a/arch/mips/power/hibernate.S
+++ b/arch/mips/power/hibernate.S
@@ -30,6 +30,8 @@ LEAF(swsusp_arch_suspend)
 	END(swsusp_arch_suspend)

 LEAF(swsusp_arch_resume)
+	/* Avoid TLB mismatch during and after kernel resume */
+	jal local_flush_tlb_all
 	PTR_L t0, restore_pblist
 0:
 	PTR_L t1, PBE_ADDRESS(t0)   /* source */
@@ -43,7 +45,6 @@ LEAF(swsusp_arch_resume)
 	bne t1, t3, 1b
 	PTR_L t0, PBE_NEXT(t0)
 	bnez t0, 0b
-	jal local_flush_tlb_all /* Avoid TLB mismatch after kernel resume */
 	PTR_LA t0, saved_regs
 	PTR_L ra, PT_R31(t0)
 	PTR_L sp, PT_R29(t0)

From 6ca716f2e5571d25a3899c6c5c91ff72ea6d6f5e Mon Sep 17 00:00:00 2001
From: Markos Chandras
Date: Tue, 25 Nov 2014 09:15:45 +0000
Subject: [PATCH 30/33] MIPS: Kconfig: Disable SMP/CPS for 64-bit

A 64-bit build for Malta produces far too many build problems when
SMP/CPS is selected. Moreover, there is currently no 64-bit product with
SMP/CPS, so we disable SMP/CPS when building for 64-bit until it is
properly supported.
Signed-off-by: Markos Chandras
Cc: Paul Burton
Cc: linux-mips@linux-mips.org
Patchwork: https://patchwork.linux-mips.org/patch/8573/
Signed-off-by: Ralf Baechle
---
 arch/mips/Kconfig | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index 7222592ec334..6e5323821cd2 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -2177,7 +2177,7 @@ config MIPS_CMP

 config MIPS_CPS
 	bool "MIPS Coherent Processing System support"
-	depends on SYS_SUPPORTS_MIPS_CPS
+	depends on SYS_SUPPORTS_MIPS_CPS && !64BIT
 	select MIPS_CM
 	select MIPS_CPC
 	select MIPS_CPS_PM if HOTPLUG_CPU

From 96f7c21363e0e0d19f3471f54a719ed06d708513 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Rafa=C5=82=20Mi=C5=82ecki?=
Date: Wed, 1 Apr 2015 16:01:02 +0200
Subject: [PATCH 31/33] MIPS: BCM47XX: Fix detecting Microsoft MN-700 & Asus
 WL500G
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

This code has been broken since the day it was added: we were iterating
over the wrong array and checking for the wrong NVRAM entry.

Signed-off-by: Rafał Miłecki
Cc: linux-mips@linux-mips.org
Cc: Hauke Mehrtens
Patchwork: https://patchwork.linux-mips.org/patch/9654/
Signed-off-by: Ralf Baechle
---
 arch/mips/bcm47xx/board.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/arch/mips/bcm47xx/board.c b/arch/mips/bcm47xx/board.c
index b3ae068ca4fa..3fd369d74444 100644
--- a/arch/mips/bcm47xx/board.c
+++ b/arch/mips/bcm47xx/board.c
@@ -247,8 +247,8 @@ static __init const struct bcm47xx_board_type *bcm47xx_board_get_nvram(void)
 	}

 	if (bcm47xx_nvram_getenv("hardware_version", buf1, sizeof(buf1)) >= 0 &&
-	    bcm47xx_nvram_getenv("boardtype", buf2, sizeof(buf2)) >= 0) {
-		for (e2 = bcm47xx_board_list_boot_hw; e2->value1; e2++) {
+	    bcm47xx_nvram_getenv("boardnum", buf2, sizeof(buf2)) >= 0) {
+		for (e2 = bcm47xx_board_list_hw_version_num; e2->value1; e2++) {
 			if (!strstarts(buf1, e2->value1) &&
 			    !strcmp(buf2, e2->value2))
 				return &e2->board;

From 48f8eaee3f59848809644507fc47363b37e54450 Mon Sep 17 00:00:00 2001
From: Markos Chandras
Date: Thu, 26 Feb 2015 11:11:30 +0000
Subject: [PATCH 32/33] MIPS: asm: elf: Set O32 default FPU flags

Set good default FPU flags (FR0) for O32 binaries, similar to what the
kernel does for the N64/N32 ones. This also fixes a regression
introduced in commit 46490b572544 ("MIPS: kernel: elf: Improve the
overall ABI and FPU mode checks") when MIPS_O32_FP64_SUPPORT is
disabled. In that case, mips_set_personality_fp() did not set the FPU
mode at all because it assumed that the FPU mode was already set
properly. That led to O32 userland problems.
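A toy model of the clear-then-set defaulting the patch performs in
SET_PERSONALITY (not the kernel API; the flag bit positions below are
made up for the demo and do not match the real TIF numbering):

#include <stdio.h>

/* Made-up bits, modelling clear_thread_flag()/set_thread_flag(). */
#define TIF_32BIT_FPREGS  (1u << 0) /* FR0: 32-bit/paired FP registers */
#define TIF_HYBRID_FPREGS (1u << 1) /* FRE hybrid FP mode */

static unsigned int thread_flags = TIF_HYBRID_FPREGS; /* leftover state */

int main(void)
{
	thread_flags &= ~TIF_HYBRID_FPREGS; /* clear_thread_flag(TIF_HYBRID_FPREGS) */
	thread_flags |= TIF_32BIT_FPREGS;   /* set_thread_flag(TIF_32BIT_FPREGS) */
	printf("thread_flags = %#x\n", thread_flags); /* 0x1: FR0 selected */
	return 0;
}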
Signed-off-by: Markos Chandras
Reported-by: Mans Rullgard
Fixes: 46490b572544 ("MIPS: kernel: elf: Improve the overall ABI and FPU mode checks")
Tested-by: Mans Rullgard
Tested-by: Aaro Koskinen
Cc: Matthew Fortune
Cc: Paul Burton
Cc: linux-mips@linux-mips.org
Patchwork: http://patchwork.linux-mips.org/patch/9344/
Signed-off-by: Ralf Baechle
---
 arch/mips/include/asm/elf.h | 5 +++++
 1 file changed, 5 insertions(+)

diff --git a/arch/mips/include/asm/elf.h b/arch/mips/include/asm/elf.h
index 535f196ffe02..694925a26924 100644
--- a/arch/mips/include/asm/elf.h
+++ b/arch/mips/include/asm/elf.h
@@ -294,6 +294,9 @@ do { \
 	if (personality(current->personality) != PER_LINUX) \
 		set_personality(PER_LINUX); \
 \
+	clear_thread_flag(TIF_HYBRID_FPREGS); \
+	set_thread_flag(TIF_32BIT_FPREGS); \
+ \
 	mips_set_personality_fp(state); \
 \
 	current->thread.abi = &mips_abi; \
@@ -319,6 +322,8 @@ do { \
 do { \
 	set_thread_flag(TIF_32BIT_REGS); \
 	set_thread_flag(TIF_32BIT_ADDR); \
+	clear_thread_flag(TIF_HYBRID_FPREGS); \
+	set_thread_flag(TIF_32BIT_FPREGS); \
 \
 	mips_set_personality_fp(state); \
 \

From 5306a5450824691e27d68f711758515debedeeac Mon Sep 17 00:00:00 2001
From: Markos Chandras
Date: Thu, 2 Apr 2015 14:42:52 +0100
Subject: [PATCH 33/33] MIPS: Makefile: Fix MIPS ASE detection code

Commit 32098ec7bcba ("MIPS: Makefile: Move the ASEs checks after setting
the core's CFLAGS") re-arranged the MIPS ASE detection code and also
added the current cflags to the detection logic. However, this
introduced a few bugs. First of all, the mips-cflags should not be
quoted since that ends up being passed as a string to subsequent
commands, leading to broken detection from the cc-option-* tools.
Moreover, in order to avoid duplicating the cflags-y because of how
cc-option works, we rework the logic so we pass only those cflags which
are needed by the selected ASE. Finally, fix some typos resulting in MSA
not being detected correctly.

Signed-off-by: Markos Chandras
Fixes: 32098ec7bcba ("MIPS: Makefile: Move the ASEs checks after setting the core's CFLAGS")
Cc: Maciej W. Rozycki
Cc: linux-mips@linux-mips.org
Patchwork: https://patchwork.linux-mips.org/patch/9661/
Signed-off-by: Ralf Baechle
---
 arch/mips/Makefile | 14 ++++++++++----
 1 file changed, 10 insertions(+), 4 deletions(-)

diff --git a/arch/mips/Makefile b/arch/mips/Makefile
index 8f57fc72d62c..1b4dab1e6ab8 100644
--- a/arch/mips/Makefile
+++ b/arch/mips/Makefile
@@ -197,11 +197,17 @@ endif
 # Warning: the 64-bit MIPS architecture does not support the `smartmips' extension
 # Pass -Wa,--no-warn to disable all assembler warnings until the kernel code has
 # been fixed properly.
-mips-cflags := "$(cflags-y)"
-cflags-$(CONFIG_CPU_HAS_SMARTMIPS) += $(call cc-option,$(mips-cflags),-msmartmips) -Wa,--no-warn
-cflags-$(CONFIG_CPU_MICROMIPS) += $(call cc-option,$(mips-cflags),-mmicromips)
+mips-cflags := $(cflags-y)
+ifeq ($(CONFIG_CPU_HAS_SMARTMIPS),y)
+smartmips-ase := $(call cc-option-yn,$(mips-cflags) -msmartmips)
+cflags-$(smartmips-ase) += -msmartmips -Wa,--no-warn
+endif
+ifeq ($(CONFIG_CPU_MICROMIPS),y)
+micromips-ase := $(call cc-option-yn,$(mips-cflags) -mmicromips)
+cflags-$(micromips-ase) += -mmicromips
+endif
 ifeq ($(CONFIG_CPU_HAS_MSA),y)
-toolchain-msa := $(call cc-option-yn,-$(mips-cflags),mhard-float -mfp64 -Wa$(comma)-mmsa)
+toolchain-msa := $(call cc-option-yn,$(mips-cflags) -mhard-float -mfp64 -Wa$(comma)-mmsa)
 cflags-$(toolchain-msa) += -DTOOLCHAIN_SUPPORTS_MSA
 endif