Merge tag 'arm64-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux

Pull arm64 fixes from Will Deacon:
 "There are some significant fixes in here for FP state corruption,
  hardware access/dirty PTE corruption and an erratum workaround for the
  Falkor CPU.

  I'm hoping that things finally settle down now, but never say never...

  Summary:

   - Fix FPSIMD context switch regression introduced in -rc2

   - Fix ABI break with SVE CPUID register reporting

   - Fix use of uninitialised variable

   - Fixes to hardware access/dirty management and sanity checking

   - CPU erratum workaround for Falkor CPUs

   - Fix reporting of writeable+executable mappings

   - Fix signal reporting for RAS errors"

* tag 'arm64-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux:
  arm64: fpsimd: Fix copying of FP state from signal frame into task struct
  arm64/sve: Report SVE to userspace via CPUID only if supported
  arm64: fix CONFIG_DEBUG_WX address reporting
  arm64: fault: avoid send SIGBUS two times
  arm64: hw_breakpoint: Use linux/uaccess.h instead of asm/uaccess.h
  arm64: Add software workaround for Falkor erratum 1041
  arm64: Define cputype macros for Falkor CPU
  arm64: mm: Fix false positives in set_pte_at access/dirty race detection
  arm64: mm: Fix pte_mkclean, pte_mkdirty semantics
  arm64: Initialise high_memory global variable earlier
Linus Torvalds 2017-12-15 12:44:49 -08:00
commit 06f976ecc7
17 changed files with 64 additions and 28 deletions

Documentation/arm64/silicon-errata.txt

@@ -75,3 +75,4 @@ stable kernels.
 | Qualcomm Tech. | Falkor v1       | E1003           | QCOM_FALKOR_ERRATUM_1003    |
 | Qualcomm Tech. | Falkor v1       | E1009           | QCOM_FALKOR_ERRATUM_1009    |
 | Qualcomm Tech. | QDF2400 ITS     | E0065           | QCOM_QDF2400_ERRATUM_0065   |
+| Qualcomm Tech. | Falkor v{1,2}   | E1041           | QCOM_FALKOR_ERRATUM_1041    |

arch/arm64/Kconfig

@@ -557,7 +557,6 @@ config QCOM_QDF2400_ERRATUM_0065
 
 	  If unsure, say Y.
 
-
 config SOCIONEXT_SYNQUACER_PREITS
 	bool "Socionext Synquacer: Workaround for GICv3 pre-ITS"
 	default y
@@ -576,6 +575,17 @@ config HISILICON_ERRATUM_161600802
 	  a 128kB offset to be applied to the target address in this commands.
 
 	  If unsure, say Y.
 
+config QCOM_FALKOR_ERRATUM_E1041
+	bool "Falkor E1041: Speculative instruction fetches might cause errant memory access"
+	default y
+	help
+	  Falkor CPU may speculatively fetch instructions from an improper
+	  memory location when MMU translation is changed from SCTLR_ELn[M]=1
+	  to SCTLR_ELn[M]=0. Prefix an ISB instruction to fix the problem.
+
+	  If unsure, say Y.
+
 endmenu
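
The help text above is the whole story of the workaround: an ISB must be architecturally executed immediately before the MSR that changes SCTLR_ELn[M] from 1 to 0, so the pipeline cannot speculatively fetch through a translation that is about to be switched off. A minimal C sketch of that ordering using inline assembly; disable_mmu_el1() is a made-up name for illustration, not the kernel's implementation (the kernel uses the pre_disable_mmu_workaround macro added below):

    /*
     * Sketch only: the E1041 ordering, for an aarch64 compiler.
     * Only meaningful at EL1; not kernel code.
     */
    static inline void disable_mmu_el1(void)
    {
        unsigned long sctlr;

        asm volatile("mrs %0, sctlr_el1" : "=r" (sctlr));
        sctlr &= ~1UL;                       /* clear SCTLR_EL1.M */
        asm volatile("isb\n\t"               /* E1041: fence speculative fetches */
                     "msr sctlr_el1, %0\n\t"
                     "isb"
                     : : "r" (sctlr));
    }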

arch/arm64/include/asm/assembler.h

@@ -512,4 +512,14 @@ alternative_else_nop_endif
 #endif
 	.endm
 
+/**
+ * Errata workaround prior to disable MMU. Insert an ISB immediately prior
+ * to executing the MSR that will change SCTLR_ELn[M] from a value of 1 to 0.
+ */
+	.macro	pre_disable_mmu_workaround
+#ifdef CONFIG_QCOM_FALKOR_ERRATUM_E1041
+	isb
+#endif
+	.endm
+
 #endif	/* __ASM_ASSEMBLER_H */

arch/arm64/include/asm/cpufeature.h

@@ -60,6 +60,9 @@ enum ftr_type {
 #define FTR_VISIBLE	true	/* Feature visible to the user space */
 #define FTR_HIDDEN	false	/* Feature is hidden from the user */
 
+#define FTR_VISIBLE_IF_IS_ENABLED(config)		\
+	(IS_ENABLED(config) ? FTR_VISIBLE : FTR_HIDDEN)
+
 struct arm64_ftr_bits {
 	bool		sign;	/* Value is signed ? */
 	bool		visible;
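
This macro is the hook used by the SVE fix in cpufeature.c further down: the SVE field of ID_AA64PFR0_EL1 is exposed to userspace only when CONFIG_ARM64_SVE is compiled in. A standalone userspace model of just that selection logic (the IS_ENABLED() stand-in and the test harness are ours, not kernel code):

    #include <stdbool.h>
    #include <stdio.h>

    #define FTR_VISIBLE true  /* Feature visible to the user space */
    #define FTR_HIDDEN  false /* Feature is hidden from the user */

    /* Stand-in for the kernel's IS_ENABLED(): 1 when the option is =y. */
    #define IS_ENABLED(option) option
    #define CONFIG_ARM64_SVE 0 /* pretend SVE support is compiled out */

    #define FTR_VISIBLE_IF_IS_ENABLED(config) \
        (IS_ENABLED(config) ? FTR_VISIBLE : FTR_HIDDEN)

    int main(void)
    {
        /* With CONFIG_ARM64_SVE unset, the field is reported hidden. */
        printf("SVE field visible: %s\n",
               FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SVE) ? "yes" : "no");
        return 0;
    }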

arch/arm64/include/asm/cputype.h

@@ -91,6 +91,7 @@
 #define BRCM_CPU_PART_VULCAN		0x516
 
 #define QCOM_CPU_PART_FALKOR_V1		0x800
+#define QCOM_CPU_PART_FALKOR		0xC00
 
 #define MIDR_CORTEX_A53 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A53)
 #define MIDR_CORTEX_A57 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A57)
@@ -99,6 +100,7 @@
 #define MIDR_THUNDERX_81XX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX_81XX)
 #define MIDR_THUNDERX_83XX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX_83XX)
 #define MIDR_QCOM_FALKOR_V1 MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_FALKOR_V1)
+#define MIDR_QCOM_FALKOR MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_FALKOR)
 
 #ifndef __ASSEMBLY__
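
For reference, MIDR_CPU_MODEL packs an implementer code into MIDR_EL1 bits [31:24] and a part number into bits [15:4], with the architecture field forced to 0xf. A standalone sketch intended to match that packing (the shift constants follow the architectural MIDR_EL1 layout; ARM_CPU_IMP_QCOM = 0x51 is assumed, since it is not shown in this hunk):

    #include <stdio.h>

    /* MIDR_EL1 field positions (architectural). */
    #define MIDR_IMPLEMENTOR_SHIFT  24
    #define MIDR_ARCHITECTURE_SHIFT 16
    #define MIDR_PARTNUM_SHIFT      4

    /* Intended to mirror the kernel macro of the same name. */
    #define MIDR_CPU_MODEL(imp, partnum)            \
        (((imp) << MIDR_IMPLEMENTOR_SHIFT) |        \
         (0xf << MIDR_ARCHITECTURE_SHIFT) |         \
         ((partnum) << MIDR_PARTNUM_SHIFT))

    #define ARM_CPU_IMP_QCOM     0x51  /* assumed implementer code */
    #define QCOM_CPU_PART_FALKOR 0xC00 /* from the hunk above */

    int main(void)
    {
        /* Prints 0x510fc000 for Falkor. */
        printf("MIDR_QCOM_FALKOR = 0x%08x\n",
               MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_FALKOR));
        return 0;
    }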

arch/arm64/include/asm/pgtable.h

@@ -42,6 +42,8 @@
 #include <asm/cmpxchg.h>
 #include <asm/fixmap.h>
 #include <linux/mmdebug.h>
+#include <linux/mm_types.h>
+#include <linux/sched.h>
 
 extern void __pte_error(const char *file, int line, unsigned long val);
 extern void __pmd_error(const char *file, int line, unsigned long val);
@@ -149,12 +151,20 @@ static inline pte_t pte_mkwrite(pte_t pte)
 
 static inline pte_t pte_mkclean(pte_t pte)
 {
-	return clear_pte_bit(pte, __pgprot(PTE_DIRTY));
+	pte = clear_pte_bit(pte, __pgprot(PTE_DIRTY));
+	pte = set_pte_bit(pte, __pgprot(PTE_RDONLY));
+
+	return pte;
 }
 
 static inline pte_t pte_mkdirty(pte_t pte)
 {
-	return set_pte_bit(pte, __pgprot(PTE_DIRTY));
+	pte = set_pte_bit(pte, __pgprot(PTE_DIRTY));
+
+	if (pte_write(pte))
+		pte = clear_pte_bit(pte, __pgprot(PTE_RDONLY));
+
+	return pte;
 }
 
 static inline pte_t pte_mkold(pte_t pte)
@@ -207,9 +217,6 @@ static inline void set_pte(pte_t *ptep, pte_t pte)
 	}
 }
 
-struct mm_struct;
-struct vm_area_struct;
-
 extern void __sync_icache_dcache(pte_t pteval, unsigned long addr);
 
 /*
@@ -238,7 +245,8 @@ static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
 	 * hardware updates of the pte (ptep_set_access_flags safely changes
 	 * valid ptes without going through an invalid entry).
 	 */
-	if (pte_valid(*ptep) && pte_valid(pte)) {
+	if (IS_ENABLED(CONFIG_DEBUG_VM) && pte_valid(*ptep) && pte_valid(pte) &&
+	    (mm == current->active_mm || atomic_read(&mm->mm_users) > 1)) {
 		VM_WARN_ONCE(!pte_young(pte),
 			     "%s: racy access flag clearing: 0x%016llx -> 0x%016llx",
 			     __func__, pte_val(*ptep), pte_val(pte));
@@ -641,28 +649,23 @@ static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
 
 /*
- * ptep_set_wrprotect - mark read-only while preserving the hardware update of
- * the Access Flag.
+ * ptep_set_wrprotect - mark read-only while transferring potential hardware
+ * dirty status (PTE_DBM && !PTE_RDONLY) to the software PTE_DIRTY bit.
  */
 #define __HAVE_ARCH_PTEP_SET_WRPROTECT
 static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long address, pte_t *ptep)
 {
 	pte_t old_pte, pte;
 
-	/*
-	 * ptep_set_wrprotect() is only called on CoW mappings which are
-	 * private (!VM_SHARED) with the pte either read-only (!PTE_WRITE &&
-	 * PTE_RDONLY) or writable and software-dirty (PTE_WRITE &&
-	 * !PTE_RDONLY && PTE_DIRTY); see is_cow_mapping() and
-	 * protection_map[]. There is no race with the hardware update of the
-	 * dirty state: clearing of PTE_RDONLY when PTE_WRITE (a.k.a. PTE_DBM)
-	 * is set.
-	 */
-	VM_WARN_ONCE(pte_write(*ptep) && !pte_dirty(*ptep),
-		     "%s: potential race with hardware DBM", __func__);
-
 	pte = READ_ONCE(*ptep);
 	do {
 		old_pte = pte;
+		/*
+		 * If hardware-dirty (PTE_WRITE/DBM bit set and PTE_RDONLY
+		 * clear), set the PTE_DIRTY bit.
+		 */
+		if (pte_hw_dirty(pte))
+			pte = pte_mkdirty(pte);
 		pte = pte_wrprotect(pte);
 		pte_val(pte) = cmpxchg_relaxed(&pte_val(*ptep),
 					       pte_val(old_pte), pte_val(pte));
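
All three pgtable.h changes revolve around hardware DBM: when PTE_WRITE (aliased to PTE_DBM) is set, the CPU marks a page dirty by clearing PTE_RDONLY, so "writable and not read-only" must be read as dirty, and that state has to be folded into the software PTE_DIRTY bit before a pte is made read-only. A compilable userspace model of the corrected semantics (bit positions are illustrative, not the real arm64 PTE encoding, and the cmpxchg loop is omitted):

    #include <stdbool.h>
    #include <stdio.h>

    typedef unsigned long pte_t;

    /* Illustrative bit positions, not the arm64 layout. */
    #define PTE_DIRTY  (1UL << 0) /* software dirty bit */
    #define PTE_RDONLY (1UL << 1) /* hardware read-only bit */
    #define PTE_WRITE  (1UL << 2) /* a.k.a. PTE_DBM */

    static bool pte_write(pte_t pte)    { return pte & PTE_WRITE; }
    /* Hardware-dirty: DBM set and the CPU has cleared PTE_RDONLY. */
    static bool pte_hw_dirty(pte_t pte) { return pte_write(pte) && !(pte & PTE_RDONLY); }
    static bool pte_dirty(pte_t pte)    { return (pte & PTE_DIRTY) || pte_hw_dirty(pte); }

    /* Fixed pte_mkclean: also set PTE_RDONLY, otherwise a DBM pte
     * would still read back as hardware-dirty. */
    static pte_t pte_mkclean(pte_t pte) { return (pte & ~PTE_DIRTY) | PTE_RDONLY; }

    /* Fixed pte_mkdirty: a writable (DBM) pte must drop PTE_RDONLY. */
    static pte_t pte_mkdirty(pte_t pte)
    {
        pte |= PTE_DIRTY;
        if (pte_write(pte))
            pte &= ~PTE_RDONLY;
        return pte;
    }

    static pte_t pte_wrprotect(pte_t pte) { return (pte & ~PTE_WRITE) | PTE_RDONLY; }

    /* Models the fixed ptep_set_wrprotect(): transfer hardware dirt
     * into PTE_DIRTY before write-protecting, so it is not lost. */
    static pte_t set_wrprotect(pte_t pte)
    {
        if (pte_hw_dirty(pte))
            pte = pte_mkdirty(pte);
        return pte_wrprotect(pte);
    }

    int main(void)
    {
        pte_t pte = PTE_WRITE; /* writable; CPU dirtied it via DBM */

        printf("dirty before wrprotect: %d\n", pte_dirty(pte));              /* 1 */
        pte = set_wrprotect(pte);
        printf("dirty after wrprotect:  %d\n", pte_dirty(pte));              /* still 1 */
        printf("dirty after mkclean:    %d\n", pte_dirty(pte_mkclean(pte))); /* 0 */
        return 0;
    }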

arch/arm64/kernel/cpu-reset.S

@@ -37,6 +37,7 @@ ENTRY(__cpu_soft_restart)
 	mrs	x12, sctlr_el1
 	ldr	x13, =SCTLR_ELx_FLAGS
 	bic	x12, x12, x13
+	pre_disable_mmu_workaround
 	msr	sctlr_el1, x12
 	isb

arch/arm64/kernel/cpufeature.c

@@ -145,7 +145,8 @@ static const struct arm64_ftr_bits ftr_id_aa64isar1[] = {
 };
 
 static const struct arm64_ftr_bits ftr_id_aa64pfr0[] = {
-	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_SVE_SHIFT, 4, 0),
+	ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SVE),
+		       FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_SVE_SHIFT, 4, 0),
 	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_GIC_SHIFT, 4, 0),
 	S_ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_ASIMD_SHIFT, 4, ID_AA64PFR0_ASIMD_NI),
 	S_ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_FP_SHIFT, 4, ID_AA64PFR0_FP_NI),

arch/arm64/kernel/efi-entry.S

@@ -96,6 +96,7 @@ ENTRY(entry)
 	mrs	x0, sctlr_el2
 	bic	x0, x0, #1 << 0		// clear SCTLR.M
 	bic	x0, x0, #1 << 2		// clear SCTLR.C
+	pre_disable_mmu_workaround
 	msr	sctlr_el2, x0
 	isb
 	b	2f
@@ -103,6 +104,7 @@ ENTRY(entry)
 	mrs	x0, sctlr_el1
 	bic	x0, x0, #1 << 0		// clear SCTLR.M
 	bic	x0, x0, #1 << 2		// clear SCTLR.C
+	pre_disable_mmu_workaround
 	msr	sctlr_el1, x0
 	isb
 2:

arch/arm64/kernel/fpsimd.c

@@ -1043,7 +1043,7 @@ void fpsimd_update_current_state(struct fpsimd_state *state)
 	local_bh_disable();
 
-	current->thread.fpsimd_state = *state;
+	current->thread.fpsimd_state.user_fpsimd = state->user_fpsimd;
 	if (system_supports_sve() && test_thread_flag(TIF_SVE))
 		fpsimd_to_sve(current);

arch/arm64/kernel/head.S

@@ -750,6 +750,7 @@ __primary_switch:
 	 * to take into account by discarding the current kernel mapping and
 	 * creating a new one.
 	 */
+	pre_disable_mmu_workaround
 	msr	sctlr_el1, x20			// disable the MMU
 	isb
 	bl	__create_page_tables		// recreate kernel mapping

arch/arm64/kernel/hw_breakpoint.c

@@ -28,6 +28,7 @@
 #include <linux/perf_event.h>
 #include <linux/ptrace.h>
 #include <linux/smp.h>
+#include <linux/uaccess.h>
 
 #include <asm/compat.h>
 #include <asm/current.h>
@@ -36,7 +37,6 @@
 #include <asm/traps.h>
 #include <asm/cputype.h>
 #include <asm/system_misc.h>
-#include <asm/uaccess.h>
 
 /* Breakpoint currently in use for each BRP. */
 static DEFINE_PER_CPU(struct perf_event *, bp_on_reg[ARM_MAX_BRP]);

arch/arm64/kernel/relocate_kernel.S

@@ -45,6 +45,7 @@ ENTRY(arm64_relocate_new_kernel)
 	mrs	x0, sctlr_el2
 	ldr	x1, =SCTLR_ELx_FLAGS
 	bic	x0, x0, x1
+	pre_disable_mmu_workaround
 	msr	sctlr_el2, x0
 	isb
 1:

arch/arm64/kvm/hyp-init.S

@@ -151,6 +151,7 @@ reset:
 	mrs	x5, sctlr_el2
 	ldr	x6, =SCTLR_ELx_FLAGS
 	bic	x5, x5, x6		// Clear SCTL_M and etc
+	pre_disable_mmu_workaround
 	msr	sctlr_el2, x5
 	isb

arch/arm64/mm/dump.c

@@ -389,7 +389,7 @@ void ptdump_check_wx(void)
 		.check_wx = true,
 	};
 
-	walk_pgd(&st, &init_mm, 0);
+	walk_pgd(&st, &init_mm, VA_START);
 	note_page(&st, 0, 0, 0);
 	if (st.wx_pages || st.uxn_pages)
 		pr_warn("Checked W+X mappings: FAILED, %lu W+X pages found, %lu non-UXN pages found\n",

arch/arm64/mm/fault.c

@@ -574,7 +574,6 @@ static int do_sea(unsigned long addr, unsigned int esr, struct pt_regs *regs)
 {
 	struct siginfo info;
 	const struct fault_info *inf;
-	int ret = 0;
 
 	inf = esr_to_fault_info(esr);
 	pr_err("Synchronous External Abort: %s (0x%08x) at 0x%016lx\n",
@@ -589,7 +588,7 @@
 	if (interrupts_enabled(regs))
 		nmi_enter();
 
-	ret = ghes_notify_sea();
+	ghes_notify_sea();
 
 	if (interrupts_enabled(regs))
 		nmi_exit();
@@ -604,7 +603,7 @@
 	info.si_addr = (void __user *)addr;
 	arm64_notify_die("", regs, &info, esr);
 
-	return ret;
+	return 0;
 }
 
 static const struct fault_info fault_info[] = {

arch/arm64/mm/init.c

@@ -476,6 +476,8 @@ void __init arm64_memblock_init(void)
 	reserve_elfcorehdr();
 
+	high_memory = __va(memblock_end_of_DRAM() - 1) + 1;
+
 	dma_contiguous_reserve(arm64_dma_phys_limit);
 
 	memblock_allow_resize();
@@ -502,7 +504,6 @@ void __init bootmem_init(void)
 	sparse_init();
 	zone_sizes_init(min, max);
 
-	high_memory = __va((max << PAGE_SHIFT) - 1) + 1;
 	memblock_dump_all();
 }