
arm64 fixes for -rc5

- Fix possible memory hotplug failure with KASLR

- Fix FFR value in SVE kselftest

- Fix backtraces reported in /proc/$pid/stack

- Disable broken CnP implementation on NVIDIA Carmel

- Typo fixes and ACPI documentation clarification

- Fix some W=1 warnings
-----BEGIN PGP SIGNATURE-----

iQFEBAABCgAuFiEEPxTL6PPUbjXGY88ct6xw3ITBYzQFAmBccr0QHHdpbGxAa2Vy
bmVsLm9yZwAKCRC3rHDchMFjNG6UCACDbz3BO/y40wRhWwMhvDhyFDqtlTlVEQlb
hxnJzksXOlbqHB1J7yamzXxS1UlCBlhvjrFNTe1s5LJIfB0niMskYLe2p0dJ/voi
WyysvaiK7/1bZV/RRdF7r+hFtMPHBEAKfgs+ZxFN9mnMcserV8PWqiD5ookCqavE
xatE/fEgVujiISl/BOkP1pnmWnPM4f9BIMS5DgaZJsNDYtxeu9a3RGnfu9vNHaP2
gxq5+E3BjZfh1z0++HP6nTuDbdDaxEz12gyoZ+4wejXVhwj1g7NySJNa8RmJG9pU
gX+jE6HOgeCFIEe9Gx+I2QtAaFia96HVnAAHagGBHB1vfV7GTRxN
=tzbO
-----END PGP SIGNATURE-----

Merge tag 'arm64-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux

Pull arm64 fixes from Will Deacon:
 "Minor fixes all over, ranging from typos to tests to errata
  workarounds:

   - Fix possible memory hotplug failure with KASLR

   - Fix FFR value in SVE kselftest

   - Fix backtraces reported in /proc/$pid/stack

   - Disable broken CnP implementation on NVIDIA Carmel

   - Typo fixes and ACPI documentation clarification

   - Fix some W=1 warnings"

* tag 'arm64-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux:
  arm64: kernel: disable CNP on Carmel
  arm64/process.c: fix Wmissing-prototypes build warnings
  kselftest/arm64: sve: Do not use non-canonical FFR register value
  arm64: mm: correct the inside linear map range during hotplug check
  arm64: kdump: update ppos when reading elfcorehdr
  arm64: cpuinfo: Fix a typo
  Documentation: arm64/acpi : clarify arm64 support of IBFT
  arm64: stacktrace: don't trace arch_stack_walk()
  arm64: csum: cast to the proper type
Linus Torvalds 2021-03-25 11:07:40 -07:00
commit 43f0b56259
15 changed files with 83 additions and 20 deletions

Documentation/arm64/acpi_object_usage.rst

@@ -17,12 +17,12 @@ For ACPI on arm64, tables also fall into the following categories:

        -  Recommended: BERT, EINJ, ERST, HEST, PCCT, SSDT

-       -  Optional: BGRT, CPEP, CSRT, DBG2, DRTM, ECDT, FACS, FPDT, IORT,
-          MCHI, MPST, MSCT, NFIT, PMTT, RASF, SBST, SLIT, SPMI, SRAT, STAO,
-          TCPA, TPM2, UEFI, XENV
+       -  Optional: BGRT, CPEP, CSRT, DBG2, DRTM, ECDT, FACS, FPDT, IBFT,
+          IORT, MCHI, MPST, MSCT, NFIT, PMTT, RASF, SBST, SLIT, SPMI, SRAT,
+          STAO, TCPA, TPM2, UEFI, XENV

-       -  Not supported: BOOT, DBGP, DMAR, ETDT, HPET, IBFT, IVRS, LPIT,
-          MSDM, OEMx, PSDT, RSDT, SLIC, WAET, WDAT, WDRT, WPBT
+       -  Not supported: BOOT, DBGP, DMAR, ETDT, HPET, IVRS, LPIT, MSDM, OEMx,
+          PSDT, RSDT, SLIC, WAET, WDAT, WDRT, WPBT

 ====== ========================================================================
 Table  Usage for ARMv8 Linux

Documentation/arm64/silicon-errata.rst

@@ -130,6 +130,9 @@ stable kernels.
 | Marvell        | ARM-MMU-500     | #582743         | N/A                         |
 +----------------+-----------------+-----------------+-----------------------------+
 +----------------+-----------------+-----------------+-----------------------------+
+| NVIDIA         | Carmel Core     | N/A             | NVIDIA_CARMEL_CNP_ERRATUM   |
++----------------+-----------------+-----------------+-----------------------------+
++----------------+-----------------+-----------------+-----------------------------+
 | Freescale/NXP  | LS2080A/LS1043A | A-008585        | FSL_ERRATUM_A008585         |
 +----------------+-----------------+-----------------+-----------------------------+
 +----------------+-----------------+-----------------+-----------------------------+

arch/arm64/Kconfig

@@ -810,6 +810,16 @@ config QCOM_FALKOR_ERRATUM_E1041

 	  If unsure, say Y.

+config NVIDIA_CARMEL_CNP_ERRATUM
+	bool "NVIDIA Carmel CNP: CNP on Carmel semantically different than ARM cores"
+	default y
+	help
+	  If CNP is enabled on Carmel cores, non-sharable TLBIs on a core will not
+	  invalidate shared TLB entries installed by a different core, as it would
+	  on standard ARM cores.
+
+	  If unsure, say Y.
+
 config SOCIONEXT_SYNQUACER_PREITS
 	bool "Socionext Synquacer: Workaround for GICv3 pre-ITS"
 	default y
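
The "non-sharable" vs. broadcast distinction in the help text corresponds to the two forms of the AArch64 TLBI instruction. A rough sketch of the two scopes (hypothetical helper names, simplified operand encoding with the ASID omitted; the kernel itself issues these through its __tlbi() macro):

#define PAGE_SHIFT	12

/* Non-shareable: invalidates this core's TLB only. On standard ARM cores
 * with CnP enabled this still removes shared entries installed by the
 * other core of the pair; on Carmel it does not, hence the workaround
 * simply keeps CnP disabled.
 */
static inline void tlbi_page_local(unsigned long addr)
{
	asm volatile("tlbi vale1, %0" : : "r" (addr >> PAGE_SHIFT) : "memory");
}

/* Inner-shareable: broadcast to every core in the domain. */
static inline void tlbi_page_broadcast(unsigned long addr)
{
	asm volatile("tlbi vale1is, %0" : : "r" (addr >> PAGE_SHIFT) : "memory");
}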

arch/arm64/include/asm/checksum.h

@@ -37,7 +37,7 @@ static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl)
 	} while (--n > 0);

 	sum += ((sum >> 32) | (sum << 32));
-	return csum_fold((__force u32)(sum >> 32));
+	return csum_fold((__force __wsum)(sum >> 32));
 }
 #define ip_fast_csum ip_fast_csum
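
The one-line fix above only changes the sparse type annotation (csum_fold() takes a __wsum), not the arithmetic. The folding scheme itself is easy to model in user space; a stand-alone sketch with plain integer types, dropping the kernel's __wsum/__force annotations:

#include <stdint.h>
#include <stdio.h>

static uint16_t csum_fold(uint32_t csum)
{
	uint32_t sum = csum;

	sum += (sum >> 16) | (sum << 16);	/* rotate-add folds the carry back in */
	return (uint16_t)~(sum >> 16);
}

static uint16_t ip_fast_csum(const void *iph, unsigned int ihl)
{
	const uint32_t *p = iph;
	uint64_t sum = 0;
	unsigned int n = ihl;

	do {
		sum += *p++;			/* 32-bit words of the IP header */
	} while (--n > 0);

	sum += (sum >> 32) | (sum << 32);	/* fold 64 bits down to 32 */
	return csum_fold((uint32_t)(sum >> 32));
}

int main(void)
{
	/* arbitrary 20-byte IPv4 header with the checksum field zeroed */
	static const uint32_t hdr[5] = {
		0x00450000, 0x40a67354, 0x00004011, 0x0101a8c0, 0x0201a8c0
	};

	printf("checksum: %#06x\n", ip_fast_csum(hdr, 5));
	return 0;
}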

arch/arm64/include/asm/cpucaps.h

@@ -66,7 +66,8 @@
 #define ARM64_WORKAROUND_1508412		58
 #define ARM64_HAS_LDAPR				59
 #define ARM64_KVM_PROTECTED_MODE		60
+#define ARM64_WORKAROUND_NVIDIA_CARMEL_CNP	61

-#define ARM64_NCAPS				61
+#define ARM64_NCAPS				62

 #endif /* __ASM_CPUCAPS_H */

arch/arm64/include/asm/processor.h

@@ -251,6 +251,8 @@ unsigned long get_wchan(struct task_struct *p);

 extern struct task_struct *cpu_switch_to(struct task_struct *prev,
 					 struct task_struct *next);

+asmlinkage void arm64_preempt_schedule_irq(void);
+
 #define task_pt_regs(p) \
 	((struct pt_regs *)(THREAD_SIZE + task_stack_page(p)) - 1)

arch/arm64/include/asm/thread_info.h

@@ -55,6 +55,8 @@ void arch_setup_new_exec(void);
 #define arch_setup_new_exec arch_setup_new_exec

 void arch_release_task_struct(struct task_struct *tsk);
+int arch_dup_task_struct(struct task_struct *dst,
+			 struct task_struct *src);

 #endif

arch/arm64/kernel/cpu_errata.c

@@ -525,6 +525,14 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
 			0, 0,
 			1, 0),
 	},
 #endif
+#ifdef CONFIG_NVIDIA_CARMEL_CNP_ERRATUM
+	{
+		/* NVIDIA Carmel */
+		.desc = "NVIDIA Carmel CNP erratum",
+		.capability = ARM64_WORKAROUND_NVIDIA_CARMEL_CNP,
+		ERRATA_MIDR_ALL_VERSIONS(MIDR_NVIDIA_CARMEL),
+	},
+#endif
 	{
 	}

arch/arm64/kernel/cpufeature.c

@@ -1321,7 +1321,10 @@ has_useable_cnp(const struct arm64_cpu_capabilities *entry, int scope)
 	 * may share TLB entries with a CPU stuck in the crashed
 	 * kernel.
 	 */
-	if (is_kdump_kernel())
+	if (is_kdump_kernel())
+		return false;
+
+	if (cpus_have_const_cap(ARM64_WORKAROUND_NVIDIA_CARMEL_CNP))
 		return false;

 	return has_cpuid_feature(entry, scope);

arch/arm64/kernel/cpuinfo.c

@@ -353,7 +353,7 @@ static void __cpuinfo_store_cpu(struct cpuinfo_arm64 *info)
 	 * with the CLIDR_EL1 fields to avoid triggering false warnings
 	 * when there is a mismatch across the CPUs. Keep track of the
 	 * effective value of the CTR_EL0 in our internal records for
-	 * acurate sanity check and feature enablement.
+	 * accurate sanity check and feature enablement.
 	 */
 	info->reg_ctr = read_cpuid_effective_cachetype();
 	info->reg_dczid = read_cpuid(DCZID_EL0);

arch/arm64/kernel/crash_dump.c

@@ -64,5 +64,7 @@ ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
 ssize_t elfcorehdr_read(char *buf, size_t count, u64 *ppos)
 {
 	memcpy(buf, phys_to_virt((phys_addr_t)*ppos), count);
+	*ppos += count;
+
 	return count;
 }
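
The added *ppos update matters because the vmcore code reads the ELF core header in several chunks through this helper and relies on the position cookie advancing between calls. A toy user-space model of that calling pattern (names and sizes made up for illustration):

#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/types.h>

static const char elfcorehdr[64] = "pretend ELF core header bytes";

static ssize_t elfcorehdr_read(char *buf, size_t count, uint64_t *ppos)
{
	memcpy(buf, elfcorehdr + *ppos, count);
	*ppos += count;	/* the fix: without this, every call re-reads offset 0 */
	return count;
}

int main(void)
{
	char chunk[16];
	uint64_t pos = 0;

	while (pos < sizeof(elfcorehdr)) {	/* sequential chunked reads */
		elfcorehdr_read(chunk, sizeof(chunk), &pos);
		printf("read 16 bytes, pos now %llu\n", (unsigned long long)pos);
	}
	return 0;
}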

arch/arm64/kernel/process.c

@@ -57,6 +57,8 @@
 #include <asm/processor.h>
 #include <asm/pointer_auth.h>
 #include <asm/stacktrace.h>
+#include <asm/switch_to.h>
+#include <asm/system_misc.h>

 #if defined(CONFIG_STACKPROTECTOR) && !defined(CONFIG_STACKPROTECTOR_PER_TASK)
 #include <linux/stackprotector.h>

arch/arm64/kernel/stacktrace.c

@@ -194,8 +194,9 @@ void show_stack(struct task_struct *tsk, unsigned long *sp, const char *loglvl)

 #ifdef CONFIG_STACKTRACE

-void arch_stack_walk(stack_trace_consume_fn consume_entry, void *cookie,
-		     struct task_struct *task, struct pt_regs *regs)
+noinline void arch_stack_walk(stack_trace_consume_fn consume_entry,
+			      void *cookie, struct task_struct *task,
+			      struct pt_regs *regs)
 {
 	struct stackframe frame;

@@ -203,8 +204,8 @@ void arch_stack_walk(stack_trace_consume_fn consume_entry, void *cookie,
 		start_backtrace(&frame, regs->regs[29], regs->pc);
 	else if (task == current)
 		start_backtrace(&frame,
-				(unsigned long)__builtin_frame_address(0),
-				(unsigned long)arch_stack_walk);
+				(unsigned long)__builtin_frame_address(1),
+				(unsigned long)__builtin_return_address(0));
 	else
 		start_backtrace(&frame, thread_saved_fp(task),
 				thread_saved_pc(task));
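
The effect of pairing noinline with the builtin change is easiest to see outside the kernel: with frame pointers enabled, __builtin_frame_address(1) and __builtin_return_address(0) describe the caller, so the walker's own frame never appears in the trace. A minimal user-space sketch (compile with -fno-omit-frame-pointer):

#include <stdio.h>

/* noinline matters for the same reason as in the patch: if the walker were
 * inlined, "one frame up" would no longer mean its caller.
 */
static __attribute__((noinline)) void walk(void)
{
	void *own_fp    = __builtin_frame_address(0);	/* old starting point */
	void *caller_fp = __builtin_frame_address(1);	/* new starting point */
	void *caller_pc = __builtin_return_address(0);

	printf("own fp %p, caller fp %p, caller pc %p\n",
	       own_fp, caller_fp, caller_pc);
}

int main(void)
{
	walk();	/* a trace started this way begins at main(), not at walk() */
	return 0;
}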

arch/arm64/mm/mmu.c

@@ -1448,6 +1448,22 @@ static void __remove_pgd_mapping(pgd_t *pgdir, unsigned long start, u64 size)
 struct range arch_get_mappable_range(void)
 {
 	struct range mhp_range;
+	u64 start_linear_pa = __pa(_PAGE_OFFSET(vabits_actual));
+	u64 end_linear_pa = __pa(PAGE_END - 1);
+
+	if (IS_ENABLED(CONFIG_RANDOMIZE_BASE)) {
+		/*
+		 * Check for a wrap, it is possible because of randomized linear
+		 * mapping the start physical address is actually bigger than
+		 * the end physical address. In this case set start to zero
+		 * because [0, end_linear_pa] range must still be able to cover
+		 * all addressable physical addresses.
+		 */
+		if (start_linear_pa > end_linear_pa)
+			start_linear_pa = 0;
+	}
+
+	WARN_ON(start_linear_pa > end_linear_pa);

 	/*
 	 * Linear mapping region is the range [PAGE_OFFSET..(PAGE_END - 1)]
@@ -1455,8 +1471,9 @@ struct range arch_get_mappable_range(void)
 	 * range which can be mapped inside this linear mapping range, must
 	 * also be derived from its end points.
 	 */
-	mhp_range.start = __pa(_PAGE_OFFSET(vabits_actual));
-	mhp_range.end = __pa(PAGE_END - 1);
+	mhp_range.start = start_linear_pa;
+	mhp_range.end = end_linear_pa;
+
 	return mhp_range;
 }
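
A worked example of the wrap described in the comment, with made-up physical addresses: KASLR can slide the linear map so that the physical address of its start exceeds that of its end, at which point [start, end] is no longer a usable range and the start must be clamped to zero:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* illustrative values only */
	uint64_t start_linear_pa = 0x880000000ULL;	/* __pa(_PAGE_OFFSET(...)) after KASLR */
	uint64_t end_linear_pa   = 0x17fffffffULL;	/* __pa(PAGE_END - 1) */

	if (start_linear_pa > end_linear_pa)
		start_linear_pa = 0;	/* [0, end] still covers all usable PAs */

	printf("hotplug range: [%#llx, %#llx]\n",
	       (unsigned long long)start_linear_pa,
	       (unsigned long long)end_linear_pa);
	return 0;
}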

tools/testing/selftests/arm64/fp/sve-test.S

@@ -284,16 +284,28 @@ endfunction
 // Set up test pattern in the FFR
 // x0: pid
 // x2: generation
+//
+// We need to generate a canonical FFR value, which consists of a number of
+// low "1" bits, followed by a number of zeros. This gives us 17 unique values
+// per 16 bits of FFR, so we create a 4 bit signature out of the PID and
+// generation, and use that as the initial number of ones in the pattern.
+// We fill the upper lanes of FFR with zeros.
 // Beware: corrupts P0.
 function setup_ffr
 	mov	x4, x30

-	bl	pattern
+	and	w0, w0, #0x3
+	bfi	w0, w2, #2, #2
+	mov	w1, #1
+	lsl	w1, w1, w0
+	sub	w1, w1, #1
+
 	ldr	x0, =ffrref
-	ldr	x1, =scratch
-	rdvl	x2, #1
-	lsr	x2, x2, #3
-	bl	memcpy
+	strh	w1, [x0], 2
+	rdvl	x1, #1
+	lsr	x1, x1, #3
+	sub	x1, x1, #2
+	bl	memclr

 	mov	x0, #0
 	ldr	x1, =ffrref
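
The and/bfi/lsl/sub sequence above maps directly onto the arithmetic in the new comment; a C model of the canonical value written into the first 16-bit slice of ffrref (the upper lanes are cleared to zero by the memclr call):

#include <stdint.h>
#include <stdio.h>

/* Build a 4-bit signature from the PID and generation, then set that many
 * low "1" bits: ones from bit 0 upwards with zeros above is exactly the
 * canonical form FFR requires.
 */
static uint16_t ffr_pattern(unsigned int pid, unsigned int generation)
{
	unsigned int sig = (pid & 0x3) | ((generation & 0x3) << 2);

	return (uint16_t)((1u << sig) - 1);
}

int main(void)
{
	printf("pid=5 gen=2 -> FFR slice %#06x\n", ffr_pattern(5, 2));	/* 0x01ff */
	return 0;
}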