/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_POWERPC_BOOK3S_64_KUP_RADIX_H
#define _ASM_POWERPC_BOOK3S_64_KUP_RADIX_H

#include <linux/const.h>
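
/*
 * AMR values for blocking kernel access to user memory.  The block-read and
 * block-write bits are the top two bits of the 64-bit AMR; AMR_KUAP_SHIFT
 * lets assembly rebuild AMR_KUAP_BLOCKED with an li/sldi pair instead of
 * loading a full 64-bit constant.
 */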
#define AMR_KUAP_BLOCK_READ	UL(0x4000000000000000)
#define AMR_KUAP_BLOCK_WRITE	UL(0x8000000000000000)
#define AMR_KUAP_BLOCKED	(AMR_KUAP_BLOCK_READ | AMR_KUAP_BLOCK_WRITE)
#define AMR_KUAP_SHIFT		62

#ifdef __ASSEMBLY__
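
/*
 * Restore the AMR to the value saved in pt_regs (regs->kuap) on the stack,
 * on the interrupt exit path.  The MMU feature section patches this out to
 * nops when radix KUAP is not active.
 */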
.macro kuap_restore_amr	gpr
#ifdef CONFIG_PPC_KUAP
	BEGIN_MMU_FTR_SECTION_NESTED(67)
	ld	\gpr, STACK_REGS_KUAP(r1)
	mtspr	SPRN_AMR, \gpr
	END_MMU_FTR_SECTION_NESTED_IFSET(MMU_FTR_RADIX_KUAP, 67)
#endif
.endm
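
/*
 * Debug check (CONFIG_PPC_KUAP_DEBUG): trap with a WARN-once bug entry if
 * the AMR does not hold the fully blocked value at a point where user access
 * should be locked out.  The li/sldi pair materialises AMR_KUAP_BLOCKED
 * without a 64-bit constant load.
 */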
.macro kuap_check_amr gpr1, gpr2
#ifdef CONFIG_PPC_KUAP_DEBUG
	BEGIN_MMU_FTR_SECTION_NESTED(67)
	mfspr	\gpr1, SPRN_AMR
	li	\gpr2, (AMR_KUAP_BLOCKED >> AMR_KUAP_SHIFT)
	sldi	\gpr2, \gpr2, AMR_KUAP_SHIFT
999:	tdne	\gpr1, \gpr2
	EMIT_BUG_ENTRY 999b, __FILE__, __LINE__, (BUGFLAG_WARNING | BUGFLAG_ONCE)
	END_MMU_FTR_SECTION_NESTED_IFSET(MMU_FTR_RADIX_KUAP, 67)
#endif
.endm
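
/*
 * On interrupt entry, save the current AMR to pt_regs and lock user access
 * by setting the AMR to the blocked value.  If \msr_pr_cr names a CR field
 * holding the MSR[PR] test, the whole sequence is skipped for interrupts
 * that came from userspace; the mtspr is also skipped when the AMR already
 * holds the blocked value.
 */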
.macro kuap_save_amr_and_lock gpr1, gpr2, use_cr, msr_pr_cr
#ifdef CONFIG_PPC_KUAP
	BEGIN_MMU_FTR_SECTION_NESTED(67)
	.ifnb \msr_pr_cr
	bne	\msr_pr_cr, 99f
	.endif
	mfspr	\gpr1, SPRN_AMR
	std	\gpr1, STACK_REGS_KUAP(r1)
	li	\gpr2, (AMR_KUAP_BLOCKED >> AMR_KUAP_SHIFT)
	sldi	\gpr2, \gpr2, AMR_KUAP_SHIFT
	cmpd	\use_cr, \gpr1, \gpr2
	beq	\use_cr, 99f
	// We don't isync here because we very recently entered via rfid
	mtspr	SPRN_AMR, \gpr2
	isync
99:
	END_MMU_FTR_SECTION_NESTED_IFSET(MMU_FTR_RADIX_KUAP, 67)
#endif
.endm

#else /* !__ASSEMBLY__ */

#ifdef CONFIG_PPC_KUAP

#include <asm/reg.h>

/*
 * We support individually allowing read or write, but we don't support
 * nesting because that would require an expensive read/modify/write of
 * the AMR.
 */

static inline void set_kuap(unsigned long value)
{
	if (!mmu_has_feature(MMU_FTR_RADIX_KUAP))
		return;

	/*
	 * ISA v3.0B says we need a CSI (Context Synchronising Instruction) both
	 * before and after the move to AMR. See table 6 on page 1134.
	 */
	isync();
	mtspr(SPRN_AMR, value);
	isync();
}
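
/*
 * Open a user access window.  When one direction is a compile-time NULL the
 * compiler folds this to a single set_kuap() call: a read-only access passes
 * to == NULL and keeps writes blocked, a write-only access passes from == NULL
 * and keeps reads blocked, and anything else unblocks both.
 */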
static inline void allow_user_access(void __user *to, const void __user *from,
				     unsigned long size)
{
	// This is written so we can resolve to a single case at build time
	if (__builtin_constant_p(to) && to == NULL)
		set_kuap(AMR_KUAP_BLOCK_WRITE);
	else if (__builtin_constant_p(from) && from == NULL)
		set_kuap(AMR_KUAP_BLOCK_READ);
	else
		set_kuap(0);
}
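
/*
 * Close the user access window; both reads and writes are blocked again.
 * Typical pairing (an illustrative sketch, not a specific call site):
 *
 *	allow_user_access(to, from, size);
 *	... copy to/from user memory ...
 *	prevent_user_access(to, from, size);
 */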
static inline void prevent_user_access(void __user *to, const void __user *from,
				       unsigned long size)
{
	set_kuap(AMR_KUAP_BLOCKED);
}

/*
 * Detect a fault *within* a valid user access window that was nevertheless
 * blocked by the AMR.  That should never happen, but can if we incorrectly
 * save/restore the AMR, or if the AMR is overwritten for some other reason.
 * There is nothing the fault handling code can do to fix it, so the fault
 * would simply be taken again and again, spinning forever and leading to
 * soft lockups.  Detect the case and WARN().  Arguably it should be a BUG(),
 * but it's more polite to fail the access and let the kernel continue rather
 * than take down the box; there is no data integrity issue, as we are only
 * disallowing an access that should have been allowed.
 */
static inline bool bad_kuap_fault(struct pt_regs *regs, bool is_write)
{
	return WARN(mmu_has_feature(MMU_FTR_RADIX_KUAP) &&
		    (regs->kuap & (is_write ? AMR_KUAP_BLOCK_WRITE : AMR_KUAP_BLOCK_READ)),
		    "Bug: %s fault blocked by AMR!", is_write ? "Write" : "Read");
}
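
/*
 * Illustrative call site, per the description above (a sketch, not the
 * exact fault-handler code):
 *
 *	if (bad_kuap_fault(regs, is_write))
 *		return true;	// fatal: retrying the access cannot succeed
 */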

#endif /* CONFIG_PPC_KUAP */

#endif /* __ASSEMBLY__ */

#endif /* _ASM_POWERPC_BOOK3S_64_KUP_RADIX_H */