
powerpc/book3s64/pkeys: Don't update SPRN_AMR when in kernel mode.

Now that the kernel correctly stores/restores the userspace AMR/IAMR values, avoid
manipulating AMR and IAMR from the kernel on behalf of userspace.

Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
Reviewed-by: Sandipan Das <sandipan@linux.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/20201127044424.40686-15-aneesh.kumar@linux.ibm.com
Aneesh Kumar K.V 2020-11-27 10:14:16 +05:30 committed by Michael Ellerman
parent edc541ecaa
commit 48a8ab4eeb
5 changed files with 31 additions and 61 deletions
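To make the pattern described in the commit message concrete, the sketch below (ordinary userspace C, not kernel code) models the before/after behaviour: pkey updates now land in the userspace copy saved in current->thread.regs, and the real SPR is only written when the kernel returns to userspace. The struct layouts, the mfspr/mtspr stand-ins and restore_user_amr_on_exit() are simplified assumptions for illustration, not the kernel's actual definitions.

/* sketch_amr_thread_regs.c - illustration only, not kernel code */
#include <stdint.h>
#include <stdio.h>

static uint64_t fake_spr_amr;                     /* stand-in for SPRN_AMR */

static uint64_t mfspr_amr(void)   { return fake_spr_amr; }
static void mtspr_amr(uint64_t v) { fake_spr_amr = v; }

struct pt_regs       { uint64_t amr; };           /* only the saved user AMR */
struct thread_struct { struct pt_regs *regs; };
struct task_struct   { struct thread_struct thread; };

static struct pt_regs user_regs;
static struct task_struct the_task = { .thread = { .regs = &user_regs } };
static struct task_struct *current = &the_task;

/* Old pattern: write the SPR directly on behalf of userspace. */
static void old_style_set_amr(uint64_t value)
{
	mtspr_amr(value);
}

/* New pattern: update only the saved copy in thread.regs ... */
static void update_current_thread_amr(uint64_t value)
{
	current->thread.regs->amr = value;
}

/* ... which interrupt return writes back just before entering userspace. */
static void restore_user_amr_on_exit(void)
{
	mtspr_amr(current->thread.regs->amr);
}

int main(void)
{
	old_style_set_amr(0);                          /* legacy behaviour */

	update_current_thread_amr(0x3ULL << 60);       /* deny some key (bit position illustrative) */
	printf("SPR before exit to user: %#llx\n",
	       (unsigned long long)mfspr_amr());       /* still 0 */

	restore_user_amr_on_exit();
	printf("SPR after  exit to user: %#llx\n",
	       (unsigned long long)mfspr_amr());       /* now 0x3 << 60 */
	return 0;
}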


@@ -177,6 +177,27 @@ DECLARE_STATIC_KEY_FALSE(uaccess_flush_key);
#include <asm/mmu.h>
#include <asm/ptrace.h>
/*
* For a kernel thread that doesn't have thread.regs, return the
* default AMR/IAMR values.
*/
static inline u64 current_thread_amr(void)
{
if (current->thread.regs)
return current->thread.regs->amr;
return AMR_KUAP_BLOCKED;
}
static inline u64 current_thread_iamr(void)
{
if (current->thread.regs)
return current->thread.regs->iamr;
return AMR_KUEP_BLOCKED;
}
#endif /* CONFIG_PPC_PKEY */
#ifdef CONFIG_PPC_KUAP
static inline void kuap_user_restore(struct pt_regs *regs)
{
if (!mmu_has_feature(MMU_FTR_PKEY))


@@ -226,10 +226,6 @@ struct thread_struct {
struct thread_vr_state ckvr_state; /* Checkpointed VR state */
unsigned long ckvrsave; /* Checkpointed VRSAVE */
#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
#ifdef CONFIG_PPC_MEM_KEYS
unsigned long amr;
unsigned long iamr;
#endif
#ifdef CONFIG_KVM_BOOK3S_32_HANDLER
void* kvm_shadow_vcpu; /* KVM internal data */
#endif /* CONFIG_KVM_BOOK3S_32_HANDLER */


@@ -589,7 +589,6 @@ static void save_all(struct task_struct *tsk)
__giveup_spe(tsk);
msr_check_and_clear(msr_all_available);
thread_pkey_regs_save(&tsk->thread);
}
void flush_all_to_thread(struct task_struct *tsk)
@@ -1160,8 +1159,6 @@ static inline void save_sprs(struct thread_struct *t)
t->tar = mfspr(SPRN_TAR);
}
#endif
thread_pkey_regs_save(t);
}
static inline void restore_sprs(struct thread_struct *old_thread,
@@ -1202,7 +1199,6 @@ static inline void restore_sprs(struct thread_struct *old_thread,
mtspr(SPRN_TIDR, new_thread->tidr);
#endif
thread_pkey_regs_restore(new_thread, old_thread);
}
struct task_struct *__switch_to(struct task_struct *prev,


@@ -347,12 +347,6 @@ static bool exception_common(int signr, struct pt_regs *regs, int code,
current->thread.trap_nr = code;
/*
* Save all the pkey registers AMR/IAMR/UAMOR. Eg: Core dumps need
* to capture the content, if the task gets killed.
*/
thread_pkey_regs_save(&current->thread);
return true;
}


@@ -281,30 +281,17 @@ void __init setup_kuap(bool disabled)
}
#endif
static inline u64 read_amr(void)
static inline void update_current_thread_amr(u64 value)
{
return mfspr(SPRN_AMR);
current->thread.regs->amr = value;
}
static inline void write_amr(u64 value)
{
mtspr(SPRN_AMR, value);
}
static inline u64 read_iamr(void)
{
if (!likely(pkey_execute_disable_supported))
return 0x0UL;
return mfspr(SPRN_IAMR);
}
static inline void write_iamr(u64 value)
static inline void update_current_thread_iamr(u64 value)
{
if (!likely(pkey_execute_disable_supported))
return;
mtspr(SPRN_IAMR, value);
current->thread.regs->iamr = value;
}
#ifdef CONFIG_PPC_MEM_KEYS
@@ -319,17 +306,17 @@ void pkey_mm_init(struct mm_struct *mm)
static inline void init_amr(int pkey, u8 init_bits)
{
u64 new_amr_bits = (((u64)init_bits & 0x3UL) << pkeyshift(pkey));
u64 old_amr = read_amr() & ~((u64)(0x3ul) << pkeyshift(pkey));
u64 old_amr = current_thread_amr() & ~((u64)(0x3ul) << pkeyshift(pkey));
write_amr(old_amr | new_amr_bits);
update_current_thread_amr(old_amr | new_amr_bits);
}
static inline void init_iamr(int pkey, u8 init_bits)
{
u64 new_iamr_bits = (((u64)init_bits & 0x1UL) << pkeyshift(pkey));
u64 old_iamr = read_iamr() & ~((u64)(0x1ul) << pkeyshift(pkey));
u64 old_iamr = current_thread_iamr() & ~((u64)(0x1ul) << pkeyshift(pkey));
write_iamr(old_iamr | new_iamr_bits);
update_current_thread_iamr(old_iamr | new_iamr_bits);
}
/*
@@ -372,30 +359,6 @@ int __arch_set_user_pkey_access(struct task_struct *tsk, int pkey,
return 0;
}
void thread_pkey_regs_save(struct thread_struct *thread)
{
if (!mmu_has_feature(MMU_FTR_PKEY))
return;
/*
* TODO: Skip saving registers if @thread hasn't used any keys yet.
*/
thread->amr = read_amr();
thread->iamr = read_iamr();
}
void thread_pkey_regs_restore(struct thread_struct *new_thread,
struct thread_struct *old_thread)
{
if (!mmu_has_feature(MMU_FTR_PKEY))
return;
if (old_thread->amr != new_thread->amr)
write_amr(new_thread->amr);
if (old_thread->iamr != new_thread->iamr)
write_iamr(new_thread->iamr);
}
int execute_only_pkey(struct mm_struct *mm)
{
return mm->context.execute_only_pkey;
@@ -444,9 +407,9 @@ static bool pkey_access_permitted(int pkey, bool write, bool execute)
pkey_shift = pkeyshift(pkey);
if (execute)
return !(read_iamr() & (IAMR_EX_BIT << pkey_shift));
return !(current_thread_iamr() & (IAMR_EX_BIT << pkey_shift));
amr = read_amr();
amr = current_thread_amr();
if (write)
return !(amr & (AMR_WR_BIT << pkey_shift));
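For reference, a small worked example (userspace C, assumptions noted in the comments) of the per-key bit arithmetic that init_amr(), init_iamr() and pkey_access_permitted() rely on. The pkeyshift() formula and the key count below are illustrative stand-ins rather than the kernel's macros.

/* pkey_bits_sketch.c - illustration of the 2-bits-per-key AMR layout */
#include <stdint.h>
#include <stdio.h>

#define NR_PKEYS          32              /* assumed key count for the sketch */
#define AMR_BITS_PER_PKEY 2
#define AMR_RD_BIT        0x1UL           /* access-disable */
#define AMR_WR_BIT        0x2UL           /* write-disable  */

/* Assumed layout: key 0 in bits 63:62, key 1 in bits 61:60, and so on. */
static unsigned int pkeyshift(int pkey)
{
	return (NR_PKEYS - 1 - pkey) * AMR_BITS_PER_PKEY;
}

/* init_amr()-style update: clear the key's 2-bit field, then set init_bits. */
static uint64_t set_pkey_bits(uint64_t amr, int pkey, uint64_t init_bits)
{
	uint64_t mask = 0x3UL << pkeyshift(pkey);

	return (amr & ~mask) | ((init_bits & 0x3UL) << pkeyshift(pkey));
}

int main(void)
{
	uint64_t amr = 0;

	/* Deny writes (but not reads) through key 2: sets bit 59 here. */
	amr = set_pkey_bits(amr, 2, AMR_WR_BIT);
	printf("AMR = %#018llx\n", (unsigned long long)amr);

	/* pkey_access_permitted()-style check for a write through key 2. */
	int write_ok = !(amr & (AMR_WR_BIT << pkeyshift(2)));
	printf("write permitted for key 2: %d\n", write_ok);     /* prints 0 */
	return 0;
}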