/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 */

#ifndef __ARM_KVM_EMULATE_H__
#define __ARM_KVM_EMULATE_H__

#include <linux/kvm_host.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_mmio.h>
#include <asm/kvm_arm.h>
#include <asm/cputype.h>
/* arm64 compatibility macros */
|
KVM: arm/arm64: Correct CPSR on exception entry
commit 3c2483f15499b877ccb53250d88addb8c91da147 upstream.
When KVM injects an exception into a guest, it generates the CPSR value
from scratch, configuring CPSR.{M,A,I,T,E}, and setting all other
bits to zero.
This isn't correct, as the architecture specifies that some CPSR bits
are (conditionally) cleared or set upon an exception, and others are
unchanged from the original context.
This patch adds logic to match the architectural behaviour. To make this
simple to follow/audit/extend, documentation references are provided,
and bits are configured in order of their layout in SPSR_EL2. This
layout can be seen in the diagram on ARM DDI 0487E.a page C5-426.
Note that this code is used by both arm and arm64, and is intended to
fuction with the SPSR_EL2 and SPSR_HYP layouts.
Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Signed-off-by: Marc Zyngier <maz@kernel.org>
Reviewed-by: Alexandru Elisei <alexandru.elisei@arm.com>
Cc: stable@vger.kernel.org
Link: https://lore.kernel.org/r/20200108134324.46500-3-mark.rutland@arm.com
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
2020-01-08 06:43:23 -07:00
|
|
|
#define PSR_AA32_MODE_FIQ FIQ_MODE
|
|
|
|
#define PSR_AA32_MODE_SVC SVC_MODE
|
2018-07-05 08:16:53 -06:00
|
|
|
#define PSR_AA32_MODE_ABT ABT_MODE
|
|
|
|
#define PSR_AA32_MODE_UND UND_MODE
|
|
|
|
#define PSR_AA32_T_BIT PSR_T_BIT
|
KVM: arm/arm64: Correct CPSR on exception entry
commit 3c2483f15499b877ccb53250d88addb8c91da147 upstream.
When KVM injects an exception into a guest, it generates the CPSR value
from scratch, configuring CPSR.{M,A,I,T,E}, and setting all other
bits to zero.
This isn't correct, as the architecture specifies that some CPSR bits
are (conditionally) cleared or set upon an exception, and others are
unchanged from the original context.
This patch adds logic to match the architectural behaviour. To make this
simple to follow/audit/extend, documentation references are provided,
and bits are configured in order of their layout in SPSR_EL2. This
layout can be seen in the diagram on ARM DDI 0487E.a page C5-426.
Note that this code is used by both arm and arm64, and is intended to
fuction with the SPSR_EL2 and SPSR_HYP layouts.
Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Signed-off-by: Marc Zyngier <maz@kernel.org>
Reviewed-by: Alexandru Elisei <alexandru.elisei@arm.com>
Cc: stable@vger.kernel.org
Link: https://lore.kernel.org/r/20200108134324.46500-3-mark.rutland@arm.com
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
2020-01-08 06:43:23 -07:00
|
|
|
#define PSR_AA32_F_BIT PSR_F_BIT
|
2018-07-05 08:16:53 -06:00
|
|
|
#define PSR_AA32_I_BIT PSR_I_BIT
|
|
|
|
#define PSR_AA32_A_BIT PSR_A_BIT
|
|
|
|
#define PSR_AA32_E_BIT PSR_E_BIT
|
|
|
|
#define PSR_AA32_IT_MASK PSR_IT_MASK
|
KVM: arm/arm64: Correct CPSR on exception entry
commit 3c2483f15499b877ccb53250d88addb8c91da147 upstream.
When KVM injects an exception into a guest, it generates the CPSR value
from scratch, configuring CPSR.{M,A,I,T,E}, and setting all other
bits to zero.
This isn't correct, as the architecture specifies that some CPSR bits
are (conditionally) cleared or set upon an exception, and others are
unchanged from the original context.
This patch adds logic to match the architectural behaviour. To make this
simple to follow/audit/extend, documentation references are provided,
and bits are configured in order of their layout in SPSR_EL2. This
layout can be seen in the diagram on ARM DDI 0487E.a page C5-426.
Note that this code is used by both arm and arm64, and is intended to
fuction with the SPSR_EL2 and SPSR_HYP layouts.
Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Signed-off-by: Marc Zyngier <maz@kernel.org>
Reviewed-by: Alexandru Elisei <alexandru.elisei@arm.com>
Cc: stable@vger.kernel.org
Link: https://lore.kernel.org/r/20200108134324.46500-3-mark.rutland@arm.com
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
2020-01-08 06:43:23 -07:00
|
|
|
#define PSR_AA32_GE_MASK 0x000f0000
|
|
|
|
#define PSR_AA32_DIT_BIT 0x00200000
|
|
|
|
#define PSR_AA32_PAN_BIT 0x00400000
|
|
|
|
#define PSR_AA32_SSBS_BIT 0x00800000
|
|
|
|
#define PSR_AA32_Q_BIT PSR_Q_BIT
|
|
|
|
#define PSR_AA32_V_BIT PSR_V_BIT
|
|
|
|
#define PSR_AA32_C_BIT PSR_C_BIT
|
|
|
|
#define PSR_AA32_Z_BIT PSR_Z_BIT
|
|
|
|
#define PSR_AA32_N_BIT PSR_N_BIT
|
2017-10-28 20:18:09 -06:00
|
|
|
|
2012-10-03 04:17:02 -06:00
|
|
|
unsigned long *vcpu_reg(struct kvm_vcpu *vcpu, u8 reg_num);
|
2017-10-28 20:18:09 -06:00
|
|
|
|
|
|
|
static inline unsigned long *vcpu_reg32(struct kvm_vcpu *vcpu, u8 reg_num)
|
|
|
|
{
|
|
|
|
return vcpu_reg(vcpu, reg_num);
|
|
|
|
}
|
|
|
|
|
2017-12-27 12:01:52 -07:00
|
|
|
unsigned long *__vcpu_spsr(struct kvm_vcpu *vcpu);
|
|
|
|
|
|
|
|
static inline unsigned long vpcu_read_spsr(struct kvm_vcpu *vcpu)
|
|
|
|
{
|
|
|
|
return *__vcpu_spsr(vcpu);
|
|
|
|
}
|
|
|
|
|
|
|
|
static inline void vcpu_write_spsr(struct kvm_vcpu *vcpu, unsigned long v)
|
|
|
|
{
|
|
|
|
*__vcpu_spsr(vcpu) = v;
|
|
|
|
}
|
|
|
|
|
2015-12-04 05:03:11 -07:00
|
|
|
static inline unsigned long vcpu_get_reg(struct kvm_vcpu *vcpu,
|
|
|
|
u8 reg_num)
|
|
|
|
{
|
|
|
|
return *vcpu_reg(vcpu, reg_num);
|
|
|
|
}
|
|
|
|
|
|
|
|
static inline void vcpu_set_reg(struct kvm_vcpu *vcpu, u8 reg_num,
|
|
|
|
unsigned long val)
|
|
|
|
{
|
|
|
|
*vcpu_reg(vcpu, reg_num) = val;
|
|
|
|
}
|
|
|
|
|
2016-09-06 02:28:43 -06:00
|
|
|
bool kvm_condition_valid32(const struct kvm_vcpu *vcpu);
|
|
|
|
void kvm_skip_instr32(struct kvm_vcpu *vcpu, bool is_wide_instr);
|
2017-10-28 20:18:09 -06:00
|
|
|
void kvm_inject_undef32(struct kvm_vcpu *vcpu);
|
|
|
|
void kvm_inject_dabt32(struct kvm_vcpu *vcpu, unsigned long addr);
|
|
|
|
void kvm_inject_pabt32(struct kvm_vcpu *vcpu, unsigned long addr);
|
2016-09-06 07:02:09 -06:00
|
|
|
void kvm_inject_vabt(struct kvm_vcpu *vcpu);

/* Inject an Undefined Instruction exception (32-bit-only host). */
static inline void kvm_inject_undefined(struct kvm_vcpu *vcpu)
{
	kvm_inject_undef32(vcpu);
}

/* Inject a Data Abort at @addr (32-bit-only host). */
static inline void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr)
{
	kvm_inject_dabt32(vcpu, addr);
}

/* Inject a Prefetch Abort at @addr (32-bit-only host). */
static inline void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr)
{
	kvm_inject_pabt32(vcpu, addr);
}
|
2013-01-20 16:28:09 -07:00
|
|
|
|
2016-09-06 02:28:43 -06:00
|
|
|
static inline bool kvm_condition_valid(const struct kvm_vcpu *vcpu)
|
|
|
|
{
|
|
|
|
return kvm_condition_valid32(vcpu);
|
|
|
|
}
|
|
|
|
|
|
|
|
static inline void kvm_skip_instr(struct kvm_vcpu *vcpu, bool is_wide_instr)
|
|
|
|
{
|
|
|
|
kvm_skip_instr32(vcpu, is_wide_instr);
|
|
|
|
}
|
|
|
|
|
2014-10-16 09:21:16 -06:00
|
|
|
static inline void vcpu_reset_hcr(struct kvm_vcpu *vcpu)
|
|
|
|
{
|
|
|
|
vcpu->arch.hcr = HCR_GUEST_MASK;
|
|
|
|
}
|
|
|
|
|
2017-08-03 04:09:05 -06:00
|
|
|
static inline unsigned long *vcpu_hcr(const struct kvm_vcpu *vcpu)
|
2014-12-19 09:05:31 -07:00
|
|
|
{
|
2017-08-03 04:09:05 -06:00
|
|
|
return (unsigned long *)&vcpu->arch.hcr;
|
2014-12-19 09:05:31 -07:00
|
|
|
}
|
|
|
|
|
2018-06-21 03:43:59 -06:00
|
|
|
static inline void vcpu_clear_wfe_traps(struct kvm_vcpu *vcpu)
|
|
|
|
{
|
|
|
|
vcpu->arch.hcr &= ~HCR_TWE;
|
|
|
|
}
|
|
|
|
|
|
|
|
static inline void vcpu_set_wfe_traps(struct kvm_vcpu *vcpu)
|
|
|
|
{
|
|
|
|
vcpu->arch.hcr |= HCR_TWE;
|
|
|
|
}
|
|
|
|
|
2016-09-06 02:28:43 -06:00
|
|
|
static inline bool vcpu_mode_is_32bit(const struct kvm_vcpu *vcpu)
|
2013-01-20 16:28:13 -07:00
|
|
|
{
|
2018-08-07 18:04:40 -06:00
|
|
|
return true;
|
2013-01-20 16:28:13 -07:00
|
|
|
}
|
|
|
|
|
2012-10-03 04:17:02 -06:00
|
|
|
static inline unsigned long *vcpu_pc(struct kvm_vcpu *vcpu)
|
2013-01-20 16:28:06 -07:00
|
|
|
{
|
2016-01-03 04:26:01 -07:00
|
|
|
return &vcpu->arch.ctxt.gp_regs.usr_regs.ARM_pc;
|
2013-01-20 16:28:06 -07:00
|
|
|
}
|
|
|
|
|
2016-09-06 02:28:43 -06:00
|
|
|
static inline unsigned long *vcpu_cpsr(const struct kvm_vcpu *vcpu)
|
2013-01-20 16:28:06 -07:00
|
|
|
{
|
2016-09-06 02:28:43 -06:00
|
|
|
return (unsigned long *)&vcpu->arch.ctxt.gp_regs.usr_regs.ARM_cpsr;
|
2013-01-20 16:28:06 -07:00
|
|
|
}
|
|
|
|
|
2013-01-20 16:28:13 -07:00
|
|
|
static inline void vcpu_set_thumb(struct kvm_vcpu *vcpu)
|
|
|
|
{
|
|
|
|
*vcpu_cpsr(vcpu) |= PSR_T_BIT;
|
|
|
|
}
|
|
|
|
|
2013-01-20 16:28:06 -07:00
|
|
|
static inline bool mode_has_spsr(struct kvm_vcpu *vcpu)
|
|
|
|
{
|
2016-01-03 04:26:01 -07:00
|
|
|
unsigned long cpsr_mode = vcpu->arch.ctxt.gp_regs.usr_regs.ARM_cpsr & MODE_MASK;
|
2013-01-20 16:28:06 -07:00
|
|
|
return (cpsr_mode > USR_MODE && cpsr_mode < SYSTEM_MODE);
|
|
|
|
}
|
|
|
|
|
|
|
|
static inline bool vcpu_mode_priv(struct kvm_vcpu *vcpu)
|
|
|
|
{
|
2016-01-03 04:26:01 -07:00
|
|
|
unsigned long cpsr_mode = vcpu->arch.ctxt.gp_regs.usr_regs.ARM_cpsr & MODE_MASK;
|
2018-01-23 08:11:14 -07:00
|
|
|
return cpsr_mode > USR_MODE;
|
2013-01-20 16:28:06 -07:00
|
|
|
}
|
|
|
|
|
2016-09-06 02:28:43 -06:00
|
|
|
static inline u32 kvm_vcpu_get_hsr(const struct kvm_vcpu *vcpu)
|
2012-09-17 12:27:09 -06:00
|
|
|
{
|
|
|
|
return vcpu->arch.fault.hsr;
|
|
|
|
}
|
|
|
|
|
2016-09-06 02:28:43 -06:00
|
|
|
static inline int kvm_vcpu_get_condition(const struct kvm_vcpu *vcpu)
|
|
|
|
{
|
|
|
|
u32 hsr = kvm_vcpu_get_hsr(vcpu);
|
|
|
|
|
|
|
|
if (hsr & HSR_CV)
|
|
|
|
return (hsr & HSR_COND) >> HSR_COND_SHIFT;
|
|
|
|
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
2012-09-17 12:27:09 -06:00
|
|
|
static inline unsigned long kvm_vcpu_get_hfar(struct kvm_vcpu *vcpu)
|
|
|
|
{
|
|
|
|
return vcpu->arch.fault.hxfar;
|
|
|
|
}
|
|
|
|
|
|
|
|
static inline phys_addr_t kvm_vcpu_get_fault_ipa(struct kvm_vcpu *vcpu)
|
|
|
|
{
|
|
|
|
return ((phys_addr_t)vcpu->arch.fault.hpfar & HPFAR_MASK) << 8;
|
|
|
|
}
|
|
|
|
|
2012-09-18 04:06:23 -06:00
|
|
|
static inline bool kvm_vcpu_dabt_isvalid(struct kvm_vcpu *vcpu)
|
|
|
|
{
|
|
|
|
return kvm_vcpu_get_hsr(vcpu) & HSR_ISV;
|
|
|
|
}
|
|
|
|
|
2012-09-18 04:12:26 -06:00
|
|
|
static inline bool kvm_vcpu_dabt_iswrite(struct kvm_vcpu *vcpu)
|
|
|
|
{
|
|
|
|
return kvm_vcpu_get_hsr(vcpu) & HSR_WNR;
|
|
|
|
}
|
|
|
|
|
2012-09-18 04:23:02 -06:00
|
|
|
static inline bool kvm_vcpu_dabt_issext(struct kvm_vcpu *vcpu)
|
|
|
|
{
|
|
|
|
return kvm_vcpu_get_hsr(vcpu) & HSR_SSE;
|
|
|
|
}
|
|
|
|
|
2019-12-12 12:50:55 -07:00
|
|
|
static inline bool kvm_vcpu_dabt_issf(const struct kvm_vcpu *vcpu)
|
|
|
|
{
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
2012-09-18 04:28:57 -06:00
|
|
|
static inline int kvm_vcpu_dabt_get_rd(struct kvm_vcpu *vcpu)
|
|
|
|
{
|
|
|
|
return (kvm_vcpu_get_hsr(vcpu) & HSR_SRT_MASK) >> HSR_SRT_SHIFT;
|
|
|
|
}
|
|
|
|
|
2012-09-18 04:37:28 -06:00
|
|
|
static inline bool kvm_vcpu_dabt_iss1tw(struct kvm_vcpu *vcpu)
|
|
|
|
{
|
|
|
|
return kvm_vcpu_get_hsr(vcpu) & HSR_DABT_S1PTW;
|
|
|
|
}
|
|
|
|
|
2016-01-29 08:01:28 -07:00
|
|
|
static inline bool kvm_vcpu_dabt_is_cm(struct kvm_vcpu *vcpu)
|
|
|
|
{
|
|
|
|
return !!(kvm_vcpu_get_hsr(vcpu) & HSR_DABT_CM);
|
|
|
|
}
|
|
|
|
|
2012-09-18 04:43:30 -06:00
|
|
|
/* Get Access Size from a data abort */
|
|
|
|
static inline int kvm_vcpu_dabt_get_as(struct kvm_vcpu *vcpu)
|
|
|
|
{
|
|
|
|
switch ((kvm_vcpu_get_hsr(vcpu) >> 22) & 0x3) {
|
|
|
|
case 0:
|
|
|
|
return 1;
|
|
|
|
case 1:
|
|
|
|
return 2;
|
|
|
|
case 2:
|
|
|
|
return 4;
|
|
|
|
default:
|
|
|
|
kvm_err("Hardware is weird: SAS 0b11 is reserved\n");
|
|
|
|
return -EFAULT;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2012-09-18 05:07:06 -06:00
|
|
|
/* This one is not specific to Data Abort */
|
|
|
|
static inline bool kvm_vcpu_trap_il_is32bit(struct kvm_vcpu *vcpu)
|
|
|
|
{
|
|
|
|
return kvm_vcpu_get_hsr(vcpu) & HSR_IL;
|
|
|
|
}
|
|
|
|
|
2012-09-18 07:09:58 -06:00
|
|
|
static inline u8 kvm_vcpu_trap_get_class(struct kvm_vcpu *vcpu)
|
|
|
|
{
|
|
|
|
return kvm_vcpu_get_hsr(vcpu) >> HSR_EC_SHIFT;
|
|
|
|
}
|
|
|
|
|
2012-10-15 03:33:38 -06:00
|
|
|
static inline bool kvm_vcpu_trap_is_iabt(struct kvm_vcpu *vcpu)
|
|
|
|
{
|
|
|
|
return kvm_vcpu_trap_get_class(vcpu) == HSR_EC_IABT;
|
|
|
|
}
|
|
|
|
|
2012-09-18 07:14:35 -06:00
|
|
|
static inline u8 kvm_vcpu_trap_get_fault(struct kvm_vcpu *vcpu)
|
2014-09-26 04:29:34 -06:00
|
|
|
{
|
|
|
|
return kvm_vcpu_get_hsr(vcpu) & HSR_FSC;
|
|
|
|
}
|
|
|
|
|
|
|
|
static inline u8 kvm_vcpu_trap_get_fault_type(struct kvm_vcpu *vcpu)
|
2012-09-18 07:14:35 -06:00
|
|
|
{
|
|
|
|
return kvm_vcpu_get_hsr(vcpu) & HSR_FSC_TYPE;
|
|
|
|
}
|
|
|
|
|
2017-07-18 06:37:41 -06:00
|
|
|
static inline bool kvm_vcpu_dabt_isextabt(struct kvm_vcpu *vcpu)
|
|
|
|
{
|
2017-10-30 00:05:18 -06:00
|
|
|
switch (kvm_vcpu_trap_get_fault(vcpu)) {
|
2017-07-18 06:37:41 -06:00
|
|
|
case FSC_SEA:
|
|
|
|
case FSC_SEA_TTW0:
|
|
|
|
case FSC_SEA_TTW1:
|
|
|
|
case FSC_SEA_TTW2:
|
|
|
|
case FSC_SEA_TTW3:
|
|
|
|
case FSC_SECC:
|
|
|
|
case FSC_SECC_TTW0:
|
|
|
|
case FSC_SECC_TTW1:
|
|
|
|
case FSC_SECC_TTW2:
|
|
|
|
case FSC_SECC_TTW3:
|
|
|
|
return true;
|
|
|
|
default:
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2016-05-01 14:29:58 -06:00
|
|
|
static inline bool kvm_is_write_fault(struct kvm_vcpu *vcpu)
|
|
|
|
{
|
|
|
|
if (kvm_vcpu_trap_is_iabt(vcpu))
|
|
|
|
return false;
|
|
|
|
|
|
|
|
return kvm_vcpu_dabt_iswrite(vcpu);
|
|
|
|
}
|
|
|
|
|
2013-02-21 12:26:10 -07:00
|
|
|
static inline u32 kvm_vcpu_hvc_get_imm(struct kvm_vcpu *vcpu)
|
|
|
|
{
|
|
|
|
return kvm_vcpu_get_hsr(vcpu) & HSR_HVC_IMM_MASK;
|
|
|
|
}
|
|
|
|
|
2014-06-02 07:37:13 -06:00
|
|
|
static inline unsigned long kvm_vcpu_get_mpidr_aff(struct kvm_vcpu *vcpu)
|
2013-10-18 11:19:03 -06:00
|
|
|
{
|
2016-01-03 04:26:01 -07:00
|
|
|
return vcpu_cp15(vcpu, c0_MPIDR) & MPIDR_HWID_BITMASK;
|
2013-10-18 11:19:03 -06:00
|
|
|
}
|
|
|
|
|
2019-05-03 08:27:49 -06:00
|
|
|
static inline bool kvm_arm_get_vcpu_workaround_2_flag(struct kvm_vcpu *vcpu)
|
|
|
|
{
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
static inline void kvm_arm_set_vcpu_workaround_2_flag(struct kvm_vcpu *vcpu,
|
|
|
|
bool flag)
|
|
|
|
{
|
|
|
|
}
|
|
|
|
|
2013-11-05 07:12:15 -07:00
|
|
|
static inline void kvm_vcpu_set_be(struct kvm_vcpu *vcpu)
|
|
|
|
{
|
|
|
|
*vcpu_cpsr(vcpu) |= PSR_E_BIT;
|
|
|
|
}
|
|
|
|
|
2013-02-12 05:40:22 -07:00
|
|
|
static inline bool kvm_vcpu_is_be(struct kvm_vcpu *vcpu)
|
|
|
|
{
|
|
|
|
return !!(*vcpu_cpsr(vcpu) & PSR_E_BIT);
|
|
|
|
}
|
|
|
|
|
|
|
|
static inline unsigned long vcpu_data_guest_to_host(struct kvm_vcpu *vcpu,
|
|
|
|
unsigned long data,
|
|
|
|
unsigned int len)
|
|
|
|
{
|
|
|
|
if (kvm_vcpu_is_be(vcpu)) {
|
|
|
|
switch (len) {
|
|
|
|
case 1:
|
|
|
|
return data & 0xff;
|
|
|
|
case 2:
|
|
|
|
return be16_to_cpu(data & 0xffff);
|
|
|
|
default:
|
|
|
|
return be32_to_cpu(data);
|
|
|
|
}
|
2014-06-12 10:30:05 -06:00
|
|
|
} else {
|
|
|
|
switch (len) {
|
|
|
|
case 1:
|
|
|
|
return data & 0xff;
|
|
|
|
case 2:
|
|
|
|
return le16_to_cpu(data & 0xffff);
|
|
|
|
default:
|
|
|
|
return le32_to_cpu(data);
|
|
|
|
}
|
2013-02-12 05:40:22 -07:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
static inline unsigned long vcpu_data_host_to_guest(struct kvm_vcpu *vcpu,
|
|
|
|
unsigned long data,
|
|
|
|
unsigned int len)
|
|
|
|
{
|
|
|
|
if (kvm_vcpu_is_be(vcpu)) {
|
|
|
|
switch (len) {
|
|
|
|
case 1:
|
|
|
|
return data & 0xff;
|
|
|
|
case 2:
|
|
|
|
return cpu_to_be16(data & 0xffff);
|
|
|
|
default:
|
|
|
|
return cpu_to_be32(data);
|
|
|
|
}
|
2014-06-12 10:30:05 -06:00
|
|
|
} else {
|
|
|
|
switch (len) {
|
|
|
|
case 1:
|
|
|
|
return data & 0xff;
|
|
|
|
case 2:
|
|
|
|
return cpu_to_le16(data & 0xffff);
|
|
|
|
default:
|
|
|
|
return cpu_to_le32(data);
|
|
|
|
}
|
2013-02-12 05:40:22 -07:00
|
|
|
}
|
|
|
|
}

/* Pointer authentication does not exist on 32-bit ARM: nothing to do. */
static inline void vcpu_ptrauth_setup_lazy(struct kvm_vcpu *vcpu) {}

#endif /* __ARM_KVM_EMULATE_H__ */
|