
MIPS: KVM: Generalise fpu_inuse for other state

Rename fpu_inuse and the related definitions to aux_inuse so it can be
used for lazy context management of other auxiliary processor state too,
such as VZ guest timer, watchpoints and performance counters.
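
Purely as an illustration of the pattern being generalised here, the standalone user-space sketch below models how a single aux_inuse bitmask drives lazy restore/save of one piece of auxiliary state: restore on first guest use, save only when the context must be given up. Only the KVM_MIPS_AUX_* flag names come from this patch; everything else (struct vcpu_arch as written here, fake_restore_fpu(), own_fpu(), lose_fpu()) is made up for the example and is not kernel code.

#include <stdio.h>

#define KVM_MIPS_AUX_FPU 0x1
#define KVM_MIPS_AUX_MSA 0x2

struct vcpu_arch {
	unsigned int aux_inuse;	/* which auxiliary state is live in hardware */
};

/* Stand-ins for the real __kvm_restore_fpu()/__kvm_save_fpu(). */
static void fake_restore_fpu(struct vcpu_arch *arch) { (void)arch; puts("restore FPU"); }
static void fake_save_fpu(struct vcpu_arch *arch)    { (void)arch; puts("save FPU"); }

/* First guest use of the FPU: restore only if not already live (cf. kvm_own_fpu()). */
static void own_fpu(struct vcpu_arch *arch)
{
	if (!(arch->aux_inuse & KVM_MIPS_AUX_FPU)) {
		fake_restore_fpu(arch);
		arch->aux_inuse |= KVM_MIPS_AUX_FPU;
	}
}

/* Context must be handed back: save only if live (cf. kvm_lose_fpu()). */
static void lose_fpu(struct vcpu_arch *arch)
{
	if (arch->aux_inuse & KVM_MIPS_AUX_FPU) {
		fake_save_fpu(arch);
		arch->aux_inuse &= ~KVM_MIPS_AUX_FPU;
	}
}

int main(void)
{
	struct vcpu_arch arch = { .aux_inuse = 0 };

	own_fpu(&arch);		/* first use: restores and sets the bit */
	own_fpu(&arch);		/* already live: nothing to do */
	lose_fpu(&arch);	/* saves once and clears the bit */
	return 0;
}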

Signed-off-by: James Hogan <james.hogan@imgtec.com>
Cc: Ralf Baechle <ralf@linux-mips.org>
Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: Radim Krčmář <rkrcmar@redhat.com>
Cc: linux-mips@linux-mips.org
Cc: kvm@vger.kernel.org
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
James Hogan authored 2016-06-14 09:40:10 +01:00, committed by Paolo Bonzini
parent 35fec26242
commit f943176a72
3 changed files with 27 additions and 27 deletions

View File

@@ -323,8 +323,8 @@ struct kvm_mips_tlb {
 	long tlb_lo[2];
 };

-#define KVM_MIPS_FPU_FPU 0x1
-#define KVM_MIPS_FPU_MSA 0x2
+#define KVM_MIPS_AUX_FPU 0x1
+#define KVM_MIPS_AUX_MSA 0x2

 #define KVM_MIPS_GUEST_TLB_SIZE 64
 struct kvm_vcpu_arch {
@@ -346,8 +346,8 @@ struct kvm_vcpu_arch {

 	/* FPU State */
 	struct mips_fpu_struct fpu;
-	/* Which FPU state is loaded (KVM_MIPS_FPU_*) */
-	unsigned int fpu_inuse;
+	/* Which auxiliary state is loaded (KVM_MIPS_AUX_*) */
+	unsigned int aux_inuse;

 	/* COP0 State */
 	struct mips_coproc *cop0;

View File

@@ -1154,7 +1154,7 @@ enum emulation_result kvm_mips_emulate_CP0(u32 inst, u32 *opc, u32 cause,
			 * it first.
			 */
			if (change & ST0_CU1 && !(val & ST0_FR) &&
-			    vcpu->arch.fpu_inuse & KVM_MIPS_FPU_MSA)
+			    vcpu->arch.aux_inuse & KVM_MIPS_AUX_MSA)
				kvm_lose_fpu(vcpu);

			/*
@@ -1165,7 +1165,7 @@ enum emulation_result kvm_mips_emulate_CP0(u32 inst, u32 *opc, u32 cause,
			 * the near future.
			 */
			if (change & ST0_CU1 &&
-			    vcpu->arch.fpu_inuse & KVM_MIPS_FPU_FPU)
+			    vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU)
				change_c0_status(ST0_CU1, val);

			preempt_enable();
@@ -1200,7 +1200,7 @@ enum emulation_result kvm_mips_emulate_CP0(u32 inst, u32 *opc, u32 cause,
			 * context is already loaded.
			 */
			if (change & MIPS_CONF5_FRE &&
-			    vcpu->arch.fpu_inuse & KVM_MIPS_FPU_FPU)
+			    vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU)
				change_c0_config5(MIPS_CONF5_FRE, val);

			/*
@@ -1210,7 +1210,7 @@ enum emulation_result kvm_mips_emulate_CP0(u32 inst, u32 *opc, u32 cause,
			 * quickly enabled again in the near future.
			 */
			if (change & MIPS_CONF5_MSAEN &&
-			    vcpu->arch.fpu_inuse & KVM_MIPS_FPU_MSA)
+			    vcpu->arch.aux_inuse & KVM_MIPS_AUX_MSA)
				change_c0_config5(MIPS_CONF5_MSAEN,
						  val);


View File

@@ -1447,7 +1447,7 @@ void kvm_own_fpu(struct kvm_vcpu *vcpu)
	 * not to clobber the status register directly via the commpage.
	 */
	if (cpu_has_msa && sr & ST0_CU1 && !(sr & ST0_FR) &&
-	    vcpu->arch.fpu_inuse & KVM_MIPS_FPU_MSA)
+	    vcpu->arch.aux_inuse & KVM_MIPS_AUX_MSA)
		kvm_lose_fpu(vcpu);

	/*
@@ -1462,9 +1462,9 @@ void kvm_own_fpu(struct kvm_vcpu *vcpu)
	enable_fpu_hazard();

	/* If guest FPU state not active, restore it now */
-	if (!(vcpu->arch.fpu_inuse & KVM_MIPS_FPU_FPU)) {
+	if (!(vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU)) {
		__kvm_restore_fpu(&vcpu->arch);
-		vcpu->arch.fpu_inuse |= KVM_MIPS_FPU_FPU;
+		vcpu->arch.aux_inuse |= KVM_MIPS_AUX_FPU;
	}

	preempt_enable();
@@ -1491,8 +1491,8 @@ void kvm_own_msa(struct kvm_vcpu *vcpu)
	 * interacts with MSA state, so play it safe and save it first.
	 */
	if (!(sr & ST0_FR) &&
-	    (vcpu->arch.fpu_inuse & (KVM_MIPS_FPU_FPU |
-				     KVM_MIPS_FPU_MSA)) == KVM_MIPS_FPU_FPU)
+	    (vcpu->arch.aux_inuse & (KVM_MIPS_AUX_FPU |
+				     KVM_MIPS_AUX_MSA)) == KVM_MIPS_AUX_FPU)
		kvm_lose_fpu(vcpu);

	change_c0_status(ST0_CU1 | ST0_FR, sr);
@@ -1506,20 +1506,20 @@ void kvm_own_msa(struct kvm_vcpu *vcpu)
	set_c0_config5(MIPS_CONF5_MSAEN);
	enable_fpu_hazard();

-	switch (vcpu->arch.fpu_inuse & (KVM_MIPS_FPU_FPU | KVM_MIPS_FPU_MSA)) {
-	case KVM_MIPS_FPU_FPU:
+	switch (vcpu->arch.aux_inuse & (KVM_MIPS_AUX_FPU | KVM_MIPS_AUX_MSA)) {
+	case KVM_MIPS_AUX_FPU:
		/*
		 * Guest FPU state already loaded, only restore upper MSA state
		 */
		__kvm_restore_msa_upper(&vcpu->arch);
-		vcpu->arch.fpu_inuse |= KVM_MIPS_FPU_MSA;
+		vcpu->arch.aux_inuse |= KVM_MIPS_AUX_MSA;
		break;
	case 0:
		/* Neither FPU or MSA already active, restore full MSA state */
		__kvm_restore_msa(&vcpu->arch);
-		vcpu->arch.fpu_inuse |= KVM_MIPS_FPU_MSA;
+		vcpu->arch.aux_inuse |= KVM_MIPS_AUX_MSA;
		if (kvm_mips_guest_has_fpu(&vcpu->arch))
-			vcpu->arch.fpu_inuse |= KVM_MIPS_FPU_FPU;
+			vcpu->arch.aux_inuse |= KVM_MIPS_AUX_FPU;
		break;
	default:
		break;
@@ -1533,13 +1533,13 @@ void kvm_own_msa(struct kvm_vcpu *vcpu)
 void kvm_drop_fpu(struct kvm_vcpu *vcpu)
 {
	preempt_disable();
-	if (cpu_has_msa && vcpu->arch.fpu_inuse & KVM_MIPS_FPU_MSA) {
+	if (cpu_has_msa && vcpu->arch.aux_inuse & KVM_MIPS_AUX_MSA) {
		disable_msa();
-		vcpu->arch.fpu_inuse &= ~KVM_MIPS_FPU_MSA;
+		vcpu->arch.aux_inuse &= ~KVM_MIPS_AUX_MSA;
	}
-	if (vcpu->arch.fpu_inuse & KVM_MIPS_FPU_FPU) {
+	if (vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU) {
		clear_c0_status(ST0_CU1 | ST0_FR);
-		vcpu->arch.fpu_inuse &= ~KVM_MIPS_FPU_FPU;
+		vcpu->arch.aux_inuse &= ~KVM_MIPS_AUX_FPU;
	}
	preempt_enable();
 }
@@ -1555,7 +1555,7 @@ void kvm_lose_fpu(struct kvm_vcpu *vcpu)
	 */

	preempt_disable();
-	if (cpu_has_msa && vcpu->arch.fpu_inuse & KVM_MIPS_FPU_MSA) {
+	if (cpu_has_msa && vcpu->arch.aux_inuse & KVM_MIPS_AUX_MSA) {
		set_c0_config5(MIPS_CONF5_MSAEN);
		enable_fpu_hazard();

@@ -1563,17 +1563,17 @@ void kvm_lose_fpu(struct kvm_vcpu *vcpu)
		/* Disable MSA & FPU */
		disable_msa();
-		if (vcpu->arch.fpu_inuse & KVM_MIPS_FPU_FPU) {
+		if (vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU) {
			clear_c0_status(ST0_CU1 | ST0_FR);
			disable_fpu_hazard();
		}
-		vcpu->arch.fpu_inuse &= ~(KVM_MIPS_FPU_FPU | KVM_MIPS_FPU_MSA);
-	} else if (vcpu->arch.fpu_inuse & KVM_MIPS_FPU_FPU) {
+		vcpu->arch.aux_inuse &= ~(KVM_MIPS_AUX_FPU | KVM_MIPS_AUX_MSA);
+	} else if (vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU) {
		set_c0_status(ST0_CU1);
		enable_fpu_hazard();

		__kvm_save_fpu(&vcpu->arch);
-		vcpu->arch.fpu_inuse &= ~KVM_MIPS_FPU_FPU;
+		vcpu->arch.aux_inuse &= ~KVM_MIPS_AUX_FPU;

		/* Disable FPU */
		clear_c0_status(ST0_CU1 | ST0_FR);
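
The subtlest hunk above is the switch in kvm_own_msa(): which restore is needed depends on which aux_inuse bits are already set, because the MSA vector registers overlap the FP registers. The standalone sketch below models just that decision; the KVM_MIPS_AUX_* values match the patch, while own_msa() and guest_has_fpu are illustrative stand-ins (guest_has_fpu plays the role of kvm_mips_guest_has_fpu()), not kernel code.

#include <stdio.h>

#define KVM_MIPS_AUX_FPU 0x1
#define KVM_MIPS_AUX_MSA 0x2

/* Model of the switch in kvm_own_msa(); returns the updated aux_inuse mask. */
static unsigned int own_msa(unsigned int aux_inuse, int guest_has_fpu)
{
	switch (aux_inuse & (KVM_MIPS_AUX_FPU | KVM_MIPS_AUX_MSA)) {
	case KVM_MIPS_AUX_FPU:
		/* FPU state already live: only the upper MSA halves are missing. */
		puts("restore upper MSA state only");
		aux_inuse |= KVM_MIPS_AUX_MSA;
		break;
	case 0:
		/*
		 * Nothing live yet: restore the full vector context. Because the
		 * MSA registers overlap the FP registers, this also brings the
		 * FPU state in, so mark it live too when the guest has an FPU.
		 */
		puts("restore full MSA state");
		aux_inuse |= KVM_MIPS_AUX_MSA;
		if (guest_has_fpu)
			aux_inuse |= KVM_MIPS_AUX_FPU;
		break;
	default:
		/* MSA already live: nothing to restore. */
		break;
	}
	return aux_inuse;
}

int main(void)
{
	printf("0x%x\n", own_msa(0, 1));			/* -> 0x3 */
	printf("0x%x\n", own_msa(KVM_MIPS_AUX_FPU, 1));		/* -> 0x3 */
	printf("0x%x\n", own_msa(KVM_MIPS_AUX_MSA, 1));		/* -> 0x2 */
	return 0;
}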