From fa59cc003804f7606b3d0aa8e16509f9103f7377 Mon Sep 17 00:00:00 2001
From: Liran Alon
Date: Sun, 24 Dec 2017 18:12:53 +0200
Subject: [PATCH] KVM: x86: Optimization: Create SVM stubs for sync_pir_to_irr()
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

sync_pir_to_irr() is only called if vcpu->arch.apicv_active == true.
When it is false, the VMX code makes sure sync_pir_to_irr is set to NULL.

Therefore, adding an SVM stub allows the sync_pir_to_irr != NULL check
to be removed from all call sites.

Signed-off-by: Liran Alon
Suggested-by: Paolo Bonzini
Reviewed-by: Nikita Leshenko
Reviewed-by: Liam Merwick
[Return highest IRR in the SVM case. - Paolo]
Signed-off-by: Paolo Bonzini
Signed-off-by: Radim Krčmář
---
 arch/x86/kvm/lapic.c |  2 +-
 arch/x86/kvm/svm.c   |  1 +
 arch/x86/kvm/x86.c   | 10 ++++------
 3 files changed, 6 insertions(+), 7 deletions(-)

diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index e2c1fb8d35ce..0928608750e3 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -581,7 +581,7 @@ static void pv_eoi_clr_pending(struct kvm_vcpu *vcpu)
 static int apic_has_interrupt_for_ppr(struct kvm_lapic *apic, u32 ppr)
 {
 	int highest_irr;
-	if (kvm_x86_ops->sync_pir_to_irr && apic->vcpu->arch.apicv_active)
+	if (apic->vcpu->arch.apicv_active)
 		highest_irr = kvm_x86_ops->sync_pir_to_irr(apic->vcpu);
 	else
 		highest_irr = apic_find_highest_irr(apic);
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 5d83f0474020..b613d331d031 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -6740,6 +6740,7 @@ static struct kvm_x86_ops svm_x86_ops __ro_after_init = {
 	.load_eoi_exitmap = svm_load_eoi_exitmap,
 	.hwapic_irr_update = svm_hwapic_irr_update,
 	.hwapic_isr_update = svm_hwapic_isr_update,
+	.sync_pir_to_irr = kvm_lapic_find_highest_irr,
 	.apicv_post_state_restore = avic_post_state_restore,
 
 	.set_tss_addr = svm_set_tss_addr,
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 6d2e1459adc9..0204b2b8a293 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -2973,7 +2973,7 @@ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
 static int kvm_vcpu_ioctl_get_lapic(struct kvm_vcpu *vcpu,
 				    struct kvm_lapic_state *s)
 {
-	if (kvm_x86_ops->sync_pir_to_irr && vcpu->arch.apicv_active)
+	if (vcpu->arch.apicv_active)
 		kvm_x86_ops->sync_pir_to_irr(vcpu);
 
 	return kvm_apic_get_state(vcpu, s);
@@ -6820,7 +6820,7 @@ static void vcpu_scan_ioapic(struct kvm_vcpu *vcpu)
 	if (irqchip_split(vcpu->kvm))
 		kvm_scan_ioapic_routes(vcpu, vcpu->arch.ioapic_handled_vectors);
 	else {
-		if (kvm_x86_ops->sync_pir_to_irr && vcpu->arch.apicv_active)
+		if (vcpu->arch.apicv_active)
			kvm_x86_ops->sync_pir_to_irr(vcpu);
 		kvm_ioapic_scan_entry(vcpu, vcpu->arch.ioapic_handled_vectors);
 	}
@@ -7046,10 +7046,8 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
 	 * This handles the case where a posted interrupt was
 	 * notified with kvm_vcpu_kick.
 	 */
-	if (kvm_lapic_enabled(vcpu)) {
-		if (kvm_x86_ops->sync_pir_to_irr && vcpu->arch.apicv_active)
-			kvm_x86_ops->sync_pir_to_irr(vcpu);
-	}
+	if (kvm_lapic_enabled(vcpu) && vcpu->arch.apicv_active)
+		kvm_x86_ops->sync_pir_to_irr(vcpu);
 
 	if (vcpu->mode == EXITING_GUEST_MODE || kvm_request_pending(vcpu)
 	    || need_resched() || signal_pending(current)) {
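
The pattern the patch relies on can be shown outside of KVM: rather than having
every call site test a possibly-NULL callback, the backend that has no real
implementation installs a stub, so the hook can always be called. The sketch
below is standalone C with made-up names (struct ops, real_sync, stub_sync);
it only illustrates the design, it is not KVM code.

#include <stdio.h>

struct ops {
	int (*sync_pir_to_irr)(int vcpu_id);	/* hook, never left NULL */
};

/* Backend with a real implementation (the "VMX-like" case). */
static int real_sync(int vcpu_id)
{
	printf("vcpu %d: syncing PIR into IRR\n", vcpu_id);
	return 42;			/* pretend highest pending vector */
}

/* Backend without one installs a stub instead of NULL (the "SVM-like" case). */
static int stub_sync(int vcpu_id)
{
	(void)vcpu_id;
	return -1;			/* nothing pending, nothing to sync */
}

static struct ops vmx_like = { .sync_pir_to_irr = real_sync };
static struct ops svm_like = { .sync_pir_to_irr = stub_sync };

int main(void)
{
	struct ops *ops = &svm_like;

	/* Call sites no longer need "if (ops->sync_pir_to_irr)". */
	printf("highest irr: %d\n", ops->sync_pir_to_irr(0));

	ops = &vmx_like;
	printf("highest irr: %d\n", ops->sync_pir_to_irr(0));
	return 0;
}

In the patch itself the stub is not even a new function: kvm_lapic_find_highest_irr
already returns the highest IRR, which is exactly what the callers expect when
there is no PIR to merge (see the "[Return highest IRR in the SVM case. - Paolo]"
note above).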