KVM: VMX: Handle event vectoring error in check_emulate_instruction()
Move handling of emulation during event vectoring, which KVM doesn't
support, into VMX's check_emulate_instruction(), so that KVM detects
all unsupported emulation, not just cached emulated MMIO (EPT
misconfig).  E.g. on emulated MMIO that isn't cached (EPT Violation)
or occurs with legacy shadow paging (#PF).

Rejecting emulation on other sources of emulation also fixes a largely
theoretical flaw (thanks to the "unprotect and retry" logic), where KVM
could incorrectly inject a #DF:

  1. CPU executes an instruction and hits a #GP
  2. While vectoring the #GP, a shadow #PF occurs
  3. On the #PF VM-Exit, KVM re-injects #GP
  4. KVM emulates because of the write-protected page
  5. KVM "successfully" emulates and also detects the #GP
  6. KVM synthesizes a #GP, and since #GP has already been injected,
     incorrectly escalates to a #DF.

Fix the comment about EMULTYPE_PF as this flag doesn't necessarily mean
MMIO anymore: it can also be set due to the write protection violation.

Note, handle_ept_misconfig() checks vmx_check_emulate_instruction()
before attempting emulation of any kind.

Suggested-by: Sean Christopherson <seanjc@google.com>
Signed-off-by: Ivan Orlov <iorlov@amazon.com>
Link: https://lore.kernel.org/r/20241217181458.68690-5-iorlov@amazon.com
[sean: massage changelog]
Signed-off-by: Sean Christopherson <seanjc@google.com>
This commit is contained in:
parent 704fc6021b
commit 47ef3ef843
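For orientation before the diff, below is a standalone sketch (plain C, not kernel code) of the decision this patch centralizes in the check_emulate_instruction() hook: if an event is being vectored and the emulation request carries EMULTYPE_PF (which now covers both emulated MMIO and write-protection faults), emulation is refused rather than risking the bogus #DF sequence described above. The flag and mask values here are illustrative placeholders; the real definitions live in the hunks that follow.

#include <stdbool.h>
#include <stdio.h>

/* Illustrative values only; the real definitions live in kvm_host.h / vmx.h. */
#define EMULTYPE_PF			(1 << 6)
#define VECTORING_INFO_VALID_MASK	(1u << 31)

enum { DEMO_EMUL_CONTINUE, DEMO_EMUL_UNHANDLEABLE_VECTORING };

/* Same predicate as kvm_can_emulate_event_vectoring() in the kvm_host.h hunk below. */
static bool can_emulate_event_vectoring(int emul_type)
{
	return !(emul_type & EMULTYPE_PF);
}

/* Models the check added to vmx_check_emulate_instruction() below. */
static int check_emulate(unsigned int idt_vectoring_info, int emul_type)
{
	if ((idt_vectoring_info & VECTORING_INFO_VALID_MASK) &&
	    !can_emulate_event_vectoring(emul_type))
		return DEMO_EMUL_UNHANDLEABLE_VECTORING;

	return DEMO_EMUL_CONTINUE;
}

int main(void)
{
	/* #PF-triggered emulation while an event is being vectored: refused. */
	printf("%d\n", check_emulate(VECTORING_INFO_VALID_MASK, EMULTYPE_PF));
	/* Same emulation with no vectoring in progress: allowed. */
	printf("%d\n", check_emulate(0, EMULTYPE_PF));
	return 0;
}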
arch/x86/include/asm/kvm_host.h
@@ -2032,8 +2032,8 @@ u64 vcpu_tsc_khz(struct kvm_vcpu *vcpu);
  * VMware backdoor emulation handles select instructions
  * and reinjects the #GP for all other cases.
  *
- * EMULTYPE_PF - Set when emulating MMIO by way of an intercepted #PF, in which
- *		 case the CR2/GPA value pass on the stack is valid.
+ * EMULTYPE_PF - Set when an intercepted #PF triggers the emulation, in which case
+ *		 the CR2/GPA value pass on the stack is valid.
  *
  * EMULTYPE_COMPLETE_USER_EXIT - Set when the emulator should update interruptibility
  *			  state and inject single-step #DBs after skipping
@@ -2068,6 +2068,11 @@ u64 vcpu_tsc_khz(struct kvm_vcpu *vcpu);
 #define EMULTYPE_COMPLETE_USER_EXIT (1 << 7)
 #define EMULTYPE_WRITE_PF_TO_SP	    (1 << 8)
 
+static inline bool kvm_can_emulate_event_vectoring(int emul_type)
+{
+	return !(emul_type & EMULTYPE_PF);
+}
+
 int kvm_emulate_instruction(struct kvm_vcpu *vcpu, int emulation_type);
 int kvm_emulate_instruction_from_buffer(struct kvm_vcpu *vcpu,
 					void *insn, int insn_len);
arch/x86/kvm/vmx/vmx.c
@@ -1705,6 +1705,12 @@ int vmx_check_emulate_instruction(struct kvm_vcpu *vcpu, int emul_type,
 		kvm_queue_exception(vcpu, UD_VECTOR);
 		return X86EMUL_PROPAGATE_FAULT;
 	}
 
+	/* Check that emulation is possible during event vectoring */
+	if ((to_vmx(vcpu)->idt_vectoring_info & VECTORING_INFO_VALID_MASK) &&
+	    !kvm_can_emulate_event_vectoring(emul_type))
+		return X86EMUL_UNHANDLEABLE_VECTORING;
+
 	return X86EMUL_CONTINUE;
 }
@@ -6540,26 +6546,15 @@ static int __vmx_handle_exit(struct kvm_vcpu *vcpu, fastpath_t exit_fastpath)
 		return 0;
 	}
 
-	/*
-	 * Note:
-	 * Do not try to fix EXIT_REASON_EPT_MISCONFIG if it caused by
-	 * delivery event since it indicates guest is accessing MMIO.
-	 * The vm-exit can be triggered again after return to guest that
-	 * will cause infinite loop.
-	 */
 	if ((vectoring_info & VECTORING_INFO_VALID_MASK) &&
 	    (exit_reason.basic != EXIT_REASON_EXCEPTION_NMI &&
 	     exit_reason.basic != EXIT_REASON_EPT_VIOLATION &&
 	     exit_reason.basic != EXIT_REASON_PML_FULL &&
 	     exit_reason.basic != EXIT_REASON_APIC_ACCESS &&
 	     exit_reason.basic != EXIT_REASON_TASK_SWITCH &&
-	     exit_reason.basic != EXIT_REASON_NOTIFY)) {
-		gpa_t gpa = INVALID_GPA;
-
-		if (exit_reason.basic == EXIT_REASON_EPT_MISCONFIG)
-			gpa = vmcs_read64(GUEST_PHYSICAL_ADDRESS);
-
-		kvm_prepare_event_vectoring_exit(vcpu, gpa);
+	     exit_reason.basic != EXIT_REASON_NOTIFY &&
+	     exit_reason.basic != EXIT_REASON_EPT_MISCONFIG)) {
+		kvm_prepare_event_vectoring_exit(vcpu, INVALID_GPA);
 		return 0;
 	}
 
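What the common emulation path does with the new status code is not part of this diff. Based on the changelog and the helpers introduced earlier in this series, the expected handling is roughly the sketch below; apart from X86EMUL_UNHANDLEABLE_VECTORING and kvm_prepare_event_vectoring_exit(), every name here is a placeholder rather than upstream code.

/*
 * Sketch only: a caller that has just run ->check_emulate_instruction()
 * and must decide whether emulation may proceed.  The return value follows
 * KVM's exit-handler convention (1 = keep running the guest, 0 = exit to
 * userspace).  "r" and "cr2_or_gpa" stand in for whatever the real caller
 * already has on hand.
 */
static int demo_handle_check_result(struct kvm_vcpu *vcpu, int r, gpa_t cr2_or_gpa)
{
	if (r == X86EMUL_UNHANDLEABLE_VECTORING) {
		/* Hand the unresolvable vectoring situation to userspace. */
		kvm_prepare_event_vectoring_exit(vcpu, cr2_or_gpa);
		return 0;
	}

	return 1;
}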