mirror of
https://git.kernel.org/pub/scm/linux/kernel/git/chenhuacai/linux-loongson

Merge tag 'arm64-upstream' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux

Pull arm64 updates from Catalin Marinas:
 "A quick summary: perf support for Branch Record Buffer Extensions
  (BRBE), typical PMU hardware updates, small additions to MTE for
  store-only tag checking and exposing non-address bits to signal
  handlers, HAVE_LIVEPATCH enabled on arm64, VMAP_STACK forced on.
  There is also a TLBI optimisation on hardware that does not require
  break-before-make when changing the user PTEs between contiguous and
  non-contiguous.

  More details:

  Perf and PMU updates:
   - Add support for new (v3) HiSilicon SLLC and DDRC PMUs
   - Add support for Arm-NI PMU integrations that share interrupts
     between clock domains within a given instance
   - Allow SPE to be configured with a lower sample period than the
     minimum recommendation advertised by PMSIDR_EL1.Interval
   - Add support for Arm's "Branch Record Buffer Extension" (BRBE)
   - Adjust the perf watchdog period according to cpu frequency changes
   - Minor driver fixes and cleanups

  Hardware features:
   - Support for MTE store-only checking (FEAT_MTE_STORE_ONLY)
   - Support for reporting the non-address bits during a synchronous
     MTE tag check fault (FEAT_MTE_TAGGED_FAR)
   - Optimise the TLBI when folding/unfolding contiguous PTEs on
     hardware with FEAT_BBM (break-before-make) level 2 and no TLB
     conflict aborts

  Software features:
   - Enable HAVE_LIVEPATCH after implementing arch_stack_walk_reliable()
     and using the text-poke API for late module relocations
   - Force VMAP_STACK always on and change arm64_efi_rt_init() to use
     arch_alloc_vmap_stack() in order to avoid KASAN false positives

  ACPI:
   - Improve SPCR handling and messaging on systems lacking an SPCR table

  Debug:
   - Simplify the debug exception entry path
   - Drop redundant DBG_MDSCR_* macros

  Kselftests:
   - Cleanups and improvements for SME, SVE and FPSIMD tests

  Miscellaneous:
   - Optimise loop to reduce redundant operations in contpte_ptep_get()
   - Remove ISB when resetting POR_EL0 during signal handling
   - Mark the kernel as tainted on SEA and SError panic
   - Remove redundant gcs_free() call"

* tag 'arm64-upstream' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux: (93 commits)
  arm64/gcs: task_gcs_el0_enable() should use passed task
  arm64: Kconfig: Keep selects somewhat alphabetically ordered
  arm64: signal: Remove ISB when resetting POR_EL0
  kselftest/arm64: Handle attempts to disable SM on SME only systems
  kselftest/arm64: Fix SVE write data generation for SME only systems
  kselftest/arm64: Test SME on SME only systems in fp-ptrace
  kselftest/arm64: Test FPSIMD format data writes via NT_ARM_SVE in fp-ptrace
  kselftest/arm64: Allow sve-ptrace to run on SME only systems
  arm64/mm: Drop redundant addr increment in set_huge_pte_at()
  kselftest/arm4: Provide local defines for AT_HWCAP3
  arm64: Mark kernel as tainted on SAE and SError panic
  arm64/gcs: Don't call gcs_free() when releasing task_struct
  drivers/perf: hisi: Support PMUs with no interrupt
  drivers/perf: hisi: Relax the event number check of v2 PMUs
  drivers/perf: hisi: Add support for HiSilicon SLLC v3 PMU driver
  drivers/perf: hisi: Use ACPI driver_data to retrieve SLLC PMU information
  drivers/perf: hisi: Add support for HiSilicon DDRC v3 PMU driver
  drivers/perf: hisi: Simplify the probe process for each DDRC version
  perf/arm-ni: Support sharing IRQs within an NI instance
  perf/arm-ni: Consolidate CPU affinity handling
  ...
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2015 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 */

#include <hyp/switch.h>
#include <hyp/sysreg-sr.h>

#include <linux/arm-smccc.h>
#include <linux/kvm_host.h>
#include <linux/types.h>
#include <linux/jump_label.h>
#include <uapi/linux/psci.h>

#include <kvm/arm_psci.h>

#include <asm/barrier.h>
#include <asm/cpufeature.h>
#include <asm/kprobes.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_hyp.h>
#include <asm/kvm_mmu.h>
#include <asm/fpsimd.h>
#include <asm/debug-monitors.h>
#include <asm/processor.h>

#include <nvhe/mem_protect.h>

/* Non-VHE specific context */
DEFINE_PER_CPU(struct kvm_host_data, kvm_host_data);
DEFINE_PER_CPU(struct kvm_cpu_context, kvm_hyp_ctxt);
DEFINE_PER_CPU(unsigned long, kvm_hyp_vector);

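/* Fine-grained trap (FGT) register masks; the nVHE hyp object carries its own instances of these symbols. */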
struct fgt_masks hfgrtr_masks;
struct fgt_masks hfgwtr_masks;
struct fgt_masks hfgitr_masks;
struct fgt_masks hdfgrtr_masks;
struct fgt_masks hdfgwtr_masks;
struct fgt_masks hafgrtr_masks;
struct fgt_masks hfgrtr2_masks;
struct fgt_masks hfgwtr2_masks;
struct fgt_masks hfgitr2_masks;
struct fgt_masks hdfgrtr2_masks;
struct fgt_masks hdfgwtr2_masks;

extern void kvm_nvhe_prepare_backtrace(unsigned long fp, unsigned long pc);

static void __activate_traps(struct kvm_vcpu *vcpu)
{
	___activate_traps(vcpu, vcpu->arch.hcr_el2);
	__activate_traps_common(vcpu);
	__activate_cptr_traps(vcpu);

	write_sysreg(__this_cpu_read(kvm_hyp_vector), vbar_el2);

	if (cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT)) {
		struct kvm_cpu_context *ctxt = &vcpu->arch.ctxt;

		isb();
		/*
		 * At this stage, and thanks to the above isb(), S2 is
		 * configured and enabled. We can now restore the guest's S1
		 * configuration: SCTLR, and only then TCR.
		 */
		write_sysreg_el1(ctxt_sys_reg(ctxt, SCTLR_EL1), SYS_SCTLR);
		isb();
		write_sysreg_el1(ctxt_sys_reg(ctxt, TCR_EL1), SYS_TCR);
	}
}

static void __deactivate_traps(struct kvm_vcpu *vcpu)
{
	extern char __kvm_hyp_host_vector[];

	___deactivate_traps(vcpu);

	if (cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT)) {
		u64 val;

		/*
		 * Set the TCR and SCTLR registers in the exact opposite
		 * sequence as __activate_traps (first prevent walks,
		 * then force the MMU on). A generous sprinkling of isb()
		 * ensures that things happen in this exact order.
		 */
		val = read_sysreg_el1(SYS_TCR);
		write_sysreg_el1(val | TCR_EPD1_MASK | TCR_EPD0_MASK, SYS_TCR);
		isb();
		val = read_sysreg_el1(SYS_SCTLR);
		write_sysreg_el1(val | SCTLR_ELx_M, SYS_SCTLR);
		isb();
	}

	__deactivate_traps_common(vcpu);

	write_sysreg_hcr(this_cpu_ptr(&kvm_init_params)->hcr_el2);

	__deactivate_cptr_traps(vcpu);
	write_sysreg(__kvm_hyp_host_vector, vbar_el2);
}

/* Save VGICv3 state on non-VHE systems */
static void __hyp_vgic_save_state(struct kvm_vcpu *vcpu)
{
	if (static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif)) {
		__vgic_v3_save_state(&vcpu->arch.vgic_cpu.vgic_v3);
		__vgic_v3_deactivate_traps(&vcpu->arch.vgic_cpu.vgic_v3);
	}
}

/* Restore VGICv3 state on non-VHE systems */
static void __hyp_vgic_restore_state(struct kvm_vcpu *vcpu)
{
	if (static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif)) {
		__vgic_v3_activate_traps(&vcpu->arch.vgic_cpu.vgic_v3);
		__vgic_v3_restore_state(&vcpu->arch.vgic_cpu.vgic_v3);
	}
}

/*
 * Disable host events, enable guest events
 */
#ifdef CONFIG_HW_PERF_EVENTS
static bool __pmu_switch_to_guest(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu_events *pmu = &vcpu->arch.pmu.events;

	if (pmu->events_host)
		write_sysreg(pmu->events_host, pmcntenclr_el0);

	if (pmu->events_guest)
		write_sysreg(pmu->events_guest, pmcntenset_el0);

	return (pmu->events_host || pmu->events_guest);
}

/*
 * Disable guest events, enable host events
 */
static void __pmu_switch_to_host(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu_events *pmu = &vcpu->arch.pmu.events;

	if (pmu->events_guest)
		write_sysreg(pmu->events_guest, pmcntenclr_el0);

	if (pmu->events_host)
		write_sysreg(pmu->events_host, pmcntenset_el0);
}
#else
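/* Without CONFIG_HW_PERF_EVENTS there are no PMU events to switch. */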
#define __pmu_switch_to_guest(v)	({ false; })
#define __pmu_switch_to_host(v)		do {} while (0)
#endif

/*
 * Handler for protected VM MSR, MRS or System instruction execution in AArch64.
 *
 * Returns true if the hypervisor has handled the exit, and control should go
 * back to the guest, or false if it hasn't.
 */
static bool kvm_handle_pvm_sys64(struct kvm_vcpu *vcpu, u64 *exit_code)
{
	/*
	 * Make sure we handle the exit for workarounds before the pKVM
	 * handling, as the latter could decide to UNDEF.
	 */
	return (kvm_hyp_handle_sysreg(vcpu, exit_code) ||
		kvm_handle_pvm_sysreg(vcpu, exit_code));
}

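/* Exit handlers, indexed by exception class (ESR_ELx.EC). */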
static const exit_handler_fn hyp_exit_handlers[] = {
	[0 ... ESR_ELx_EC_MAX]		= NULL,
	[ESR_ELx_EC_CP15_32]		= kvm_hyp_handle_cp15_32,
	[ESR_ELx_EC_SYS64]		= kvm_hyp_handle_sysreg,
	[ESR_ELx_EC_SVE]		= kvm_hyp_handle_fpsimd,
	[ESR_ELx_EC_FP_ASIMD]		= kvm_hyp_handle_fpsimd,
	[ESR_ELx_EC_IABT_LOW]		= kvm_hyp_handle_iabt_low,
	[ESR_ELx_EC_DABT_LOW]		= kvm_hyp_handle_dabt_low,
	[ESR_ELx_EC_WATCHPT_LOW]	= kvm_hyp_handle_watchpt_low,
	[ESR_ELx_EC_MOPS]		= kvm_hyp_handle_mops,
};

static const exit_handler_fn pvm_exit_handlers[] = {
	[0 ... ESR_ELx_EC_MAX]		= NULL,
	[ESR_ELx_EC_SYS64]		= kvm_handle_pvm_sys64,
	[ESR_ELx_EC_SVE]		= kvm_handle_pvm_restricted,
	[ESR_ELx_EC_FP_ASIMD]		= kvm_hyp_handle_fpsimd,
	[ESR_ELx_EC_IABT_LOW]		= kvm_hyp_handle_iabt_low,
	[ESR_ELx_EC_DABT_LOW]		= kvm_hyp_handle_dabt_low,
	[ESR_ELx_EC_WATCHPT_LOW]	= kvm_hyp_handle_watchpt_low,
	[ESR_ELx_EC_MOPS]		= kvm_hyp_handle_mops,
};

static const exit_handler_fn *kvm_get_exit_handler_array(struct kvm_vcpu *vcpu)
{
	if (unlikely(vcpu_is_protected(vcpu)))
		return pvm_exit_handlers;

	return hyp_exit_handlers;
}

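/* Returns true if the guest should be re-entered without going back to the host. */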
static inline bool fixup_guest_exit(struct kvm_vcpu *vcpu, u64 *exit_code)
{
	const exit_handler_fn *handlers = kvm_get_exit_handler_array(vcpu);

	synchronize_vcpu_pstate(vcpu, exit_code);

	/*
	 * Some guests (e.g., protected VMs) are not allowed to run in
	 * AArch32. The ARMv8 architecture does not give the hypervisor a
	 * mechanism to prevent a guest from dropping to AArch32 EL0 if
	 * implemented by the CPU. If the hypervisor spots a guest in such a
	 * state, ensure it is handled, and don't trust the host to spot or
	 * fix it. The check below is based on the one in
	 * kvm_arch_vcpu_ioctl_run().
	 */
	if (unlikely(vcpu_is_protected(vcpu) && vcpu_mode_is_32bit(vcpu))) {
		/*
		 * As we have caught the guest red-handed, decide that it isn't
		 * fit for purpose anymore by making the vcpu invalid. The VMM
		 * can try and fix it by re-initializing the vcpu with
		 * KVM_ARM_VCPU_INIT, however, this is likely not possible for
		 * protected VMs.
		 */
		vcpu_clear_flag(vcpu, VCPU_INITIALIZED);
		*exit_code &= BIT(ARM_EXIT_WITH_SERROR_BIT);
		*exit_code |= ARM_EXCEPTION_IL;
	}

	return __fixup_guest_exit(vcpu, exit_code, handlers);
}

/* Switch to the guest for legacy non-VHE systems */
int __kvm_vcpu_run(struct kvm_vcpu *vcpu)
{
	struct kvm_cpu_context *host_ctxt;
	struct kvm_cpu_context *guest_ctxt;
	struct kvm_s2_mmu *mmu;
	bool pmu_switch_needed;
	u64 exit_code;

	/*
	 * Having IRQs masked via PMR when entering the guest means the GIC
	 * will not signal the CPU of interrupts of lower priority, and the
	 * only way to get out will be via guest exceptions.
	 * Naturally, we want to avoid this.
	 */
	if (system_uses_irq_prio_masking()) {
		gic_write_pmr(GIC_PRIO_IRQON | GIC_PRIO_PSR_I_SET);
		pmr_sync();
	}

	host_ctxt = host_data_ptr(host_ctxt);
	host_ctxt->__hyp_running_vcpu = vcpu;
	guest_ctxt = &vcpu->arch.ctxt;

	pmu_switch_needed = __pmu_switch_to_guest(vcpu);

	__sysreg_save_state_nvhe(host_ctxt);
	/*
	 * We must flush and disable the SPE buffer for nVHE, as
	 * the translation regime (EL1&0) is going to be loaded with
	 * that of the guest. And we must do this before we change the
	 * translation regime to EL2 (via MDCR_EL2_E2PB == 0) and
	 * before we load guest Stage1.
	 */
	__debug_save_host_buffers_nvhe(vcpu);

	/*
	 * We're about to restore some new MMU state. Make sure
	 * ongoing page-table walks that have started before we
	 * trapped to EL2 have completed. This also synchronises the
	 * above disabling of BRBE, SPE and TRBE.
	 *
	 * See DDI0487I.a D8.1.5 "Out-of-context translation regimes",
	 * rule R_LFHQG and subsequent information statements.
	 */
	dsb(nsh);

	__kvm_adjust_pc(vcpu);

	/*
	 * We must restore the 32-bit state before the sysregs, thanks
	 * to erratum #852523 (Cortex-A57) or #853709 (Cortex-A72).
	 *
	 * Also, and in order to be able to deal with erratum #1319537 (A57)
	 * and #1319367 (A72), we must ensure that all VM-related sysreg are
	 * restored before we enable S2 translation.
	 */
	__sysreg32_restore_state(vcpu);
	__sysreg_restore_state_nvhe(guest_ctxt);

	mmu = kern_hyp_va(vcpu->arch.hw_mmu);
	__load_stage2(mmu, kern_hyp_va(mmu->arch));
	__activate_traps(vcpu);

	__hyp_vgic_restore_state(vcpu);
	__timer_enable_traps(vcpu);

	__debug_switch_to_guest(vcpu);

	do {
		/* Jump in the fire! */
		exit_code = __guest_enter(vcpu);

		/* And we're baaack! */
	} while (fixup_guest_exit(vcpu, &exit_code));

	__sysreg_save_state_nvhe(guest_ctxt);
	__sysreg32_save_state(vcpu);
	__timer_disable_traps(vcpu);
	__hyp_vgic_save_state(vcpu);

	/*
	 * Same thing as before the guest run: we're about to switch
	 * the MMU context, so let's make sure we don't have any
	 * ongoing EL1&0 translations.
	 */
	dsb(nsh);

	__deactivate_traps(vcpu);
	__load_host_stage2();

	__sysreg_restore_state_nvhe(host_ctxt);

	if (guest_owns_fp_regs())
		__fpsimd_save_fpexc32(vcpu);

	__debug_switch_to_host(vcpu);
	/*
	 * This must come after restoring the host sysregs, since a non-VHE
	 * system may enable SPE here and make use of the TTBRs.
	 */
	__debug_restore_host_buffers_nvhe(vcpu);

	if (pmu_switch_needed)
		__pmu_switch_to_host(vcpu);

	/* Returning to host will clear PSR.I, remask PMR if needed */
	if (system_uses_irq_prio_masking())
		gic_write_pmr(GIC_PRIO_IRQOFF);

	host_ctxt->__hyp_running_vcpu = NULL;

	return exit_code;
}

asmlinkage void __noreturn hyp_panic(void)
{
	u64 spsr = read_sysreg_el2(SYS_SPSR);
	u64 elr = read_sysreg_el2(SYS_ELR);
	u64 par = read_sysreg_par();
	struct kvm_cpu_context *host_ctxt;
	struct kvm_vcpu *vcpu;

	host_ctxt = host_data_ptr(host_ctxt);
	vcpu = host_ctxt->__hyp_running_vcpu;

	if (vcpu) {
		__timer_disable_traps(vcpu);
		__deactivate_traps(vcpu);
		__load_host_stage2();
		__sysreg_restore_state_nvhe(host_ctxt);
	}

	/* Prepare to dump kvm nvhe hyp stacktrace */
	kvm_nvhe_prepare_backtrace((unsigned long)__builtin_frame_address(0),
				   _THIS_IP_);

	__hyp_do_panic(host_ctxt, spsr, elr, par);
	unreachable();
}

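/*
 * Separate entry point for the stack-overflow path; keeping it as a distinct
 * symbol lets the resulting panic be attributed to a bad hyp stack.
 */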
asmlinkage void __noreturn hyp_panic_bad_stack(void)
{
	hyp_panic();
}

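/* Out-of-line asmlinkage wrapper around the shared __kvm_unexpected_el2_exception() handler. */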
asmlinkage void kvm_unexpected_el2_exception(void)
{
	__kvm_unexpected_el2_exception();
}