mirror of
https://git.kernel.org/pub/scm/linux/kernel/git/chenhuacai/linux-loongson
synced 2025-09-01 23:46:45 +00:00

* New features: - Support for non-protected guest in protected mode, achieving near feature parity with the non-protected mode - Support for the EL2 timers as part of the ongoing NV support - Allow control of hardware tracing for nVHE/hVHE * Improvements, fixes and cleanups: - Massive cleanup of the debug infrastructure, making it a bit less awkward and definitely easier to maintain. This should pave the way for further optimisations - Complete rewrite of pKVM's fixed-feature infrastructure, aligning it with the rest of KVM and making the code easier to follow - Large simplification of pKVM's memory protection infrastructure - Better handling of RES0/RES1 fields for memory-backed system registers - Add a workaround for Qualcomm's Snapdragon X CPUs, which suffer from a pretty nasty timer bug - Small collection of cleanups and low-impact fixes -----BEGIN PGP SIGNATURE----- iQFEBAABCgAuFiEEPxTL6PPUbjXGY88ct6xw3ITBYzQFAmeYqJcQHHdpbGxAa2Vy bmVsLm9yZwAKCRC3rHDchMFjNLUhCACxUTMVQXhfW3qbh0UQxPd7XXvjI+Hm7SPS wDuVTle4jrFVGHxuZqtgWLmx8hD7bqO965qmFgbevKlwsRY33onH2nbH4i4AcwbA jcdM4yMHZI4+Qmnb4G5ZJ89IwjAhHPZTBOV5KRhyHQ/qtRciHHtOgJde7II9fd68 uIESg4SSSyUzI47YSEHmGVmiBIhdQhq2qust0m6NPFalEGYstPbpluPQ6R1CsDqK v14TIAW7t0vSPucBeODxhA5gEa2JsvNi+sqA+DF/ELH2ZqpkuR7rofgMGblaXCSD JXa5xamRB9dI5zi8vatwfOzYlog+/gzmPqMh/9JXpiDGHxJe0vlz =tQ8F -----END PGP SIGNATURE----- Merge tag 'arm64-upstream' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux Pull KVM/arm64 updates from Will Deacon: "New features: - Support for non-protected guest in protected mode, achieving near feature parity with the non-protected mode - Support for the EL2 timers as part of the ongoing NV support - Allow control of hardware tracing for nVHE/hVHE Improvements, fixes and cleanups: - Massive cleanup of the debug infrastructure, making it a bit less awkward and definitely easier to maintain. 
This should pave the way for further optimisations - Complete rewrite of pKVM's fixed-feature infrastructure, aligning it with the rest of KVM and making the code easier to follow - Large simplification of pKVM's memory protection infrastructure - Better handling of RES0/RES1 fields for memory-backed system registers - Add a workaround for Qualcomm's Snapdragon X CPUs, which suffer from a pretty nasty timer bug - Small collection of cleanups and low-impact fixes" * tag 'arm64-upstream' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux: (87 commits) arm64/sysreg: Get rid of TRFCR_ELx SysregFields KVM: arm64: nv: Fix doc header layout for timers KVM: arm64: nv: Apply RESx settings to sysreg reset values KVM: arm64: nv: Always evaluate HCR_EL2 using sanitising accessors KVM: arm64: Fix selftests after sysreg field name update coresight: Pass guest TRFCR value to KVM KVM: arm64: Support trace filtering for guests KVM: arm64: coresight: Give TRBE enabled state to KVM coresight: trbe: Remove redundant disable call arm64/sysreg/tools: Move TRFCR definitions to sysreg tools: arm64: Update sysreg.h header files KVM: arm64: Drop pkvm_mem_transition for host/hyp donations KVM: arm64: Drop pkvm_mem_transition for host/hyp sharing KVM: arm64: Drop pkvm_mem_transition for FF-A KVM: arm64: Explicitly handle BRBE traps as UNDEFINED KVM: arm64: vgic: Use str_enabled_disabled() in vgic_v3_probe() arm64: kvm: Introduce nvhe stack size constants KVM: arm64: Fix nVHE stacktrace VA bits mask KVM: arm64: Fix FEAT_MTE in pKVM Documentation: Update the behaviour of "kvm-arm.mode" ...
168 lines
4.1 KiB
C
// SPDX-License-Identifier: GPL-2.0-only
|
|
/*
|
|
* aarch32_id_regs - Test for ID register behavior on AArch64-only systems
|
|
*
|
|
* Copyright (c) 2022 Google LLC.
|
|
*
|
|
* Test that KVM handles the AArch64 views of the AArch32 ID registers as RAZ
|
|
* and WI from userspace.
|
|
*/
|
|
|
|
#include <stdint.h>
|
|
|
|
#include "kvm_util.h"
|
|
#include "processor.h"
|
|
#include "test_util.h"
|
|
#include <linux/bitfield.h>
|
|
|
|
/* Arbitrary nonzero pattern used to probe that writes are rejected/ignored. */
#define BAD_ID_REG_VAL 0x1badc0deul

/* Assert, from guest context, that the given ID register reads as zero. */
#define GUEST_ASSERT_REG_RAZ(reg) GUEST_ASSERT_EQ(read_sysreg_s(reg), 0)
|
|
|
|
/*
 * Guest entry point: read every AArch64 view of the AArch32 ID registers
 * and assert each one is RAZ (reads-as-zero).
 *
 * Each register must be named individually because read_sysreg_s() requires
 * a compile-time-constant encoding. The two raw sys_reg() encodings are
 * unnamed slots in the same ID-register space.
 */
static void guest_main(void)
{
	GUEST_ASSERT_REG_RAZ(SYS_ID_PFR0_EL1);
	GUEST_ASSERT_REG_RAZ(SYS_ID_PFR1_EL1);
	GUEST_ASSERT_REG_RAZ(SYS_ID_DFR0_EL1);
	GUEST_ASSERT_REG_RAZ(SYS_ID_AFR0_EL1);
	GUEST_ASSERT_REG_RAZ(SYS_ID_MMFR0_EL1);
	GUEST_ASSERT_REG_RAZ(SYS_ID_MMFR1_EL1);
	GUEST_ASSERT_REG_RAZ(SYS_ID_MMFR2_EL1);
	GUEST_ASSERT_REG_RAZ(SYS_ID_MMFR3_EL1);
	GUEST_ASSERT_REG_RAZ(SYS_ID_ISAR0_EL1);
	GUEST_ASSERT_REG_RAZ(SYS_ID_ISAR1_EL1);
	GUEST_ASSERT_REG_RAZ(SYS_ID_ISAR2_EL1);
	GUEST_ASSERT_REG_RAZ(SYS_ID_ISAR3_EL1);
	GUEST_ASSERT_REG_RAZ(SYS_ID_ISAR4_EL1);
	GUEST_ASSERT_REG_RAZ(SYS_ID_ISAR5_EL1);
	GUEST_ASSERT_REG_RAZ(SYS_ID_MMFR4_EL1);
	GUEST_ASSERT_REG_RAZ(SYS_ID_ISAR6_EL1);
	GUEST_ASSERT_REG_RAZ(SYS_MVFR0_EL1);
	GUEST_ASSERT_REG_RAZ(SYS_MVFR1_EL1);
	GUEST_ASSERT_REG_RAZ(SYS_MVFR2_EL1);
	GUEST_ASSERT_REG_RAZ(sys_reg(3, 0, 0, 3, 3));
	GUEST_ASSERT_REG_RAZ(SYS_ID_PFR2_EL1);
	GUEST_ASSERT_REG_RAZ(SYS_ID_DFR1_EL1);
	GUEST_ASSERT_REG_RAZ(SYS_ID_MMFR5_EL1);
	GUEST_ASSERT_REG_RAZ(sys_reg(3, 0, 0, 3, 7));

	GUEST_DONE();
}
|
|
|
|
static void test_guest_raz(struct kvm_vcpu *vcpu)
|
|
{
|
|
struct ucall uc;
|
|
|
|
vcpu_run(vcpu);
|
|
|
|
switch (get_ucall(vcpu, &uc)) {
|
|
case UCALL_ABORT:
|
|
REPORT_GUEST_ASSERT(uc);
|
|
break;
|
|
case UCALL_DONE:
|
|
break;
|
|
default:
|
|
TEST_FAIL("Unexpected ucall: %lu", uc.cmd);
|
|
}
|
|
}
|
|
|
|
/*
 * Userspace register IDs that this test expects KVM to treat as RAZ/WI:
 * reads return zero and writes of any value succeed without effect
 * (see test_user_raz_wi()).
 */
static uint64_t raz_wi_reg_ids[] = {
	KVM_ARM64_SYS_REG(SYS_ID_PFR0_EL1),
	KVM_ARM64_SYS_REG(SYS_ID_PFR1_EL1),
	KVM_ARM64_SYS_REG(SYS_ID_DFR0_EL1),
	KVM_ARM64_SYS_REG(SYS_ID_MMFR0_EL1),
	KVM_ARM64_SYS_REG(SYS_ID_MMFR1_EL1),
	KVM_ARM64_SYS_REG(SYS_ID_MMFR2_EL1),
	KVM_ARM64_SYS_REG(SYS_ID_MMFR3_EL1),
	KVM_ARM64_SYS_REG(SYS_ID_ISAR0_EL1),
	KVM_ARM64_SYS_REG(SYS_ID_ISAR1_EL1),
	KVM_ARM64_SYS_REG(SYS_ID_ISAR2_EL1),
	KVM_ARM64_SYS_REG(SYS_ID_ISAR3_EL1),
	KVM_ARM64_SYS_REG(SYS_ID_ISAR4_EL1),
	KVM_ARM64_SYS_REG(SYS_ID_ISAR5_EL1),
	KVM_ARM64_SYS_REG(SYS_ID_MMFR4_EL1),
	KVM_ARM64_SYS_REG(SYS_ID_ISAR6_EL1),
	KVM_ARM64_SYS_REG(SYS_MVFR0_EL1),
	KVM_ARM64_SYS_REG(SYS_MVFR1_EL1),
	KVM_ARM64_SYS_REG(SYS_MVFR2_EL1),
	KVM_ARM64_SYS_REG(SYS_ID_PFR2_EL1),
	KVM_ARM64_SYS_REG(SYS_ID_MMFR5_EL1),
};
|
|
|
|
static void test_user_raz_wi(struct kvm_vcpu *vcpu)
|
|
{
|
|
int i;
|
|
|
|
for (i = 0; i < ARRAY_SIZE(raz_wi_reg_ids); i++) {
|
|
uint64_t reg_id = raz_wi_reg_ids[i];
|
|
uint64_t val;
|
|
|
|
val = vcpu_get_reg(vcpu, reg_id);
|
|
TEST_ASSERT_EQ(val, 0);
|
|
|
|
/*
|
|
* Expect the ioctl to succeed with no effect on the register
|
|
* value.
|
|
*/
|
|
vcpu_set_reg(vcpu, reg_id, BAD_ID_REG_VAL);
|
|
|
|
val = vcpu_get_reg(vcpu, reg_id);
|
|
TEST_ASSERT_EQ(val, 0);
|
|
}
|
|
}
|
|
|
|
/*
 * Userspace register IDs that this test expects to be RAZ but invariant:
 * reads return zero and writes of a nonzero value fail with EINVAL
 * (see test_user_raz_invariant()).
 *
 * NOTE(review): the raw sys_reg() encodings appear to be unnamed slots in
 * the AArch32 ID register space — confirm against the Arm ARM encodings.
 */
static uint64_t raz_invariant_reg_ids[] = {
	KVM_ARM64_SYS_REG(SYS_ID_AFR0_EL1),
	KVM_ARM64_SYS_REG(sys_reg(3, 0, 0, 3, 3)),
	KVM_ARM64_SYS_REG(SYS_ID_DFR1_EL1),
	KVM_ARM64_SYS_REG(sys_reg(3, 0, 0, 3, 7)),
};
|
|
|
|
static void test_user_raz_invariant(struct kvm_vcpu *vcpu)
|
|
{
|
|
int i, r;
|
|
|
|
for (i = 0; i < ARRAY_SIZE(raz_invariant_reg_ids); i++) {
|
|
uint64_t reg_id = raz_invariant_reg_ids[i];
|
|
uint64_t val;
|
|
|
|
val = vcpu_get_reg(vcpu, reg_id);
|
|
TEST_ASSERT_EQ(val, 0);
|
|
|
|
r = __vcpu_set_reg(vcpu, reg_id, BAD_ID_REG_VAL);
|
|
TEST_ASSERT(r < 0 && errno == EINVAL,
|
|
"unexpected KVM_SET_ONE_REG error: r=%d, errno=%d", r, errno);
|
|
|
|
val = vcpu_get_reg(vcpu, reg_id);
|
|
TEST_ASSERT_EQ(val, 0);
|
|
}
|
|
}
|
|
|
|
|
|
|
|
static bool vcpu_aarch64_only(struct kvm_vcpu *vcpu)
|
|
{
|
|
uint64_t val, el0;
|
|
|
|
val = vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(SYS_ID_AA64PFR0_EL1));
|
|
|
|
el0 = FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_EL0), val);
|
|
return el0 == ID_AA64PFR0_EL1_EL0_IMP;
|
|
}
|
|
|
|
int main(void)
|
|
{
|
|
struct kvm_vcpu *vcpu;
|
|
struct kvm_vm *vm;
|
|
|
|
vm = vm_create_with_one_vcpu(&vcpu, guest_main);
|
|
|
|
TEST_REQUIRE(vcpu_aarch64_only(vcpu));
|
|
|
|
test_user_raz_wi(vcpu);
|
|
test_user_raz_invariant(vcpu);
|
|
test_guest_raz(vcpu);
|
|
|
|
kvm_vm_free(vm);
|
|
}
|