
Instead of creating and initializing _all_ hyp vcpus in pKVM when the first host vcpu runs for the first time, initialize _each_ hyp vcpu in conjunction with its corresponding host vcpu. Some of the host vcpu state (e.g., system registers and trap values) is not initialized until the first time the host vcpu is run. Therefore, a hyp vcpu initialized before its corresponding host vcpu has run for the first time might not see the complete host state of that vcpu. Additionally, this behavior is in line with non-protected modes.

Acked-by: Will Deacon <will@kernel.org>
Reviewed-by: Marc Zyngier <maz@kernel.org>
Signed-off-by: Fuad Tabba <tabba@google.com>
Link: https://lore.kernel.org/r/20250314111832.4137161-5-tabba@google.com
Signed-off-by: Oliver Upton <oliver.upton@linux.dev>
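As a minimal sketch of the flow the commit describes, consider a host-side wrapper invoked when a host vcpu is about to run for the first time. The wrapper name, the is_created flag, and PKVM_HYP_VCPU_SZ below are hypothetical; __pkvm_init_vcpu() is the hyp-side entry point declared in the header that follows, and kvm_call_hyp_nvhe() is KVM/arm64's existing hypercall mechanism.

/*
 * Hypothetical host-side sketch: initialize the hyp vcpu lazily, on the
 * host vcpu's first run. pkvm_create_hyp_vcpu_lazy(), the is_created
 * flag and PKVM_HYP_VCPU_SZ are illustrative; __pkvm_init_vcpu() is the
 * hyp-side entry point declared below.
 */
static int pkvm_create_hyp_vcpu_lazy(struct kvm_vcpu *host_vcpu)
{
	pkvm_handle_t handle = host_vcpu->kvm->arch.pkvm.handle;
	void *hyp_vcpu_mem;
	int ret;

	/* Nothing to do if a previous first-run path already did this. */
	if (host_vcpu->arch.pkvm.is_created)
		return 0;

	/* Memory donated to hyp to back this vcpu's pkvm_hyp_vcpu. */
	hyp_vcpu_mem = alloc_pages_exact(PKVM_HYP_VCPU_SZ, GFP_KERNEL_ACCOUNT);
	if (!hyp_vcpu_mem)
		return -ENOMEM;

	/*
	 * By now the host vcpu carries fully initialized state (system
	 * registers, trap values, ...), so hyp sees a complete view.
	 */
	ret = kvm_call_hyp_nvhe(__pkvm_init_vcpu, handle, host_vcpu,
				(unsigned long)hyp_vcpu_mem);
	if (ret)
		free_pages_exact(hyp_vcpu_mem, PKVM_HYP_VCPU_SZ);
	else
		host_vcpu->arch.pkvm.is_created = true;

	return ret;
}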
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2021 Google LLC
 * Author: Fuad Tabba <tabba@google.com>
 */

#ifndef __ARM64_KVM_NVHE_PKVM_H__
#define __ARM64_KVM_NVHE_PKVM_H__

#include <asm/kvm_pkvm.h>

#include <nvhe/gfp.h>
#include <nvhe/spinlock.h>

/*
 * Holds the relevant data for maintaining the vcpu state completely at hyp.
 */
struct pkvm_hyp_vcpu {
	struct kvm_vcpu vcpu;

	/* Backpointer to the host's (untrusted) vCPU instance. */
	struct kvm_vcpu *host_vcpu;

	/*
	 * If this hyp vCPU is loaded, then this is a backpointer to the
	 * per-cpu pointer tracking us. Otherwise, NULL if not loaded.
	 */
	struct pkvm_hyp_vcpu **loaded_hyp_vcpu;
};

/*
 * Holds the relevant data for running a protected vm.
 */
struct pkvm_hyp_vm {
	struct kvm kvm;

	/* Backpointer to the host's (untrusted) KVM instance. */
	struct kvm *host_kvm;

	/* The guest's stage-2 page-table managed by the hypervisor. */
	struct kvm_pgtable pgt;
	struct kvm_pgtable_mm_ops mm_ops;
	struct hyp_pool pool;
	hyp_spinlock_t lock;

	/* Array of the hyp vCPU structures for this VM. */
	struct pkvm_hyp_vcpu *vcpus[];
};

extern hyp_spinlock_t vm_table_lock;

static inline struct pkvm_hyp_vm *
pkvm_hyp_vcpu_to_hyp_vm(struct pkvm_hyp_vcpu *hyp_vcpu)
{
	return container_of(hyp_vcpu->vcpu.kvm, struct pkvm_hyp_vm, kvm);
}

static inline bool pkvm_hyp_vcpu_is_protected(struct pkvm_hyp_vcpu *hyp_vcpu)
{
	return vcpu_is_protected(&hyp_vcpu->vcpu);
}

static inline bool pkvm_hyp_vm_is_protected(struct pkvm_hyp_vm *hyp_vm)
{
	return kvm_vm_is_protected(&hyp_vm->kvm);
}

void pkvm_hyp_vm_table_init(void *tbl);

int __pkvm_init_vm(struct kvm *host_kvm, unsigned long vm_hva,
		   unsigned long pgd_hva);
int __pkvm_init_vcpu(pkvm_handle_t handle, struct kvm_vcpu *host_vcpu,
		     unsigned long vcpu_hva);
int __pkvm_teardown_vm(pkvm_handle_t handle);

struct pkvm_hyp_vcpu *pkvm_load_hyp_vcpu(pkvm_handle_t handle,
					 unsigned int vcpu_idx);
void pkvm_put_hyp_vcpu(struct pkvm_hyp_vcpu *hyp_vcpu);
struct pkvm_hyp_vcpu *pkvm_get_loaded_hyp_vcpu(void);

struct pkvm_hyp_vm *get_pkvm_hyp_vm(pkvm_handle_t handle);
struct pkvm_hyp_vm *get_np_pkvm_hyp_vm(pkvm_handle_t handle);
void put_pkvm_hyp_vm(struct pkvm_hyp_vm *hyp_vm);

bool kvm_handle_pvm_sysreg(struct kvm_vcpu *vcpu, u64 *exit_code);
bool kvm_handle_pvm_restricted(struct kvm_vcpu *vcpu, u64 *exit_code);
void kvm_init_pvm_id_regs(struct kvm_vcpu *vcpu);
int kvm_check_pvm_sysreg_table(void);

#endif /* __ARM64_KVM_NVHE_PKVM_H__ */
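For reference, a hedged hyp-side sketch of how the load/put pair and the accessors declared above fit together. handle_example_hyp_call() and its body are invented for illustration; only the pkvm_* calls come from this header.

/*
 * Illustrative hyp-side caller of the API above; the function and its
 * body are hypothetical, the pkvm_* calls are the declarations from
 * this header.
 */
static int handle_example_hyp_call(pkvm_handle_t handle, unsigned int vcpu_idx)
{
	struct pkvm_hyp_vcpu *hyp_vcpu;
	int ret;

	/*
	 * Marks the hyp vcpu as loaded on this physical CPU via the
	 * loaded_hyp_vcpu backpointer; NULL if it cannot be loaded.
	 */
	hyp_vcpu = pkvm_load_hyp_vcpu(handle, vcpu_idx);
	if (!hyp_vcpu)
		return -EINVAL;

	/* The owning VM is recovered via container_of() on vcpu.kvm. */
	ret = pkvm_hyp_vm_is_protected(pkvm_hyp_vcpu_to_hyp_vm(hyp_vcpu));

	/* Drops the loaded state set up by pkvm_load_hyp_vcpu(). */
	pkvm_put_hyp_vcpu(hyp_vcpu);

	return ret;
}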