Mirror of https://git.proxmox.com/git/mirror_ubuntu-kernels.git
KVM: X86: Don't unsync pagetables when speculative
We'd better only unsync the pagetable when there has actually been a real write fault on a level-1 pagetable.

Signed-off-by: Lai Jiangshan <laijs@linux.alibaba.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Message-Id: <20210918005636.3675-10-jiangshanlai@gmail.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
commit f1c4a88c41
parent cc2a8e66bb
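To make the intent concrete, here is a minimal user-space C sketch of the new decision flow, not the kernel implementation: the shadow_page struct, the try_to_unsync() helper and the single-page check are made-up stand-ins (the real mmu_try_to_unsync_pages() walks every shadow page for the gfn and deals with locking). It only illustrates the added check: a speculative access that hits a still-synced shadow page now bails out with -EEXIST and leaves the page alone, so only a genuine write fault on a level-1 pagetable marks it unsync.

/*
 * Minimal user-space sketch of the unsync decision added by this commit.
 * Types and helpers are illustrative stand-ins, not kernel definitions.
 */
#include <stdbool.h>
#include <stdio.h>
#include <errno.h>

struct shadow_page {
	bool unsync;		/* guest PTEs may be stale in this shadow page */
	bool write_protected;	/* must stay write-protected (e.g. upper level) */
};

/*
 * Decide what to do when a gfn shadowed by @sp is about to be mapped
 * writable.  Returns 0 if the page is (now) unsync, -EPERM if it must
 * remain write-protected, and -EEXIST if this was only a speculative
 * access and the page was left alone -- the behaviour this commit adds.
 */
static int try_to_unsync(struct shadow_page *sp, bool can_unsync, bool speculative)
{
	if (!can_unsync || sp->write_protected)
		return -EPERM;

	if (sp->unsync)
		return 0;		/* already unsync, nothing to do */

	if (speculative)
		return -EEXIST;		/* don't unsync on a speculative access */

	sp->unsync = true;		/* real write fault: let the guest write,
					 * sync the shadow page back later */
	return 0;
}

int main(void)
{
	struct shadow_page sp = { .unsync = false, .write_protected = false };
	int ret;

	/* Speculative access: the page is left alone. */
	ret = try_to_unsync(&sp, true, true);
	printf("speculative access: ret=%d unsync=%d\n", ret, sp.unsync);

	/* Real write fault: the page is marked unsync. */
	ret = try_to_unsync(&sp, true, false);
	printf("real write fault:   ret=%d unsync=%d\n", ret, sp.unsync);

	return 0;
}

As the make_spte() hunk below shows, any non-zero return takes the SET_SPTE_WRITE_PROTECTED_PT path, so a speculatively prefetched SPTE simply stays write-protected; a later real write fault can still unsync the page.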
@@ -2582,7 +2582,8 @@ static void kvm_unsync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
  * were marked unsync (or if there is no shadow page), -EPERM if the SPTE must
  * be write-protected.
  */
-int mmu_try_to_unsync_pages(struct kvm_vcpu *vcpu, gfn_t gfn, bool can_unsync)
+int mmu_try_to_unsync_pages(struct kvm_vcpu *vcpu, gfn_t gfn, bool can_unsync,
+			    bool speculative)
 {
 	struct kvm_mmu_page *sp;
 	bool locked = false;
@@ -2608,6 +2609,9 @@ int mmu_try_to_unsync_pages(struct kvm_vcpu *vcpu, gfn_t gfn, bool can_unsync)
 		if (sp->unsync)
 			continue;
 
+		if (speculative)
+			return -EEXIST;
+
 		/*
 		 * TDP MMU page faults require an additional spinlock as they
 		 * run with mmu_lock held for read, not write, and the unsync
@@ -124,7 +124,8 @@ static inline bool is_nx_huge_page_enabled(void)
 	return READ_ONCE(nx_huge_pages);
 }
 
-int mmu_try_to_unsync_pages(struct kvm_vcpu *vcpu, gfn_t gfn, bool can_unsync);
+int mmu_try_to_unsync_pages(struct kvm_vcpu *vcpu, gfn_t gfn, bool can_unsync,
+			    bool speculative);
 
 void kvm_mmu_gfn_disallow_lpage(const struct kvm_memory_slot *slot, gfn_t gfn);
 void kvm_mmu_gfn_allow_lpage(const struct kvm_memory_slot *slot, gfn_t gfn);
@@ -159,7 +159,7 @@ int make_spte(struct kvm_vcpu *vcpu, unsigned int pte_access, int level,
 		 * e.g. it's write-tracked (upper-level SPs) or has one or more
 		 * shadow pages and unsync'ing pages is not allowed.
 		 */
-		if (mmu_try_to_unsync_pages(vcpu, gfn, can_unsync)) {
+		if (mmu_try_to_unsync_pages(vcpu, gfn, can_unsync, speculative)) {
 			pgprintk("%s: found shadow page for %llx, marking ro\n",
 				 __func__, gfn);
 			ret |= SET_SPTE_WRITE_PROTECTED_PT;