KVM: X86: Move PTE present check from loop body to __shadow_walk_next()
So far, the loop bodies already ensure the PTE is present before calling __shadow_walk_next(): some loop bodies simply exit early on a !PRESENT SPTE, and the others, i.e. FNAME(fetch) and __direct_map(), do not currently guard their walks with is_shadow_present_pte(), but only because they install present non-leaf SPTEs in the loop itself.

But checking PTE presence in __shadow_walk_next() (which the shadow-walk iteration macros invoke to advance the walk) is more prudent: walking past a !PRESENT SPTE would lead to attempting to read the next-level SPTE from a garbage iter->shadow_addr. It also allows removing the is_shadow_present_pte() checks from the loop bodies.

Reviewed-by: Sean Christopherson <seanjc@google.com>
Signed-off-by: Lai Jiangshan <laijs@linux.alibaba.com>
Message-Id: <20210906122547.263316-2-jiangshanlai@gmail.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
commit 3e44dce4d0
parent 5228eb96a4
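To make the hazard concrete, here is a minimal C sketch of the termination logic, under simplified assumptions: walk_iter, walk_next(), is_present(), PRESENT_MASK and ADDR_MASK are toy stand-ins invented for illustration, not the kernel's kvm_shadow_walk_iterator, __shadow_walk_next(), is_shadow_present_pte() or SPTE masks. What it shows is the ordering the patch enforces: presence is checked before shadow_addr is recomputed from the SPTE's address bits.

#include <stdbool.h>
#include <stdint.h>

#define PRESENT_MASK 0x1ULL                 /* hypothetical "present" bit */
#define ADDR_MASK    0x000ffffffffff000ULL  /* next-table address bits    */

struct walk_iter {
	int      level;       /* 0 means "walk finished"                 */
	uint64_t shadow_addr; /* PA of the page table for this level     */
};

static bool is_present(uint64_t spte)
{
	return spte & PRESENT_MASK;
}

static bool is_last(uint64_t spte, int level)
{
	(void)spte;           /* a real SPTE also encodes huge pages     */
	return level == 1;    /* leaf level in this toy model            */
}

/*
 * Mirrors the shape of the patched __shadow_walk_next(): a !PRESENT
 * SPTE now ends the walk here, so no caller can recompute shadow_addr
 * from the garbage address bits of a non-present entry.
 */
static void walk_next(struct walk_iter *it, uint64_t spte)
{
	if (!is_present(spte) || is_last(spte, it->level)) {
		it->level = 0;                /* next walk_okay() fails  */
		return;
	}
	it->level--;
	it->shadow_addr = spte & ADDR_MASK;   /* valid only if present   */
}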
diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
@@ -2220,7 +2220,7 @@ static bool shadow_walk_okay(struct kvm_shadow_walk_iterator *iterator)
 static void __shadow_walk_next(struct kvm_shadow_walk_iterator *iterator,
 			       u64 spte)
 {
-	if (is_last_spte(spte, iterator->level)) {
+	if (!is_shadow_present_pte(spte) || is_last_spte(spte, iterator->level)) {
 		iterator->level = 0;
 		return;
 	}
@@ -3189,9 +3189,6 @@ static u64 *fast_pf_get_last_sptep(struct kvm_vcpu *vcpu, gpa_t gpa, u64 *spte)
 	for_each_shadow_entry_lockless(vcpu, gpa, iterator, old_spte) {
 		sptep = iterator.sptep;
 		*spte = old_spte;
-
-		if (!is_shadow_present_pte(old_spte))
-			break;
 	}
 
 	return sptep;
@@ -3759,9 +3756,6 @@ static int get_walk(struct kvm_vcpu *vcpu, u64 addr, u64 *sptes, int *root_level)
 		spte = mmu_spte_get_lockless(iterator.sptep);
 
 		sptes[leaf] = spte;
-
-		if (!is_shadow_present_pte(spte))
-			break;
 	}
 
 	return leaf;
@@ -3877,11 +3871,8 @@ static void shadow_page_table_clear_flood(struct kvm_vcpu *vcpu, gva_t addr)
 	u64 spte;
 
 	walk_shadow_page_lockless_begin(vcpu);
-	for_each_shadow_entry_lockless(vcpu, addr, iterator, spte) {
+	for_each_shadow_entry_lockless(vcpu, addr, iterator, spte)
 		clear_sp_write_flooding_count(iterator.sptep);
-		if (!is_shadow_present_pte(spte))
-			break;
-	}
 	walk_shadow_page_lockless_end(vcpu);
 }
 
diff --git a/arch/x86/kvm/mmu/paging_tmpl.h b/arch/x86/kvm/mmu/paging_tmpl.h
@@ -1002,7 +1002,7 @@ static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva, hpa_t root_hpa)
 			FNAME(prefetch_gpte)(vcpu, sp, sptep, gpte, false);
 		}
 
-		if (!is_shadow_present_pte(*sptep) || !sp->unsync_children)
+		if (!sp->unsync_children)
 			break;
 	}
 	write_unlock(&vcpu->kvm->mmu_lock);
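And for how the callers simplify: once the advance helper refuses to step past a !PRESENT SPTE, the loop condition and increment terminate the walk on their own, which is why the hunks above can drop their break statements. Below is a sketch of the resulting loop shape, reusing the toy walk_iter/walk_next() from the earlier sketch; walk_okay() and the read_spte callback are likewise hypothetical stand-ins for shadow_walk_okay() and mmu_spte_get_lockless(), not the kernel's macros.

static bool walk_okay(const struct walk_iter *it)
{
	return it->level > 0;  /* walk_next() zeroes level to stop */
}

/*
 * Toy analogue of the loop shape in fast_pf_get_last_sptep() after the
 * patch: the body just records the last SPTE seen; no presence check is
 * needed because walk_next() already refuses to descend through a
 * !PRESENT entry.
 */
static uint64_t walk_to_last(struct walk_iter *it,
			     uint64_t (*read_spte)(const struct walk_iter *))
{
	uint64_t spte = 0;

	for (; walk_okay(it); walk_next(it, spte))
		spte = read_spte(it);

	return spte;
}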