KVM: x86: Use __try_cmpxchg_user() to update guest PTE A/D bits
Use the recently introduced __try_cmpxchg_user() to update guest PTE A/D
bits instead of mapping the PTE into kernel address space. The VM_PFNMAP
path is broken as it assumes that vm_pgoff is the base pfn of the mapped
VMA range, which is conceptually wrong as vm_pgoff is the offset relative
to the file and has nothing to do with the pfn. The horrific hack worked
for the original use case (backing guest memory with /dev/mem), but leads
to accessing "random" pfns for pretty much any other VM_PFNMAP case.
Fixes: bd53cb35a3 ("X86/KVM: Handle PFNs outside of kernel reach when touching GPTEs")
Debugged-by: Tadeusz Struk <tadeusz.struk@linaro.org>
Tested-by: Tadeusz Struk <tadeusz.struk@linaro.org>
Reported-by: syzbot+6cde2282daa792c49ab8@syzkaller.appspotmail.com
Cc: stable@vger.kernel.org
Signed-off-by: Sean Christopherson <seanjc@google.com>
Message-Id: <20220202004945.2540433-4-seanjc@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
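To illustrate the vm_pgoff misconception the message describes, here is a
minimal sketch (not the removed code itself; that is in the diff below).
Deriving a pfn this way only works when a VMA happens to have been mapped
with vm_pgoff equal to the base pfn, as remap_pfn_range() arranges for
/dev/mem, and yields garbage for VM_PFNMAP mappings in general. The helper
name is hypothetical, for illustration only:

#include <linux/mm.h>

/* Hypothetical helper, illustrating the flawed assumption. */
static unsigned long guessed_pfn(struct vm_area_struct *vma,
				 unsigned long vaddr)
{
	/*
	 * WRONG in general: vm_pgoff is the mapping's offset into the
	 * backing file, in pages; it is a base pfn only by accident.
	 */
	return vma->vm_pgoff + ((vaddr - vma->vm_start) >> PAGE_SHIFT);
}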
commit f122dfe447
parent 989b5db215
@@ -144,42 +144,6 @@ static bool FNAME(is_rsvd_bits_set)(struct kvm_mmu *mmu, u64 gpte, int level)
 	       FNAME(is_bad_mt_xwr)(&mmu->guest_rsvd_check, gpte);
 }
 
-static int FNAME(cmpxchg_gpte)(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
-			       pt_element_t __user *ptep_user, unsigned index,
-			       pt_element_t orig_pte, pt_element_t new_pte)
-{
-	signed char r;
-
-	if (!user_access_begin(ptep_user, sizeof(pt_element_t)))
-		return -EFAULT;
-
-#ifdef CMPXCHG
-	asm volatile("1:" LOCK_PREFIX CMPXCHG " %[new], %[ptr]\n"
-		     "setnz %b[r]\n"
-		     "2:"
-		     _ASM_EXTABLE_TYPE_REG(1b, 2b, EX_TYPE_EFAULT_REG, %k[r])
-		     : [ptr] "+m" (*ptep_user),
-		       [old] "+a" (orig_pte),
-		       [r] "=q" (r)
-		     : [new] "r" (new_pte)
-		     : "memory");
-#else
-	asm volatile("1:" LOCK_PREFIX "cmpxchg8b %[ptr]\n"
-		     "setnz %b[r]\n"
-		     "2:"
-		     _ASM_EXTABLE_TYPE_REG(1b, 2b, EX_TYPE_EFAULT_REG, %k[r])
-		     : [ptr] "+m" (*ptep_user),
-		       [old] "+A" (orig_pte),
-		       [r] "=q" (r)
-		     : [new_lo] "b" ((u32)new_pte),
-		       [new_hi] "c" ((u32)(new_pte >> 32))
-		     : "memory");
-#endif
-
-	user_access_end();
-	return r;
-}
-
 static bool FNAME(prefetch_invalid_gpte)(struct kvm_vcpu *vcpu,
 					 struct kvm_mmu_page *sp, u64 *spte,
 					 u64 gpte)
@@ -278,7 +242,7 @@ static int FNAME(update_accessed_dirty_bits)(struct kvm_vcpu *vcpu,
 		if (unlikely(!walker->pte_writable[level - 1]))
 			continue;
 
-		ret = FNAME(cmpxchg_gpte)(vcpu, mmu, ptep_user, index, orig_pte, pte);
+		ret = __try_cmpxchg_user(ptep_user, &orig_pte, pte, fault);
 		if (ret)
 			return ret;
 
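The removed FNAME(cmpxchg_gpte) open-coded a LOCK CMPXCHG (cmpxchg8b for
32-bit PAE PTEs) with its own exception-table fixup; __try_cmpxchg_user(),
added by the parent commit 989b5db215, provides that generically and takes a
caller-supplied label to branch to if the user access faults. A minimal
sketch of the resulting call-site shape, assuming the fault label and its
-EFAULT return sit outside the hunk shown above (update_pte_ad_bits is a
hypothetical wrapper, not the kernel's function, and pt_element_t comes from
paging_tmpl.h):

#include <linux/uaccess.h>

/* Hypothetical wrapper around the pattern in the hunk above. */
static int update_pte_ad_bits(pt_element_t __user *ptep_user,
			      pt_element_t orig_pte, pt_element_t pte)
{
	int ret;

	/*
	 * Atomically replace the guest PTE iff it still equals orig_pte.
	 * A nonzero return means the compare failed (the guest changed
	 * the PTE underneath us); a fault on the user access branches
	 * to the "fault" label below.
	 */
	ret = __try_cmpxchg_user(ptep_user, &orig_pte, pte, fault);
	if (ret)
		return ret;	/* lost the race; the caller retries the walk */

	return 0;

fault:
	return -EFAULT;
}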