mirror_ubuntu-kernels/tools/testing/selftests/kvm/lib/memstress.c
Paolo Bonzini 07b4b2f404 KVM: selftests: touch all pages of args on each memstress iteration
Access the same memory addresses on each iteration of the memstress
guest code.  This ensures that the state of KVM's page tables
is the same after every iteration, including the pages that host the
guest page tables for args and vcpu_args.

This difference is visible when running the proposed
dirty_log_page_splitting_test[*] on AMD, or on Intel with pml=0 and
eptad=0.  The tests fail due to different semantics of dirty bits for
page-table pages on AMD (and eptad=0) and Intel.  Both AMD and Intel with
eptad=0 treat page-table accesses as writes, therefore more pages are
dropped before the repopulation phase when dirty logging is disabled.

The "missing" page had been included in the population phase because it
hosts the page tables for vcpu_args, but repopulation does not need it.
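
The fix is the per-iteration touch loop at the top of
memstress_guest_code(), which reads one byte from each page backing
memstress_args before the test region itself is accessed:

	for (i = 0; i < sizeof(memstress_args); i += args->guest_page_size)
		(void) *((volatile char *)args + i);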

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Reviewed-by: Vipin Sharma <vipinsh@google.com>
Link: https://lore.kernel.org/r/20230412200913.1570873-1-pbonzini@redhat.com
[sean: add additional details in changelog]
Signed-off-by: Sean Christopherson <seanjc@google.com>
2023-06-01 14:03:14 -07:00


// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2020, Google LLC.
*/
#define _GNU_SOURCE
#include <inttypes.h>
#include "kvm_util.h"
#include "memstress.h"
#include "processor.h"
struct memstress_args memstress_args;
/*
* Guest virtual memory offset of the testing memory slot.
* Must not conflict with identity mapped test code.
*/
static uint64_t guest_test_virt_mem = DEFAULT_GUEST_TEST_MEM;

struct vcpu_thread {
/* The index of the vCPU. */
int vcpu_idx;
/* The pthread backing the vCPU. */
pthread_t thread;
/* Set to true once the vCPU thread is up and running. */
bool running;
};

/* The vCPU threads involved in this test. */
static struct vcpu_thread vcpu_threads[KVM_MAX_VCPUS];

/* The function run by each vCPU thread, as provided by the test. */
static void (*vcpu_thread_fn)(struct memstress_vcpu_args *);

/* Set to true once all vCPU threads are up and running. */
static bool all_vcpu_threads_running;

static struct kvm_vcpu *vcpus[KVM_MAX_VCPUS];

/*
* Continuously access (write or read, depending on write_percent) the
* first 8 bytes of each page in the specified region, either
* sequentially or at random.
*/
void memstress_guest_code(uint32_t vcpu_idx)
{
struct memstress_args *args = &memstress_args;
struct memstress_vcpu_args *vcpu_args = &args->vcpu_args[vcpu_idx];
struct guest_random_state rand_state;
uint64_t gva;
uint64_t pages;
uint64_t addr;
uint64_t page;
int i;
rand_state = new_guest_random_state(args->random_seed + vcpu_idx);
gva = vcpu_args->gva;
pages = vcpu_args->pages;
/* Make sure vCPU args data structure is not corrupt. */
GUEST_ASSERT(vcpu_args->vcpu_idx == vcpu_idx);
while (true) {
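/*
 * Touch every page backing memstress_args so that the state of KVM's
 * page tables, including the pages hosting the guest page tables for
 * args and vcpu_args, is identical on every iteration.
 */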
for (i = 0; i < sizeof(memstress_args); i += args->guest_page_size)
(void) *((volatile char *)args + i);
for (i = 0; i < pages; i++) {
if (args->random_access)
page = guest_random_u32(&rand_state) % pages;
else
page = i;
addr = gva + (page * args->guest_page_size);
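/* Write or read the page's first 8 bytes, per write_percent. */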
if (guest_random_u32(&rand_state) % 100 < args->write_percent)
*(uint64_t *)addr = 0x0123456789ABCDEF;
else
READ_ONCE(*(uint64_t *)addr);
}
GUEST_SYNC(1);
}
}

void memstress_setup_vcpus(struct kvm_vm *vm, int nr_vcpus,
struct kvm_vcpu *vcpus[],
uint64_t vcpu_memory_bytes,
bool partition_vcpu_memory_access)
{
struct memstress_args *args = &memstress_args;
struct memstress_vcpu_args *vcpu_args;
int i;
for (i = 0; i < nr_vcpus; i++) {
vcpu_args = &args->vcpu_args[i];
vcpu_args->vcpu = vcpus[i];
vcpu_args->vcpu_idx = i;
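/*
 * Partitioned access gives each vCPU a private slice of the test
 * region; otherwise all vCPUs share the entire region.
 */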
if (partition_vcpu_memory_access) {
vcpu_args->gva = guest_test_virt_mem +
(i * vcpu_memory_bytes);
vcpu_args->pages = vcpu_memory_bytes /
args->guest_page_size;
vcpu_args->gpa = args->gpa + (i * vcpu_memory_bytes);
} else {
vcpu_args->gva = guest_test_virt_mem;
vcpu_args->pages = (nr_vcpus * vcpu_memory_bytes) /
args->guest_page_size;
vcpu_args->gpa = args->gpa;
}
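/* Pass the vCPU's index as the sole argument to memstress_guest_code(). */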
vcpu_args_set(vcpus[i], 1, i);
pr_debug("Added VCPU %d with test mem gpa [%lx, %lx)\n",
i, vcpu_args->gpa, vcpu_args->gpa +
(vcpu_args->pages * args->guest_page_size));
}
}

struct kvm_vm *memstress_create_vm(enum vm_guest_mode mode, int nr_vcpus,
uint64_t vcpu_memory_bytes, int slots,
enum vm_mem_backing_src_type backing_src,
bool partition_vcpu_memory_access)
{
struct memstress_args *args = &memstress_args;
struct kvm_vm *vm;
uint64_t guest_num_pages, slot0_pages = 0;
uint64_t backing_src_pagesz = get_backing_src_pagesz(backing_src);
uint64_t region_end_gfn;
int i;
pr_info("Testing guest mode: %s\n", vm_guest_mode_string(mode));
/* By default vCPUs will write to memory. */
args->write_percent = 100;
/*
* Snapshot the non-huge page size. This is used by the guest code to
* access/dirty pages at the logging granularity.
*/
args->guest_page_size = vm_guest_mode_params[mode].page_size;
guest_num_pages = vm_adjust_num_guest_pages(mode,
(nr_vcpus * vcpu_memory_bytes) / args->guest_page_size);
TEST_ASSERT(vcpu_memory_bytes % getpagesize() == 0,
"Guest memory size is not host page size aligned.");
TEST_ASSERT(vcpu_memory_bytes % args->guest_page_size == 0,
"Guest memory size is not guest page size aligned.");
TEST_ASSERT(guest_num_pages % slots == 0,
"Guest memory cannot be evenly divided into %d slots.",
slots);
/*
* If using nested, allocate extra pages for the nested page tables and
* in-memory data structures.
*/
if (args->nested)
slot0_pages += memstress_nested_pages(nr_vcpus);
/*
* Pass guest_num_pages to populate the page tables for test memory.
* The memory is also added to memslot 0, but that's a benign side
* effect as KVM allows aliasing HVAs in memslots.
*/
vm = __vm_create_with_vcpus(mode, nr_vcpus, slot0_pages + guest_num_pages,
memstress_guest_code, vcpus);
args->vm = vm;
/* Put the test region at the top guest physical memory. */
region_end_gfn = vm->max_gfn + 1;
#ifdef __x86_64__
/*
* When running vCPUs in L2, restrict the test region to 48 bits to
* avoid needing 5-level page tables to identity map L2.
*/
if (args->nested)
region_end_gfn = min(region_end_gfn, (1UL << 48) / args->guest_page_size);
#endif
/*
* If there should be more memory in the guest test region than there
* can be pages in the guest, it will definitely cause problems.
*/
TEST_ASSERT(guest_num_pages < region_end_gfn,
"Requested more guest memory than address space allows.\n"
" guest pages: %" PRIx64 " max gfn: %" PRIx64
" nr_vcpus: %d wss: %" PRIx64 "]\n",
guest_num_pages, region_end_gfn - 1, nr_vcpus, vcpu_memory_bytes);
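/*
 * Place the test region as high as possible in guest physical memory,
 * aligned down to the backing source page size so that huge pages can
 * back it where available.
 */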
args->gpa = (region_end_gfn - guest_num_pages - 1) * args->guest_page_size;
args->gpa = align_down(args->gpa, backing_src_pagesz);
#ifdef __s390x__
/* Align to 1M (segment size) */
args->gpa = align_down(args->gpa, 1 << 20);
#endif
args->size = guest_num_pages * args->guest_page_size;
pr_info("guest physical test memory: [0x%lx, 0x%lx)\n",
args->gpa, args->gpa + args->size);
/* Add extra memory slots for testing */
for (i = 0; i < slots; i++) {
uint64_t region_pages = guest_num_pages / slots;
vm_paddr_t region_start = args->gpa + region_pages * args->guest_page_size * i;
vm_userspace_mem_region_add(vm, backing_src, region_start,
MEMSTRESS_MEM_SLOT_INDEX + i,
region_pages, 0);
}
/* Do mapping for the demand paging memory slot */
virt_map(vm, guest_test_virt_mem, args->gpa, guest_num_pages);
memstress_setup_vcpus(vm, nr_vcpus, vcpus, vcpu_memory_bytes,
partition_vcpu_memory_access);
if (args->nested) {
pr_info("Configuring vCPUs to run in L2 (nested).\n");
memstress_setup_nested(vm, nr_vcpus, vcpus);
}
/* Export the shared variables to the guest. */
sync_global_to_guest(vm, memstress_args);
return vm;
}

void memstress_destroy_vm(struct kvm_vm *vm)
{
kvm_vm_free(vm);
}

void memstress_set_write_percent(struct kvm_vm *vm, uint32_t write_percent)
{
memstress_args.write_percent = write_percent;
sync_global_to_guest(vm, memstress_args.write_percent);
}

void memstress_set_random_seed(struct kvm_vm *vm, uint32_t random_seed)
{
memstress_args.random_seed = random_seed;
sync_global_to_guest(vm, memstress_args.random_seed);
}

void memstress_set_random_access(struct kvm_vm *vm, bool random_access)
{
memstress_args.random_access = random_access;
sync_global_to_guest(vm, memstress_args.random_access);
}

uint64_t __weak memstress_nested_pages(int nr_vcpus)
{
return 0;
}

void __weak memstress_setup_nested(struct kvm_vm *vm, int nr_vcpus, struct kvm_vcpu **vcpus)
{
pr_info("%s() not support on this architecture, skipping.\n", __func__);
exit(KSFT_SKIP);
}

static void *vcpu_thread_main(void *data)
{
struct vcpu_thread *vcpu = data;
int vcpu_idx = vcpu->vcpu_idx;
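/* If requested, pin this vCPU thread to its assigned physical CPU. */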
if (memstress_args.pin_vcpus)
kvm_pin_this_task_to_pcpu(memstress_args.vcpu_to_pcpu[vcpu_idx]);
WRITE_ONCE(vcpu->running, true);
/*
* Wait for all vCPU threads to be up and running before calling the test-
* provided vCPU thread function. This prevents thread creation (which
* requires taking the mmap_sem in write mode) from interfering with the
* guest faulting in its memory.
*/
while (!READ_ONCE(all_vcpu_threads_running))
;
vcpu_thread_fn(&memstress_args.vcpu_args[vcpu_idx]);
return NULL;
}

void memstress_start_vcpu_threads(int nr_vcpus,
void (*vcpu_fn)(struct memstress_vcpu_args *))
{
int i;
vcpu_thread_fn = vcpu_fn;
WRITE_ONCE(all_vcpu_threads_running, false);
WRITE_ONCE(memstress_args.stop_vcpus, false);
for (i = 0; i < nr_vcpus; i++) {
struct vcpu_thread *vcpu = &vcpu_threads[i];
vcpu->vcpu_idx = i;
WRITE_ONCE(vcpu->running, false);
pthread_create(&vcpu->thread, NULL, vcpu_thread_main, vcpu);
}
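/* Wait for every vCPU thread to report in before releasing them all. */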
for (i = 0; i < nr_vcpus; i++) {
while (!READ_ONCE(vcpu_threads[i].running))
;
}
WRITE_ONCE(all_vcpu_threads_running, true);
}

void memstress_join_vcpu_threads(int nr_vcpus)
{
int i;
WRITE_ONCE(memstress_args.stop_vcpus, true);
for (i = 0; i < nr_vcpus; i++)
pthread_join(vcpu_threads[i].thread, NULL);
}
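
/*
 * Illustrative usage sketch (an assumption about a typical caller, not
 * part of this library): a test built on these helpers creates the VM,
 * spawns the vCPU threads with a test-provided worker that loops until
 * memstress_args.stop_vcpus is set, then joins the threads and tears
 * the VM down. vcpu_worker() below is a hypothetical example worker.
 *
 *	static void vcpu_worker(struct memstress_vcpu_args *vcpu_args)
 *	{
 *		while (!READ_ONCE(memstress_args.stop_vcpus))
 *			vcpu_run(vcpu_args->vcpu);
 *	}
 *
 *	vm = memstress_create_vm(VM_MODE_DEFAULT, nr_vcpus, vcpu_mem_bytes,
 *				 1, VM_MEM_SRC_ANONYMOUS, true);
 *	memstress_set_write_percent(vm, 50);
 *	memstress_start_vcpu_threads(nr_vcpus, vcpu_worker);
 *	... exercise the phase under test ...
 *	memstress_join_vcpu_threads(nr_vcpus);
 *	memstress_destroy_vm(vm);
 */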