
Patch series "selftests/mm: reuse FORCE_READ to replace "asm volatile("" : "+r" (XXX));" and some cleanup", v2.

This series introduces a common FORCE_READ() macro to replace the cryptic `asm volatile("" : "+r" (variable));` construct used in several mm selftests. This improves code readability and maintainability by removing duplicated, hard-to-understand code.

This patch (of 2): Several mm selftests use the `asm volatile("" : "+r" (variable));` construct to force a read of a variable, preventing the compiler from optimizing away the memory access. This idiom is cryptic and duplicated across multiple test files. Following a suggestion from David [1], this patch refactors this common pattern into a FORCE_READ() macro.

Link: https://lkml.kernel.org/r/20250717131857.59909-1-lianux.mm@gmail.com
Link: https://lkml.kernel.org/r/20250717131857.59909-2-lianux.mm@gmail.com
Link: https://lore.kernel.org/lkml/4a3e0759-caa1-4cfa-bc3f-402593f1eee3@redhat.com/ [1]
Signed-off-by: wang lian <lianux.mm@gmail.com>
Reviewed-by: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
Acked-by: David Hildenbrand <david@redhat.com>
Reviewed-by: Zi Yan <ziy@nvidia.com>
Reviewed-by: Wei Yang <richard.weiyang@gmail.com>
Cc: Christian Brauner <brauner@kernel.org>
Cc: Jann Horn <jannh@google.com>
Cc: Kairui Song <ryncsn@gmail.com>
Cc: Liam Howlett <liam.howlett@oracle.com>
Cc: Mark Brown <broonie@kernel.org>
Cc: SeongJae Park <sj@kernel.org>
Cc: Shuah Khan <shuah@kernel.org>
Cc: Vlastimil Babka <vbabka@suse.cz>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
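As context for the change, here is a minimal sketch of what such a macro can look like, built directly from the asm construct the series describes. This is an illustration only: the exact definition that landed in tools/testing/selftests/mm/vm_util.h may differ, and `val` is a hypothetical variable used for the before/after comparison.

/*
 * Sketch only: the empty asm body with a "+r" constraint tells the
 * compiler the value is both read and potentially modified by the asm,
 * so the load that produced it cannot be optimized away.
 */
#define FORCE_READ(var) asm volatile("" : "+r" (var))

/* before */ asm volatile("" : "+r" (val));
/* after  */ FORCE_READ(val);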
314 lines
7.2 KiB
C
// SPDX-License-Identifier: GPL-2.0
/*
 * The main purpose of the tests here is to exercise the migration entry code
 * paths in the kernel.
 */

#include "../kselftest_harness.h"
#include "thp_settings.h"

#include <strings.h>
#include <pthread.h>
#include <numa.h>
#include <numaif.h>
#include <sys/mman.h>
#include <sys/prctl.h>
#include <sys/types.h>
#include <signal.h>
#include <time.h>
#include "vm_util.h"

#define TWOMEG (2<<20)
#define RUNTIME (20)
#define MAX_RETRIES 100
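/* Round x up to the next multiple of a; a must be a power of two. */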
#define ALIGN(x, a) (((x) + (a - 1)) & (~((a) - 1)))

FIXTURE(migration)
{
	pthread_t *threads;
	pid_t *pids;
	int nthreads;
	int n1;
	int n2;
};

FIXTURE_SETUP(migration)
{
	int n;

	ASSERT_EQ(numa_available(), 0);
	self->nthreads = numa_num_task_cpus() - 1;
	self->n1 = -1;
	self->n2 = -1;

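	/* Pick the first two available NUMA nodes as the migration endpoints. */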
	for (n = 0; n < numa_max_possible_node(); n++)
		if (numa_bitmask_isbitset(numa_all_nodes_ptr, n)) {
			if (self->n1 == -1) {
				self->n1 = n;
			} else {
				self->n2 = n;
				break;
			}
		}

	self->threads = malloc(self->nthreads * sizeof(*self->threads));
	ASSERT_NE(self->threads, NULL);
	self->pids = malloc(self->nthreads * sizeof(*self->pids));
	ASSERT_NE(self->pids, NULL);
};

FIXTURE_TEARDOWN(migration)
{
	free(self->threads);
	free(self->pids);
}

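/*
 * Migrate the page at ptr back and forth between nodes n1 and n2 via
 * move_pages(2) for RUNTIME seconds. Returns 0 on success, -1 on a
 * clock error, and -2 if migration fails: immediately for hard errors,
 * or after MAX_RETRIES consecutive best-effort failures.
 */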
int migrate(uint64_t *ptr, int n1, int n2)
{
	int ret, tmp;
	int status = 0;
	struct timespec ts1, ts2;
	int failures = 0;

	if (clock_gettime(CLOCK_MONOTONIC, &ts1))
		return -1;

	while (1) {
		if (clock_gettime(CLOCK_MONOTONIC, &ts2))
			return -1;

		if (ts2.tv_sec - ts1.tv_sec >= RUNTIME)
			return 0;

		ret = move_pages(0, 1, (void **) &ptr, &n2, &status,
				 MPOL_MF_MOVE_ALL);
		if (ret) {
			if (ret > 0) {
				/* Migration is best effort; try again */
				if (++failures < MAX_RETRIES)
					continue;
				printf("Didn't migrate %d pages\n", ret);
			} else {
				perror("Couldn't migrate pages");
			}
			return -2;
		}
		failures = 0;
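		/* Swap the target nodes so the next call moves the page back. */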
		tmp = n2;
		n2 = n1;
		n1 = tmp;
	}

	return 0;
}

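/*
 * Worker body for the reader threads (and the forked children in the
 * shared tests): spin reading the target memory until cancelled or killed.
 */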
void *access_mem(void *ptr)
{
	while (1) {
		pthread_testcancel();
		/* Force a read from the memory pointed to by ptr. This ensures
		 * the memory access actually happens and prevents the compiler
		 * from optimizing away this entire loop.
		 */
		FORCE_READ((uint64_t *)ptr);
	}

	return NULL;
}

/*
 * Basic migration entry testing. One thread will move pages back and forth
 * between nodes whilst other threads try to access them, triggering the
 * migration entry wait paths in the kernel.
 */
TEST_F_TIMEOUT(migration, private_anon, 2*RUNTIME)
{
	uint64_t *ptr;
	int i;

	if (self->nthreads < 2 || self->n1 < 0 || self->n2 < 0)
		SKIP(return, "Not enough threads or NUMA nodes available");

	ptr = mmap(NULL, TWOMEG, PROT_READ | PROT_WRITE,
		   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	ASSERT_NE(ptr, MAP_FAILED);

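	/* Fault in and dirty the whole mapping so there are pages to migrate. */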
	memset(ptr, 0xde, TWOMEG);
	for (i = 0; i < self->nthreads - 1; i++)
		if (pthread_create(&self->threads[i], NULL, access_mem, ptr))
			perror("Couldn't create thread");

	ASSERT_EQ(migrate(ptr, self->n1, self->n2), 0);
	for (i = 0; i < self->nthreads - 1; i++)
		ASSERT_EQ(pthread_cancel(self->threads[i]), 0);
}

/*
 * Same as the previous test but with shared memory.
 */
TEST_F_TIMEOUT(migration, shared_anon, 2*RUNTIME)
{
	pid_t pid;
	uint64_t *ptr;
	int i;

	if (self->nthreads < 2 || self->n1 < 0 || self->n2 < 0)
		SKIP(return, "Not enough threads or NUMA nodes available");

	ptr = mmap(NULL, TWOMEG, PROT_READ | PROT_WRITE,
		   MAP_SHARED | MAP_ANONYMOUS, -1, 0);
	ASSERT_NE(ptr, MAP_FAILED);

	memset(ptr, 0xde, TWOMEG);
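	/*
	 * Use forked children rather than threads so the shared mapping is
	 * accessed from separate address spaces.
	 */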
	for (i = 0; i < self->nthreads - 1; i++) {
		pid = fork();
		if (!pid) {
			prctl(PR_SET_PDEATHSIG, SIGHUP);
			/* Parent may have died before prctl so check now. */
			if (getppid() == 1)
				kill(getpid(), SIGHUP);
			access_mem(ptr);
		} else {
			self->pids[i] = pid;
		}
	}

	ASSERT_EQ(migrate(ptr, self->n1, self->n2), 0);
	for (i = 0; i < self->nthreads - 1; i++)
		ASSERT_EQ(kill(self->pids[i], SIGTERM), 0);
}

/*
 * Tests the pmd migration entry paths.
 */
TEST_F_TIMEOUT(migration, private_anon_thp, 2*RUNTIME)
{
	uint64_t *ptr;
	int i;

	if (!thp_is_enabled())
		SKIP(return, "Transparent Hugepages not available");

	if (self->nthreads < 2 || self->n1 < 0 || self->n2 < 0)
		SKIP(return, "Not enough threads or NUMA nodes available");

	ptr = mmap(NULL, 2*TWOMEG, PROT_READ | PROT_WRITE,
		   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	ASSERT_NE(ptr, MAP_FAILED);

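	/*
	 * The mapping is twice the THP size so the pointer can be aligned up
	 * to a 2MB boundary; a PMD-mapped THP needs a naturally aligned range.
	 */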
	ptr = (uint64_t *) ALIGN((uintptr_t) ptr, TWOMEG);
	ASSERT_EQ(madvise(ptr, TWOMEG, MADV_HUGEPAGE), 0);
	memset(ptr, 0xde, TWOMEG);
	for (i = 0; i < self->nthreads - 1; i++)
		if (pthread_create(&self->threads[i], NULL, access_mem, ptr))
			perror("Couldn't create thread");

	ASSERT_EQ(migrate(ptr, self->n1, self->n2), 0);
	for (i = 0; i < self->nthreads - 1; i++)
		ASSERT_EQ(pthread_cancel(self->threads[i]), 0);
}

/*
 * migration test with shared anon THP page
 */
TEST_F_TIMEOUT(migration, shared_anon_thp, 2*RUNTIME)
{
	pid_t pid;
	uint64_t *ptr;
	int i;

	if (!thp_is_enabled())
		SKIP(return, "Transparent Hugepages not available");

	if (self->nthreads < 2 || self->n1 < 0 || self->n2 < 0)
		SKIP(return, "Not enough threads or NUMA nodes available");

	ptr = mmap(NULL, 2 * TWOMEG, PROT_READ | PROT_WRITE,
		   MAP_SHARED | MAP_ANONYMOUS, -1, 0);
	ASSERT_NE(ptr, MAP_FAILED);

	ptr = (uint64_t *) ALIGN((uintptr_t) ptr, TWOMEG);
	ASSERT_EQ(madvise(ptr, TWOMEG, MADV_HUGEPAGE), 0);

	memset(ptr, 0xde, TWOMEG);
	for (i = 0; i < self->nthreads - 1; i++) {
		pid = fork();
		if (!pid) {
			prctl(PR_SET_PDEATHSIG, SIGHUP);
			/* Parent may have died before prctl so check now. */
			if (getppid() == 1)
				kill(getpid(), SIGHUP);
			access_mem(ptr);
		} else {
			self->pids[i] = pid;
		}
	}

	ASSERT_EQ(migrate(ptr, self->n1, self->n2), 0);
	for (i = 0; i < self->nthreads - 1; i++)
		ASSERT_EQ(kill(self->pids[i], SIGTERM), 0);
}

/*
 * migration test with private anon hugetlb page
 */
TEST_F_TIMEOUT(migration, private_anon_htlb, 2*RUNTIME)
{
	uint64_t *ptr;
	int i;

	if (self->nthreads < 2 || self->n1 < 0 || self->n2 < 0)
		SKIP(return, "Not enough threads or NUMA nodes available");

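	/*
	 * MAP_HUGETLB allocates from the hugetlb pool; the mmap (and thus
	 * the test) fails if no huge pages have been reserved.
	 */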
	ptr = mmap(NULL, TWOMEG, PROT_READ | PROT_WRITE,
		   MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB, -1, 0);
	ASSERT_NE(ptr, MAP_FAILED);

	memset(ptr, 0xde, TWOMEG);
	for (i = 0; i < self->nthreads - 1; i++)
		if (pthread_create(&self->threads[i], NULL, access_mem, ptr))
			perror("Couldn't create thread");

	ASSERT_EQ(migrate(ptr, self->n1, self->n2), 0);
	for (i = 0; i < self->nthreads - 1; i++)
		ASSERT_EQ(pthread_cancel(self->threads[i]), 0);
}

/*
 * migration test with shared anon hugetlb page
 */
TEST_F_TIMEOUT(migration, shared_anon_htlb, 2*RUNTIME)
{
	pid_t pid;
	uint64_t *ptr;
	int i;

	if (self->nthreads < 2 || self->n1 < 0 || self->n2 < 0)
		SKIP(return, "Not enough threads or NUMA nodes available");

	ptr = mmap(NULL, TWOMEG, PROT_READ | PROT_WRITE,
		   MAP_SHARED | MAP_ANONYMOUS | MAP_HUGETLB, -1, 0);
	ASSERT_NE(ptr, MAP_FAILED);

	memset(ptr, 0xde, TWOMEG);
	for (i = 0; i < self->nthreads - 1; i++) {
		pid = fork();
		if (!pid) {
			prctl(PR_SET_PDEATHSIG, SIGHUP);
			/* Parent may have died before prctl so check now. */
			if (getppid() == 1)
				kill(getpid(), SIGHUP);
			access_mem(ptr);
		} else {
			self->pids[i] = pid;
		}
	}

	ASSERT_EQ(migrate(ptr, self->n1, self->n2), 0);
	for (i = 0; i < self->nthreads - 1; i++)
		ASSERT_EQ(kill(self->pids[i], SIGTERM), 0);
}

TEST_HARNESS_MAIN