Before polling or sleeping to wait for a test to complete, print out
": Running (<num> active)", where the number of active tests is determined
by iterating over the tests and counting those for which
check_if_command_finished() returns false. The line is erased and reprinted
only when the number of running tests changes, to avoid excessive
flickering. The message lets the user see that a test is running and, in
parallel mode, how many tests are still waiting to complete. If color mode
is disabled, the "Running" message is not displayed, as deleting the line
isn't reliable.

Tested-by: James Clark <james.clark@linaro.org>
Signed-off-by: Ian Rogers <irogers@google.com>
Cc: Colin Ian King <colin.i.king@gmail.com>
Cc: Howard Chu <howardchu95@gmail.com>
Cc: Weilin Wang <weilin.wang@intel.com>
Cc: Ilya Leoshkevich <iii@linux.ibm.com>
Cc: Thomas Richter <tmricht@linux.ibm.com>
Cc: Andi Kleen <ak@linux.intel.com>
Cc: Dapeng Mi <dapeng1.mi@linux.intel.com>
Cc: Athira Jajeev <atrajeev@linux.vnet.ibm.com>
Cc: Michael Petlan <mpetlan@redhat.com>
Cc: Veronika Molnarova <vmolnaro@redhat.com>
Link: https://lore.kernel.org/r/20241025192109.132482-3-irogers@google.com
Signed-off-by: Namhyung Kim <namhyung@kernel.org>
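For illustration only (not part of the commit), here is a minimal standalone
sketch of the status-line technique described above. It substitutes plain
POSIX waitpid(..., WNOHANG) for perf's check_if_command_finished() and a bare
"\r\33[2K" carriage-return/erase sequence for PERF_COLOR_DELETE_LINE; both
stand-ins are assumptions, not perf's actual helpers:

/*
 * Standalone sketch: fork a few fake "tests", count how many are still
 * alive via a non-blocking waitpid(), and redraw a "Running (<n> active)"
 * status line only when that count changes.
 */
#include <stdio.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/wait.h>

int main(void)
{
	pid_t pids[4];
	int n = 4, last_running = -1;

	for (int i = 0; i < n; i++) {
		pids[i] = fork();
		if (pids[i] == 0) {	/* child: stand-in for test work */
			sleep(i + 1);
			_exit(0);
		}
	}
	for (;;) {
		int running = 0;

		for (int i = 0; i < n; i++) {
			/* WNOHANG: a return of 0 means the child is still running. */
			if (pids[i] > 0 && waitpid(pids[i], NULL, WNOHANG) == 0)
				running++;
			else
				pids[i] = -1;	/* exited and reaped */
		}
		/* Redraw only on change, to avoid flickering the line. */
		if (running != last_running) {
			fprintf(stderr, "\r\33[2K: Running (%d active)", running);
			last_running = running;
		}
		if (running == 0)
			break;
		usleep(100 * 1000);	/* sleep instead of spinning */
	}
	fprintf(stderr, "\r\33[2K: Done\n");
	return 0;
}

As in the patch, the line is only erased and reprinted when the active count
changes, so a long wait on slow children does not flicker.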
643 lines
15 KiB
C
// SPDX-License-Identifier: GPL-2.0
/*
 * builtin-test.c
 *
 * Builtin regression testing command: ever growing number of sanity tests
 */
#include <fcntl.h>
#include <errno.h>
#include <poll.h>
#include <unistd.h>
#include <string.h>
#include <stdlib.h>
#include <sys/types.h>
#include <dirent.h>
#include <sys/wait.h>
#include <sys/stat.h>
#include "builtin.h"
#include "config.h"
#include "hist.h"
#include "intlist.h"
#include "tests.h"
#include "debug.h"
#include "color.h"
#include <subcmd/parse-options.h>
#include <subcmd/run-command.h>
#include "string2.h"
#include "symbol.h"
#include "util/rlimit.h"
#include "util/strbuf.h"
#include <linux/kernel.h>
#include <linux/string.h>
#include <subcmd/exec-cmd.h>
#include <linux/zalloc.h>

#include "tests-scripts.h"

/*
 * Command line option to not fork, running the test in the same process and
 * making it easier to debug.
 */
static bool dont_fork;
/* Don't fork the tests in parallel and wait for their completion. */
static bool sequential = true;
/*
 * Run the tests in parallel. This lacks the infrastructure to avoid running
 * tests that clash for resources, so leave it as the developer's choice to
 * enable while working on the needed infra.
 */
static bool parallel;
const char *dso_to_test;
const char *test_objdump_path = "objdump";

/*
 * List of architecture specific tests. Not a weak symbol as the array length is
 * dependent on the initialization, as such GCC with LTO complains of
 * conflicting definitions with a weak symbol.
 */
#if defined(__i386__) || defined(__x86_64__) || defined(__aarch64__) || defined(__powerpc64__)
extern struct test_suite *arch_tests[];
#else
static struct test_suite *arch_tests[] = {
	NULL,
};
#endif

static struct test_suite *generic_tests[] = {
	&suite__vmlinux_matches_kallsyms,
#ifdef HAVE_LIBTRACEEVENT
	&suite__openat_syscall_event,
	&suite__openat_syscall_event_on_all_cpus,
	&suite__basic_mmap,
#endif
	&suite__mem,
	&suite__parse_events,
	&suite__expr,
	&suite__PERF_RECORD,
	&suite__pmu,
	&suite__pmu_events,
	&suite__tool_pmu,
	&suite__dso_data,
	&suite__perf_evsel__roundtrip_name_test,
#ifdef HAVE_LIBTRACEEVENT
	&suite__perf_evsel__tp_sched_test,
	&suite__syscall_openat_tp_fields,
#endif
	&suite__hists_link,
	&suite__python_use,
	&suite__bp_signal,
	&suite__bp_signal_overflow,
	&suite__bp_accounting,
	&suite__wp,
	&suite__task_exit,
	&suite__sw_clock_freq,
	&suite__code_reading,
	&suite__sample_parsing,
	&suite__keep_tracking,
	&suite__parse_no_sample_id_all,
	&suite__hists_filter,
	&suite__mmap_thread_lookup,
	&suite__thread_maps_share,
	&suite__hists_output,
	&suite__hists_cumulate,
#ifdef HAVE_LIBTRACEEVENT
	&suite__switch_tracking,
#endif
	&suite__fdarray__filter,
	&suite__fdarray__add,
	&suite__kmod_path__parse,
	&suite__thread_map,
	&suite__session_topology,
	&suite__thread_map_synthesize,
	&suite__thread_map_remove,
	&suite__cpu_map,
	&suite__synthesize_stat_config,
	&suite__synthesize_stat,
	&suite__synthesize_stat_round,
	&suite__event_update,
	&suite__event_times,
	&suite__backward_ring_buffer,
	&suite__sdt_event,
	&suite__is_printable_array,
	&suite__bitmap_print,
	&suite__perf_hooks,
	&suite__unit_number__scnprint,
	&suite__mem2node,
	&suite__time_utils,
	&suite__jit_write_elf,
	&suite__pfm,
	&suite__api_io,
	&suite__maps__merge_in,
	&suite__demangle_java,
	&suite__demangle_ocaml,
	&suite__parse_metric,
	&suite__pe_file_parsing,
	&suite__expand_cgroup_events,
	&suite__perf_time_to_tsc,
	&suite__dlfilter,
	&suite__sigtrap,
	&suite__event_groups,
	&suite__symbols,
	&suite__util,
	NULL,
};

static struct test_suite **tests[] = {
	generic_tests,
	arch_tests,
	NULL, /* shell tests created at runtime. */
};

static struct test_workload *workloads[] = {
	&workload__noploop,
	&workload__thloop,
	&workload__leafloop,
	&workload__sqrtloop,
	&workload__brstack,
	&workload__datasym,
	&workload__landlock,
};

#define workloads__for_each(workload) \
	for (unsigned i = 0; i < ARRAY_SIZE(workloads) && ({ workload = workloads[i]; 1; }); i++)

static int num_subtests(const struct test_suite *t)
{
	int num;

	if (!t->test_cases)
		return 0;

	num = 0;
	while (t->test_cases[num].name)
		num++;

	return num;
}

static bool has_subtests(const struct test_suite *t)
{
	return num_subtests(t) > 1;
}

static const char *skip_reason(const struct test_suite *t, int subtest)
{
	if (!t->test_cases)
		return NULL;

	return t->test_cases[subtest >= 0 ? subtest : 0].skip_reason;
}

static const char *test_description(const struct test_suite *t, int subtest)
{
	if (t->test_cases && subtest >= 0)
		return t->test_cases[subtest].desc;

	return t->desc;
}

static test_fnptr test_function(const struct test_suite *t, int subtest)
{
	if (subtest <= 0)
		return t->test_cases[0].run_case;

	return t->test_cases[subtest].run_case;
}

static bool perf_test__matches(const char *desc, int curr, int argc, const char *argv[])
{
	int i;

	if (argc == 0)
		return true;

	for (i = 0; i < argc; ++i) {
		char *end;
		long nr = strtoul(argv[i], &end, 10);

		if (*end == '\0') {
			if (nr == curr + 1)
				return true;
			continue;
		}

		if (strcasestr(desc, argv[i]))
			return true;
	}

	return false;
}

struct child_test {
	struct child_process process;
	struct test_suite *test;
	int test_num;
	int subtest;
};

static int run_test_child(struct child_process *process)
{
	struct child_test *child = container_of(process, struct child_test, process);
	int err;

	pr_debug("--- start ---\n");
	pr_debug("test child forked, pid %d\n", getpid());
	err = test_function(child->test, child->subtest)(child->test, child->subtest);
	pr_debug("---- end(%d) ----\n", err);
	fflush(NULL);
	return -err;
}

#define TEST_RUNNING -3

static int print_test_result(struct test_suite *t, int i, int subtest, int result, int width,
			     int running)
{
	if (has_subtests(t)) {
		int subw = width > 2 ? width - 2 : width;

		pr_info("%3d.%1d: %-*s:", i + 1, subtest + 1, subw, test_description(t, subtest));
	} else
		pr_info("%3d: %-*s:", i + 1, width, test_description(t, subtest));

	switch (result) {
	case TEST_RUNNING:
		color_fprintf(stderr, PERF_COLOR_YELLOW, " Running (%d active)\n", running);
		break;
	case TEST_OK:
		pr_info(" Ok\n");
		break;
	case TEST_SKIP: {
		const char *reason = skip_reason(t, subtest);

		if (reason)
			color_fprintf(stderr, PERF_COLOR_YELLOW, " Skip (%s)\n", reason);
		else
			color_fprintf(stderr, PERF_COLOR_YELLOW, " Skip\n");
	}
		break;
	case TEST_FAIL:
	default:
		color_fprintf(stderr, PERF_COLOR_RED, " FAILED!\n");
		break;
	}

	return 0;
}

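/*
 * Wait for the child process of a started test to complete. While waiting,
 * drain the child's stderr and, when color mode is enabled, keep a
 * "Running (<num> active)" status line updated. Finally, reap the child and
 * print the test's result.
 */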
static int finish_test(struct child_test **child_tests, int running_test, int child_test_num,
		       int width)
{
	struct child_test *child_test = child_tests[running_test];
	struct test_suite *t = child_test->test;
	int i = child_test->test_num;
	int subi = child_test->subtest;
	int err = child_test->process.err;
	bool err_done = false;
	struct strbuf err_output = STRBUF_INIT;
	int last_running = -1;
	int ret;

	/*
	 * For test suites with subtests, display the suite name ahead of the
	 * sub test names.
	 */
	if (has_subtests(t) && subi == 0)
		pr_info("%3d: %-*s:\n", i + 1, width, test_description(t, -1));

	/*
	 * Busy loop reading from the child's stdout/stderr that are set to be
	 * non-blocking until EOF.
	 */
	if (err > 0)
		fcntl(err, F_SETFL, O_NONBLOCK);
	if (verbose > 1) {
		if (has_subtests(t))
			pr_info("%3d.%1d: %s:\n", i + 1, subi + 1, test_description(t, subi));
		else
			pr_info("%3d: %s:\n", i + 1, test_description(t, -1));
	}
	while (!err_done) {
		struct pollfd pfds[1] = {
			{ .fd = err,
			  .events = POLLIN | POLLERR | POLLHUP | POLLNVAL,
			},
		};
		if (perf_use_color_default) {
			int running = 0;

			for (int y = running_test; y < child_test_num; y++) {
				if (check_if_command_finished(&child_tests[y]->process) == 0)
					running++;
			}
			if (running != last_running) {
				if (last_running != -1) {
					/*
					 * Erase "Running (.. active)" line
					 * printed before poll/sleep.
					 */
					fprintf(debug_file(), PERF_COLOR_DELETE_LINE);
				}
				print_test_result(t, i, subi, TEST_RUNNING, width, running);
				last_running = running;
			}
		}

		err_done = true;
		if (err <= 0) {
			/* No child stderr to poll, sleep for 10ms for child to complete. */
			usleep(10 * 1000);
		} else {
			/* Poll to avoid excessive spinning, timeout set for 100ms. */
			poll(pfds, ARRAY_SIZE(pfds), /*timeout=*/100);
			if (pfds[0].revents) {
				char buf[512];
				ssize_t len;

				len = read(err, buf, sizeof(buf) - 1);

				if (len > 0) {
					err_done = false;
					buf[len] = '\0';
					strbuf_addstr(&err_output, buf);
				}
			}
		}
		if (err_done)
			err_done = check_if_command_finished(&child_test->process);
	}
	if (perf_use_color_default && last_running != -1) {
		/* Erase "Running (.. active)" line printed before poll/sleep. */
		fprintf(debug_file(), PERF_COLOR_DELETE_LINE);
	}
	/* Clean up child process. */
	ret = finish_command(&child_test->process);
	if (verbose > 1 || (verbose == 1 && ret == TEST_FAIL))
		fprintf(stderr, "%s", err_output.buf);

	strbuf_release(&err_output);
	print_test_result(t, i, subi, ret, width, /*running=*/0);
	if (err > 0)
		close(err);
	return 0;
}

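/*
 * Run a single test. With --dont-fork the test function is called directly;
 * otherwise the test runs in a forked child and, in sequential mode, is
 * waited on immediately via finish_test().
 */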
static int start_test(struct test_suite *test, int i, int subi, struct child_test **child,
		      int width)
{
	int err;

	*child = NULL;
	if (dont_fork) {
		pr_debug("--- start ---\n");
		err = test_function(test, subi)(test, subi);
		pr_debug("---- end ----\n");
		print_test_result(test, i, subi, err, width, /*running=*/0);
		return 0;
	}

	*child = zalloc(sizeof(**child));
	if (!*child)
		return -ENOMEM;

	(*child)->test = test;
	(*child)->test_num = i;
	(*child)->subtest = subi;
	(*child)->process.pid = -1;
	(*child)->process.no_stdin = 1;
	if (verbose <= 0) {
		(*child)->process.no_stdout = 1;
		(*child)->process.no_stderr = 1;
	} else {
		(*child)->process.stdout_to_stderr = 1;
		(*child)->process.out = -1;
		(*child)->process.err = -1;
	}
	(*child)->process.no_exec_cmd = run_test_child;
	err = start_command(&(*child)->process);
	if (err || !sequential)
		return err;
	return finish_test(child, /*running_test=*/0, /*child_test_num=*/1, width);
}

#define for_each_test(j, k, t)			\
	for (j = 0, k = 0; j < ARRAY_SIZE(tests); j++, k = 0)	\
		while ((t = tests[j][k++]) != NULL)

static int __cmd_test(int argc, const char *argv[], struct intlist *skiplist)
{
	struct test_suite *t;
	unsigned int j, k;
	int i = 0;
	int width = 0;
	size_t num_tests = 0;
	struct child_test **child_tests;
	int child_test_num = 0;

	for_each_test(j, k, t) {
		int len = strlen(test_description(t, -1));

		if (width < len)
			width = len;

		if (has_subtests(t)) {
			for (int subi = 0, subn = num_subtests(t); subi < subn; subi++) {
				len = strlen(test_description(t, subi));
				if (width < len)
					width = len;
				num_tests++;
			}
		} else {
			num_tests++;
		}
	}
	child_tests = calloc(num_tests, sizeof(*child_tests));
	if (!child_tests)
		return -ENOMEM;

	for_each_test(j, k, t) {
		int curr = i++;

		if (!perf_test__matches(test_description(t, -1), curr, argc, argv)) {
			bool skip = true;

			for (int subi = 0, subn = num_subtests(t); subi < subn; subi++) {
				if (perf_test__matches(test_description(t, subi),
						       curr, argc, argv))
					skip = false;
			}

			if (skip)
				continue;
		}

		if (intlist__find(skiplist, i)) {
			pr_info("%3d: %-*s:", curr + 1, width, test_description(t, -1));
			color_fprintf(stderr, PERF_COLOR_YELLOW, " Skip (user override)\n");
			continue;
		}

		if (!has_subtests(t)) {
			int err = start_test(t, curr, -1, &child_tests[child_test_num++], width);

			if (err) {
				/* TODO: if !sequential waitpid the already forked children. */
				free(child_tests);
				return err;
			}
		} else {
			for (int subi = 0, subn = num_subtests(t); subi < subn; subi++) {
				int err;

				if (!perf_test__matches(test_description(t, subi),
							curr, argc, argv))
					continue;

				err = start_test(t, curr, subi, &child_tests[child_test_num++],
						 width);
				if (err)
					return err;
			}
		}
	}
	for (i = 0; i < child_test_num; i++) {
		if (!sequential) {
			int ret = finish_test(child_tests, i, child_test_num, width);

			if (ret)
				return ret;
		}
		free(child_tests[i]);
	}
	free(child_tests);
	return 0;
}

static int perf_test__list(int argc, const char **argv)
{
	unsigned int j, k;
	struct test_suite *t;
	int i = 0;

	for_each_test(j, k, t) {
		int curr = i++;

		if (!perf_test__matches(test_description(t, -1), curr, argc, argv))
			continue;

		pr_info("%3d: %s\n", i, test_description(t, -1));

		if (has_subtests(t)) {
			int subn = num_subtests(t);
			int subi;

			for (subi = 0; subi < subn; subi++)
				pr_info("%3d:%1d: %s\n", i, subi + 1,
					test_description(t, subi));
		}
	}
	return 0;
}

static int workloads__fprintf_list(FILE *fp)
{
	struct test_workload *twl;
	int printed = 0;

	workloads__for_each(twl)
		printed += fprintf(fp, "%s\n", twl->name);

	return printed;
}

static int run_workload(const char *work, int argc, const char **argv)
{
	struct test_workload *twl;

	workloads__for_each(twl) {
		if (!strcmp(twl->name, work))
			return twl->func(argc, argv);
	}

	pr_info("No workload found: %s\n", work);
	return -1;
}

static int perf_test__config(const char *var, const char *value,
			     void *data __maybe_unused)
{
	if (!strcmp(var, "annotate.objdump"))
		test_objdump_path = value;

	return 0;
}

int cmd_test(int argc, const char **argv)
{
	const char *test_usage[] = {
	"perf test [<options>] [{list <test-name-fragment>|[<test-name-fragments>|<test-numbers>]}]",
	NULL,
	};
	const char *skip = NULL;
	const char *workload = NULL;
	bool list_workloads = false;
	const struct option test_options[] = {
	OPT_STRING('s', "skip", &skip, "tests", "tests to skip"),
	OPT_INCR('v', "verbose", &verbose,
		    "be more verbose (show symbol address, etc)"),
	OPT_BOOLEAN('F', "dont-fork", &dont_fork,
		    "Do not fork for testcase"),
	OPT_BOOLEAN('p', "parallel", &parallel, "Run the tests in parallel"),
	OPT_BOOLEAN('S', "sequential", &sequential,
		    "Run the tests one after another rather than in parallel"),
	OPT_STRING('w', "workload", &workload, "work", "workload to run for testing, use '--list-workloads' to list the available ones."),
	OPT_BOOLEAN(0, "list-workloads", &list_workloads, "List the available builtin workloads to use with -w/--workload"),
	OPT_STRING(0, "dso", &dso_to_test, "dso", "dso to test"),
	OPT_STRING(0, "objdump", &test_objdump_path, "path",
		   "objdump binary to use for disassembly and annotations"),
	OPT_END()
	};
	const char * const test_subcommands[] = { "list", NULL };
	struct intlist *skiplist = NULL;
	int ret = hists__init();

	if (ret < 0)
		return ret;

	perf_config(perf_test__config, NULL);

	/* Unbuffered output */
	setvbuf(stdout, NULL, _IONBF, 0);

	tests[2] = create_script_test_suites();
	argc = parse_options_subcommand(argc, argv, test_options, test_subcommands, test_usage, 0);
	if (argc >= 1 && !strcmp(argv[0], "list"))
		return perf_test__list(argc - 1, argv + 1);

	if (workload)
		return run_workload(workload, argc, argv);

	if (list_workloads) {
		workloads__fprintf_list(stdout);
		return 0;
	}

	if (dont_fork)
		sequential = true;
	else if (parallel)
		sequential = false;

	symbol_conf.priv_size = sizeof(int);
	symbol_conf.try_vmlinux_path = true;

	if (symbol__init(NULL) < 0)
		return -1;

	if (skip != NULL)
		skiplist = intlist__new(skip);
	/*
	 * Tests that create BPF maps, for instance, need more than the 64K
	 * default:
	 */
	rlimit__bump_memlock();

	return __cmd_test(argc, argv, skiplist);
}