diff --git a/Documentation/00-INDEX b/Documentation/00-INDEX index f35473f8c630..3bec49c33bbb 100644 --- a/Documentation/00-INDEX +++ b/Documentation/00-INDEX @@ -242,8 +242,6 @@ kprobes.txt - documents the kernel probes debugging feature. kref.txt - docs on adding reference counters (krefs) to kernel objects. -kselftest.txt - - small unittests for (some) individual codepaths in the kernel. laptops/ - directory with laptop related info and laptop driver documentation. ldm.txt diff --git a/Documentation/dev-tools/index.rst b/Documentation/dev-tools/index.rst index 4ac991dbddb7..a81787cd47d7 100644 --- a/Documentation/dev-tools/index.rst +++ b/Documentation/dev-tools/index.rst @@ -24,6 +24,7 @@ whole; patches welcome! kmemcheck gdb-kernel-debugging kgdb + kselftest .. only:: subproject and html diff --git a/Documentation/kselftest.txt b/Documentation/dev-tools/kselftest.rst similarity index 59% rename from Documentation/kselftest.txt rename to Documentation/dev-tools/kselftest.rst index 5bd590335839..ebd03d11d2c2 100644 --- a/Documentation/kselftest.txt +++ b/Documentation/dev-tools/kselftest.rst @@ -1,4 +1,6 @@ +====================== Linux Kernel Selftests +====================== The kernel contains a set of "self tests" under the tools/testing/selftests/ directory. These are intended to be small tests to exercise individual code @@ -15,28 +17,33 @@ hotplug test is run on 2% of hotplug capable memory instead of 10%. Running the selftests (hotplug tests are run in limited mode) ============================================================= -To build the tests: +To build the tests:: + $ make -C tools/testing/selftests +To run the tests:: -To run the tests: $ make -C tools/testing/selftests run_tests -To build and run the tests with a single command, use: +To build and run the tests with a single command, use:: + $ make kselftest -- note that some tests will require root privileges. +Note that some tests will require root privileges. Running a subset of selftests -======================================== +============================= + You can use the "TARGETS" variable on the make command line to specify single test to run, or a list of tests to run. -To run only tests targeted for a single subsystem: - $ make -C tools/testing/selftests TARGETS=ptrace run_tests +To run only tests targeted for a single subsystem:: + + $ make -C tools/testing/selftests TARGETS=ptrace run_tests + +You can specify multiple tests to build and run:: -You can specify multiple tests to build and run: $ make TARGETS="size timers" kselftest See the top-level tools/testing/selftests/Makefile for the list of all @@ -46,13 +53,15 @@ possible targets. Running the full range hotplug selftests ======================================== -To build the hotplug tests: +To build the hotplug tests:: + $ make -C tools/testing/selftests hotplug -To run the hotplug tests: +To run the hotplug tests:: + $ make -C tools/testing/selftests run_hotplug -- note that some tests will require root privileges. +Note that some tests will require root privileges. Install selftests @@ -62,11 +71,13 @@ You can use kselftest_install.sh tool installs selftests in default location which is tools/testing/selftests/kselftest or a user specified location. 
-To install selftests in default location: +To install selftests in default location:: + $ cd tools/testing/selftests $ ./kselftest_install.sh -To install selftests in a user specified location: +To install selftests in a user specified location:: + $ cd tools/testing/selftests $ ./kselftest_install.sh install_dir @@ -77,10 +88,10 @@ Kselftest install as well as the Kselftest tarball provide a script named "run_kselftest.sh" to run the tests. You can simply do the following to run the installed Kselftests. Please -note some tests will require root privileges. +note some tests will require root privileges:: -cd kselftest -./run_kselftest.sh + $ cd kselftest + $ ./run_kselftest.sh Contributing new tests ====================== @@ -96,14 +107,49 @@ In general, the rules for selftests are * Don't cause the top-level "make run_tests" to fail if your feature is unconfigured. -Contributing new tests(details) -=============================== +Contributing new tests (details) +================================ * Use TEST_GEN_XXX if such binaries or files are generated during compiling. + TEST_PROGS, TEST_GEN_PROGS mean it is the executable tested by default. + TEST_PROGS_EXTENDED, TEST_GEN_PROGS_EXTENDED mean it is the executable which is not tested by default. TEST_FILES, TEST_GEN_FILES mean it is the file which is used by the test. + +Test Harness +============ + +The kselftest_harness.h file contains useful helpers to build tests. The tests +from tools/testing/selftests/seccomp/seccomp_bpf.c can be used as an example. + +Example +------- + +.. kernel-doc:: tools/testing/selftests/kselftest_harness.h + :doc: example + + +Helpers +------- + +.. kernel-doc:: tools/testing/selftests/kselftest_harness.h + :functions: TH_LOG TEST TEST_SIGNAL FIXTURE FIXTURE_DATA FIXTURE_SETUP + FIXTURE_TEARDOWN TEST_F TEST_HARNESS_MAIN + +Operators +--------- + +.. kernel-doc:: tools/testing/selftests/kselftest_harness.h + :doc: operators + +..
kernel-doc:: tools/testing/selftests/kselftest_harness.h + :functions: ASSERT_EQ ASSERT_NE ASSERT_LT ASSERT_LE ASSERT_GT ASSERT_GE + ASSERT_NULL ASSERT_TRUE ASSERT_NULL ASSERT_TRUE ASSERT_FALSE + ASSERT_STREQ ASSERT_STRNE EXPECT_EQ EXPECT_NE EXPECT_LT + EXPECT_LE EXPECT_GT EXPECT_GE EXPECT_NULL EXPECT_TRUE + EXPECT_FALSE EXPECT_STREQ EXPECT_STRNE diff --git a/MAINTAINERS b/MAINTAINERS index a4f37b69a66c..b31be7522e45 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -7337,9 +7337,10 @@ KERNEL SELFTEST FRAMEWORK M: Shuah Khan M: Shuah Khan L: linux-kselftest@vger.kernel.org -T: git git://git.kernel.org/pub/scm/shuah/linux-kselftest +T: git git://git.kernel.org/pub/scm/linux/kernel/git/shuah/linux-kselftest.git S: Maintained -F: tools/testing/selftests +F: tools/testing/selftests/ +F: Documentation/dev-tools/kselftest* KERNEL VIRTUAL MACHINE (KVM) M: Paolo Bonzini @@ -11740,6 +11741,7 @@ F: kernel/seccomp.c F: include/uapi/linux/seccomp.h F: include/linux/seccomp.h F: tools/testing/selftests/seccomp/* +F: tools/testing/selftests/kselftest_harness.h F: Documentation/userspace-api/seccomp_filter.rst K: \bsecure_computing K: \bTIF_SECCOMP\b diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index 00e2e4169b1e..948ec32e0c27 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c @@ -4564,7 +4564,8 @@ static const char readme_msg[] = #endif #if defined(CONFIG_KPROBE_EVENTS) || defined(CONFIG_UPROBE_EVENTS) "\t accepts: event-definitions (one definition per line)\n" - "\t Format: p|r[:[/]] []\n" + "\t Format: p[:[/]] []\n" + "\t r[maxactive][:[/]] []\n" "\t -:[/]\n" #ifdef CONFIG_KPROBE_EVENTS "\t place: [:][+]|\n" diff --git a/tools/testing/selftests/breakpoints/breakpoint_test.c b/tools/testing/selftests/breakpoints/breakpoint_test.c index 120895ab5505..f63356151ad4 100644 --- a/tools/testing/selftests/breakpoints/breakpoint_test.c +++ b/tools/testing/selftests/breakpoints/breakpoint_test.c @@ -16,6 +16,8 @@ #include #include #include +#include +#include #include "../kselftest.h" @@ -42,10 +44,9 @@ static void set_breakpoint_addr(void *addr, int n) ret = ptrace(PTRACE_POKEUSER, child_pid, offsetof(struct user, u_debugreg[n]), addr); - if (ret) { - perror("Can't set breakpoint addr\n"); - ksft_exit_fail(); - } + if (ret) + ksft_exit_fail_msg("Can't set breakpoint addr: %s\n", + strerror(errno)); } static void toggle_breakpoint(int n, int type, int len, @@ -106,8 +107,8 @@ static void toggle_breakpoint(int n, int type, int len, ret = ptrace(PTRACE_POKEUSER, child_pid, offsetof(struct user, u_debugreg[7]), dr7); if (ret) { - perror("Can't set dr7"); - ksft_exit_fail(); + ksft_print_msg("Can't set dr7: %s\n", strerror(errno)); + exit(-1); } } @@ -206,7 +207,7 @@ static void trigger_tests(void) ret = ptrace(PTRACE_TRACEME, 0, NULL, 0); if (ret) { - perror("Can't be traced?\n"); + ksft_print_msg("Can't be traced? 
%s\n", strerror(errno)); return; } @@ -261,29 +262,30 @@ static void trigger_tests(void) static void check_success(const char *msg) { - const char *msg2; int child_nr_tests; int status; + int ret; /* Wait for the child to SIGTRAP */ wait(&status); - msg2 = "Failed"; + ret = 0; if (WSTOPSIG(status) == SIGTRAP) { child_nr_tests = ptrace(PTRACE_PEEKDATA, child_pid, &nr_tests, 0); if (child_nr_tests == nr_tests) - msg2 = "Ok"; - if (ptrace(PTRACE_POKEDATA, child_pid, &trapped, 1)) { - perror("Can't poke\n"); - ksft_exit_fail(); - } + ret = 1; + if (ptrace(PTRACE_POKEDATA, child_pid, &trapped, 1)) + ksft_exit_fail_msg("Can't poke: %s\n", strerror(errno)); } nr_tests++; - printf("%s [%s]\n", msg, msg2); + if (ret) + ksft_test_result_pass(msg); + else + ksft_test_result_fail(msg); } static void launch_instruction_breakpoints(char *buf, int local, int global) @@ -294,7 +296,7 @@ static void launch_instruction_breakpoints(char *buf, int local, int global) set_breakpoint_addr(dummy_funcs[i], i); toggle_breakpoint(i, BP_X, 1, local, global, 1); ptrace(PTRACE_CONT, child_pid, NULL, 0); - sprintf(buf, "Test breakpoint %d with local: %d global: %d", + sprintf(buf, "Test breakpoint %d with local: %d global: %d\n", i, local, global); check_success(buf); toggle_breakpoint(i, BP_X, 1, local, global, 0); @@ -316,8 +318,9 @@ static void launch_watchpoints(char *buf, int mode, int len, set_breakpoint_addr(&dummy_var[i], i); toggle_breakpoint(i, mode, len, local, global, 1); ptrace(PTRACE_CONT, child_pid, NULL, 0); - sprintf(buf, "Test %s watchpoint %d with len: %d local: " - "%d global: %d", mode_str, i, len, local, global); + sprintf(buf, + "Test %s watchpoint %d with len: %d local: %d global: %d\n", + mode_str, i, len, local, global); check_success(buf); toggle_breakpoint(i, mode, len, local, global, 0); } @@ -378,10 +381,12 @@ int main(int argc, char **argv) pid_t pid; int ret; + ksft_print_header(); + pid = fork(); if (!pid) { trigger_tests(); - return 0; + exit(0); } child_pid = pid; @@ -392,5 +397,5 @@ int main(int argc, char **argv) wait(NULL); - return ksft_exit_pass(); + ksft_exit_pass(); } diff --git a/tools/testing/selftests/breakpoints/breakpoint_test_arm64.c b/tools/testing/selftests/breakpoints/breakpoint_test_arm64.c index 3897e996541e..960d02100c26 100644 --- a/tools/testing/selftests/breakpoints/breakpoint_test_arm64.c +++ b/tools/testing/selftests/breakpoints/breakpoint_test_arm64.c @@ -43,19 +43,25 @@ static void child(int size, int wr) volatile uint8_t *addr = &var[32 + wr]; if (ptrace(PTRACE_TRACEME, 0, NULL, NULL) != 0) { - perror("ptrace(PTRACE_TRACEME) failed"); + ksft_print_msg( + "ptrace(PTRACE_TRACEME) failed: %s\n", + strerror(errno)); _exit(1); } if (raise(SIGSTOP) != 0) { - perror("raise(SIGSTOP) failed"); + ksft_print_msg( + "raise(SIGSTOP) failed: %s\n", strerror(errno)); _exit(1); } if ((uintptr_t) addr % size) { - perror("Wrong address write for the given size\n"); + ksft_print_msg( + "Wrong address write for the given size: %s\n", + strerror(errno)); _exit(1); } + switch (size) { case 1: *addr = 47; @@ -100,12 +106,14 @@ static bool set_watchpoint(pid_t pid, int size, int wp) if (ptrace(PTRACE_SETREGSET, pid, NT_ARM_HW_WATCH, &iov) == 0) return true; - if (errno == EIO) { - printf("ptrace(PTRACE_SETREGSET, NT_ARM_HW_WATCH) " - "not supported on this hardware\n"); - ksft_exit_skip(); - } - perror("ptrace(PTRACE_SETREGSET, NT_ARM_HW_WATCH) failed"); + if (errno == EIO) + ksft_print_msg( + "ptrace(PTRACE_SETREGSET, NT_ARM_HW_WATCH) not supported on this hardware: %s\n", + 
strerror(errno)); + + ksft_print_msg( + "ptrace(PTRACE_SETREGSET, NT_ARM_HW_WATCH) failed: %s\n", + strerror(errno)); return false; } @@ -117,7 +125,8 @@ static bool run_test(int wr_size, int wp_size, int wr, int wp) pid_t wpid; if (pid < 0) { - perror("fork() failed"); + ksft_test_result_fail( + "fork() failed: %s\n", strerror(errno)); return false; } if (pid == 0) @@ -125,15 +134,17 @@ static bool run_test(int wr_size, int wp_size, int wr, int wp) wpid = waitpid(pid, &status, __WALL); if (wpid != pid) { - perror("waitpid() failed"); + ksft_print_msg( + "waitpid() failed: %s\n", strerror(errno)); return false; } if (!WIFSTOPPED(status)) { - printf("child did not stop\n"); + ksft_print_msg( + "child did not stop: %s\n", strerror(errno)); return false; } if (WSTOPSIG(status) != SIGSTOP) { - printf("child did not stop with SIGSTOP\n"); + ksft_print_msg("child did not stop with SIGSTOP\n"); return false; } @@ -141,42 +152,49 @@ static bool run_test(int wr_size, int wp_size, int wr, int wp) return false; if (ptrace(PTRACE_CONT, pid, NULL, NULL) < 0) { - perror("ptrace(PTRACE_SINGLESTEP) failed"); + ksft_print_msg( + "ptrace(PTRACE_SINGLESTEP) failed: %s\n", + strerror(errno)); return false; } alarm(3); wpid = waitpid(pid, &status, __WALL); if (wpid != pid) { - perror("waitpid() failed"); + ksft_print_msg( + "waitpid() failed: %s\n", strerror(errno)); return false; } alarm(0); if (WIFEXITED(status)) { - printf("child did not single-step\t"); + ksft_print_msg("child did not single-step\n"); return false; } if (!WIFSTOPPED(status)) { - printf("child did not stop\n"); + ksft_print_msg("child did not stop\n"); return false; } if (WSTOPSIG(status) != SIGTRAP) { - printf("child did not stop with SIGTRAP\n"); + ksft_print_msg("child did not stop with SIGTRAP\n"); return false; } if (ptrace(PTRACE_GETSIGINFO, pid, NULL, &siginfo) != 0) { - perror("ptrace(PTRACE_GETSIGINFO)"); + ksft_print_msg( + "ptrace(PTRACE_GETSIGINFO): %s\n", + strerror(errno)); return false; } if (siginfo.si_code != TRAP_HWBKPT) { - printf("Unexpected si_code %d\n", siginfo.si_code); + ksft_print_msg( + "Unexpected si_code %d\n", siginfo.si_code); return false; } kill(pid, SIGKILL); wpid = waitpid(pid, &status, 0); if (wpid != pid) { - perror("waitpid() failed"); + ksft_print_msg( + "waitpid() failed: %s\n", strerror(errno)); return false; } return true; @@ -194,6 +212,8 @@ int main(int argc, char **argv) int wr, wp, size; bool result; + ksft_print_header(); + act.sa_handler = sigalrm; sigemptyset(&act.sa_mask); act.sa_flags = 0; @@ -201,14 +221,16 @@ int main(int argc, char **argv) for (size = 1; size <= 32; size = size*2) { for (wr = 0; wr <= 32; wr = wr + size) { for (wp = wr - size; wp <= wr + size; wp = wp + size) { - printf("Test size = %d write offset = %d watchpoint offset = %d\t", size, wr, wp); result = run_test(size, MIN(size, 8), wr, wp); - if ((result && wr == wp) || (!result && wr != wp)) { - printf("[OK]\n"); - ksft_inc_pass_cnt(); - } else { - printf("[FAILED]\n"); - ksft_inc_fail_cnt(); + if ((result && wr == wp) || + (!result && wr != wp)) + ksft_test_result_pass( + "Test size = %d write offset = %d watchpoint offset = %d\n", + size, wr, wp); + else { + ksft_test_result_fail( + "Test size = %d write offset = %d watchpoint offset = %d\n", + size, wr, wp); succeeded = false; } } @@ -216,19 +238,18 @@ int main(int argc, char **argv) } for (size = 1; size <= 32; size = size*2) { - printf("Test size = %d write offset = %d watchpoint offset = -8\t", size, -size); - - if (run_test(size, 8, -size, -8)) { - 
printf("[OK]\n"); - ksft_inc_pass_cnt(); - } else { - printf("[FAILED]\n"); - ksft_inc_fail_cnt(); + if (run_test(size, 8, -size, -8)) + ksft_test_result_pass( + "Test size = %d write offset = %d watchpoint offset = -8\n", + size, -size); + else { + ksft_test_result_fail( + "Test size = %d write offset = %d watchpoint offset = -8\n", + size, -size); succeeded = false; } } - ksft_print_cnts(); if (succeeded) ksft_exit_pass(); else diff --git a/tools/testing/selftests/breakpoints/step_after_suspend_test.c b/tools/testing/selftests/breakpoints/step_after_suspend_test.c index 60b8a95dac26..3fece06e9f64 100644 --- a/tools/testing/selftests/breakpoints/step_after_suspend_test.c +++ b/tools/testing/selftests/breakpoints/step_after_suspend_test.c @@ -37,17 +37,19 @@ void child(int cpu) CPU_ZERO(&set); CPU_SET(cpu, &set); if (sched_setaffinity(0, sizeof(set), &set) != 0) { - perror("sched_setaffinity() failed"); + ksft_print_msg("sched_setaffinity() failed: %s\n", + strerror(errno)); _exit(1); } if (ptrace(PTRACE_TRACEME, 0, NULL, NULL) != 0) { - perror("ptrace(PTRACE_TRACEME) failed"); + ksft_print_msg("ptrace(PTRACE_TRACEME) failed: %s\n", + strerror(errno)); _exit(1); } if (raise(SIGSTOP) != 0) { - perror("raise(SIGSTOP) failed"); + ksft_print_msg("raise(SIGSTOP) failed: %s\n", strerror(errno)); _exit(1); } @@ -61,7 +63,7 @@ bool run_test(int cpu) pid_t wpid; if (pid < 0) { - perror("fork() failed"); + ksft_print_msg("fork() failed: %s\n", strerror(errno)); return false; } if (pid == 0) @@ -69,57 +71,64 @@ wpid = waitpid(pid, &status, __WALL); if (wpid != pid) { - perror("waitpid() failed"); + ksft_print_msg("waitpid() failed: %s\n", strerror(errno)); return false; } if (!WIFSTOPPED(status)) { - printf("child did not stop\n"); + ksft_print_msg("child did not stop: %s\n", strerror(errno)); return false; } if (WSTOPSIG(status) != SIGSTOP) { - printf("child did not stop with SIGSTOP\n"); + ksft_print_msg("child did not stop with SIGSTOP: %s\n", + strerror(errno)); return false; } if (ptrace(PTRACE_SINGLESTEP, pid, NULL, NULL) < 0) { if (errno == EIO) { - printf("ptrace(PTRACE_SINGLESTEP) not supported on this architecture\n"); - ksft_exit_skip(); + ksft_exit_skip( + "ptrace(PTRACE_SINGLESTEP) not supported on this architecture: %s\n", + strerror(errno)); } - perror("ptrace(PTRACE_SINGLESTEP) failed"); + ksft_print_msg("ptrace(PTRACE_SINGLESTEP) failed: %s\n", + strerror(errno)); return false; } wpid = waitpid(pid, &status, __WALL); if (wpid != pid) { - perror("waitpid() failed"); + ksft_print_msg("waitpid() failed: %s\n", strerror(errno)); return false; } if (WIFEXITED(status)) { - printf("child did not single-step\n"); + ksft_print_msg("child did not single-step: %s\n", + strerror(errno)); return false; } if (!WIFSTOPPED(status)) { - printf("child did not stop\n"); + ksft_print_msg("child did not stop: %s\n", strerror(errno)); return false; } if (WSTOPSIG(status) != SIGTRAP) { - printf("child did not stop with SIGTRAP\n"); + ksft_print_msg("child did not stop with SIGTRAP: %s\n", + strerror(errno)); return false; } if (ptrace(PTRACE_CONT, pid, NULL, NULL) < 0) { - perror("ptrace(PTRACE_CONT) failed"); + ksft_print_msg("ptrace(PTRACE_CONT) failed: %s\n", + strerror(errno)); return false; } wpid = waitpid(pid, &status, __WALL); if (wpid != pid) { - perror("waitpid() failed"); + ksft_print_msg("waitpid() failed: %s\n", strerror(errno)); return false; } if (!WIFEXITED(status)) { - printf("child did not exit after PTRACE_CONT\n"); + ksft_print_msg("child did not exit after 
PTRACE_CONT: %s\n", + strerror(errno)); return false; } @@ -135,28 +144,21 @@ void suspend(void) struct itimerspec spec = {}; power_state_fd = open("/sys/power/state", O_RDWR); - if (power_state_fd < 0) { - perror("open(\"/sys/power/state\") failed (is this test running as root?)"); - ksft_exit_fail(); - } + if (power_state_fd < 0) + ksft_exit_fail_msg( + "open(\"/sys/power/state\") failed (is this test running as root?)\n"); timerfd = timerfd_create(CLOCK_BOOTTIME_ALARM, 0); - if (timerfd < 0) { - perror("timerfd_create() failed"); - ksft_exit_fail(); - } + if (timerfd < 0) + ksft_exit_fail_msg("timerfd_create() failed\n"); spec.it_value.tv_sec = 5; err = timerfd_settime(timerfd, 0, &spec, NULL); - if (err < 0) { - perror("timerfd_settime() failed"); - ksft_exit_fail(); - } + if (err < 0) + ksft_exit_fail_msg("timerfd_settime() failed\n"); - if (write(power_state_fd, "mem", strlen("mem")) != strlen("mem")) { - perror("entering suspend failed"); - ksft_exit_fail(); - } + if (write(power_state_fd, "mem", strlen("mem")) != strlen("mem")) + ksft_exit_fail_msg("Failed to enter Suspend state\n"); close(timerfd); close(power_state_fd); @@ -171,6 +173,8 @@ int main(int argc, char **argv) int err; int cpu; + ksft_print_header(); + while ((opt = getopt(argc, argv, "n")) != -1) { switch (opt) { case 'n': @@ -187,10 +191,8 @@ int main(int argc, char **argv) suspend(); err = sched_getaffinity(0, sizeof(available_cpus), &available_cpus); - if (err < 0) { - perror("sched_getaffinity() failed"); - ksft_exit_fail(); - } + if (err < 0) + ksft_exit_fail_msg("sched_getaffinity() failed\n"); for (cpu = 0; cpu < CPU_SETSIZE; cpu++) { bool test_success; @@ -199,18 +201,14 @@ int main(int argc, char **argv) continue; test_success = run_test(cpu); - printf("CPU %d: ", cpu); if (test_success) { - printf("[OK]\n"); - ksft_inc_pass_cnt(); + ksft_test_result_pass("CPU %d\n", cpu); } else { - printf("[FAILED]\n"); - ksft_inc_fail_cnt(); + ksft_test_result_fail("CPU %d\n", cpu); succeeded = false; } } - ksft_print_cnts(); if (succeeded) ksft_exit_pass(); else diff --git a/tools/testing/selftests/capabilities/test_execve.c b/tools/testing/selftests/capabilities/test_execve.c index 10a21a958aaf..763f37fecfb8 100644 --- a/tools/testing/selftests/capabilities/test_execve.c +++ b/tools/testing/selftests/capabilities/test_execve.c @@ -138,9 +138,6 @@ static void chdir_to_tmpfs(void) if (chdir(cwd) != 0) err(1, "chdir to private tmpfs"); - - if (umount2(".", MNT_DETACH) != 0) - err(1, "detach private tmpfs"); } static void copy_fromat_to(int fromfd, const char *fromname, const char *toname) @@ -248,7 +245,7 @@ static int do_tests(int uid, const char *our_path) err(1, "chown"); if (chmod("validate_cap_sgidnonroot", S_ISGID | 0710) != 0) err(1, "chmod"); -} + } capng_get_caps_process(); @@ -384,7 +381,7 @@ static int do_tests(int uid, const char *our_path) } else { printf("[RUN]\tNon-root +ia, sgidnonroot => i\n"); exec_other_validate_cap("./validate_cap_sgidnonroot", - false, false, true, false); + false, false, true, false); if (fork_wait()) { printf("[RUN]\tNon-root +ia, sgidroot => i\n"); diff --git a/tools/testing/selftests/ftrace/ftracetest b/tools/testing/selftests/ftrace/ftracetest index 717581145cfc..14a03ea1e21d 100755 --- a/tools/testing/selftests/ftrace/ftracetest +++ b/tools/testing/selftests/ftrace/ftracetest @@ -250,7 +250,7 @@ run_test() { # testfile local testlog=`mktemp $LOG_DIR/${testname}-log.XXXXXX` export TMPDIR=`mktemp -d /tmp/ftracetest-dir.XXXXXX` testcase $1 - echo "execute: "$1 > $testlog + echo 
"execute$INSTANCE: "$1 > $testlog SIG_RESULT=0 if [ $VERBOSE -ge 2 ]; then __run_test $1 2>> $testlog | tee -a $testlog diff --git a/tools/testing/selftests/ftrace/test.d/event/toplevel-enable.tc b/tools/testing/selftests/ftrace/test.d/event/toplevel-enable.tc index 0bb5df3c00d4..15e2d3fe1731 100644 --- a/tools/testing/selftests/ftrace/test.d/event/toplevel-enable.tc +++ b/tools/testing/selftests/ftrace/test.d/event/toplevel-enable.tc @@ -28,7 +28,9 @@ echo '*:*' > set_event yield -count=`cat trace | grep -v ^# | wc -l` +echo 0 > tracing_on + +count=`head -n 128 trace | grep -v ^# | wc -l` if [ $count -eq 0 ]; then fail "none of events are recorded" fi @@ -36,10 +38,12 @@ fi do_reset echo 1 > events/enable +echo 1 > tracing_on yield -count=`cat trace | grep -v ^# | wc -l` +echo 0 > tracing_on +count=`head -n 128 trace | grep -v ^# | wc -l` if [ $count -eq 0 ]; then fail "none of events are recorded" fi diff --git a/tools/testing/selftests/ftrace/test.d/ftrace/func-filter-glob.tc b/tools/testing/selftests/ftrace/test.d/ftrace/func-filter-glob.tc index 9dcd0ca1f49c..8095e122daa9 100644 --- a/tools/testing/selftests/ftrace/test.d/ftrace/func-filter-glob.tc +++ b/tools/testing/selftests/ftrace/test.d/ftrace/func-filter-glob.tc @@ -11,17 +11,6 @@ fi disable_tracing clear_trace -# filter by ?, schedule is always good -if ! echo "sch?dule" > set_ftrace_filter; then - # test for powerpc 64 - if ! echo ".sch?dule" > set_ftrace_filter; then - fail "can not enable schedule filter" - fi - cat set_ftrace_filter | grep '^.schedule$' -else - cat set_ftrace_filter | grep '^schedule$' -fi - ftrace_filter_check() { # glob grep echo "$1" > set_ftrace_filter cut -f1 -d" " set_ftrace_filter > $TMPDIR/actual @@ -39,11 +28,28 @@ ftrace_filter_check '*schedule*' '^.*schedule.*$' # filter by *, end match ftrace_filter_check 'schedule*' '^schedule.*$' +# Advanced full-glob matching feature is recently supported. +# Skip the tests if we are sure the kernel does not support it. +if grep -q 'accepts: .* glob-matching-pattern' README ; then + # filter by *, both side match ftrace_filter_check 'sch*ule' '^sch.*ule$' # filter by char class. ftrace_filter_check '[Ss]y[Ss]_*' '^[Ss]y[Ss]_.*$' +# filter by ?, schedule is always good +if ! echo "sch?dule" > set_ftrace_filter; then + # test for powerpc 64 + if ! 
echo ".sch?dule" > set_ftrace_filter; then + fail "can not enable schedule filter" + fi + cat set_ftrace_filter | grep '^.schedule$' +else + cat set_ftrace_filter | grep '^schedule$' +fi + +fi + echo > set_ftrace_filter enable_tracing diff --git a/tools/testing/selftests/ftrace/test.d/ftrace/func_event_triggers.tc b/tools/testing/selftests/ftrace/test.d/ftrace/func_event_triggers.tc index aa31368851c9..77dfb6b48186 100644 --- a/tools/testing/selftests/ftrace/test.d/ftrace/func_event_triggers.tc +++ b/tools/testing/selftests/ftrace/test.d/ftrace/func_event_triggers.tc @@ -72,6 +72,15 @@ run_enable_disable() { test_event_enabled $check_disable echo "schedule:${enable}_event:$EVENT" > set_ftrace_filter + if [ -d ../../instances ]; then # Check instances + cur=`cat set_ftrace_filter` + top=`cat ../../set_ftrace_filter` + if [ "$cur" = "$top" ]; then + echo "This kernel is too old to support per instance filter" + reset_ftrace_filter + exit_unsupported + fi + fi echo " make sure it works 5 times" diff --git a/tools/testing/selftests/ftrace/test.d/ftrace/func_traceonoff_triggers.tc b/tools/testing/selftests/ftrace/test.d/ftrace/func_traceonoff_triggers.tc index c8e02ec01eaf..7a9ab4ff83b6 100644 --- a/tools/testing/selftests/ftrace/test.d/ftrace/func_traceonoff_triggers.tc +++ b/tools/testing/selftests/ftrace/test.d/ftrace/func_traceonoff_triggers.tc @@ -63,6 +63,10 @@ fi # powerpc uses .schedule func="schedule" +available_file=available_filter_functions +if [ -d ../../instances -a -f ../../available_filter_functions ]; then + available_file=../../available_filter_functions +fi x=`grep '^\.schedule$' available_filter_functions | wc -l` if [ "$x" -eq 1 ]; then func=".schedule" @@ -71,6 +75,15 @@ fi echo '** SET TRACEOFF' echo "$func:traceoff" > set_ftrace_filter +if [ -d ../../instances ]; then # Check instances + cur=`cat set_ftrace_filter` + top=`cat ../../set_ftrace_filter` + if [ "$cur" = "$top" ]; then + echo "This kernel is too old to support per instance filter" + reset_ftrace_filter + exit_unsupported + fi +fi cnt=`grep schedule set_ftrace_filter | wc -l` if [ $cnt -ne 1 ]; then @@ -90,11 +103,11 @@ if [ $on != "0" ]; then fail "Tracing is not off" fi -line1=`cat trace | tail -1` +csum1=`md5sum trace` sleep $SLEEP_TIME -line2=`cat trace | tail -1` +csum2=`md5sum trace` -if [ "$line1" != "$line2" ]; then +if [ "$csum1" != "$csum2" ]; then fail "Tracing file is still changing" fi diff --git a/tools/testing/selftests/ftrace/test.d/instances/instance-event.tc b/tools/testing/selftests/ftrace/test.d/instances/instance-event.tc index c73db7863adb..8a353314dc9b 100644 --- a/tools/testing/selftests/ftrace/test.d/instances/instance-event.tc +++ b/tools/testing/selftests/ftrace/test.d/instances/instance-event.tc @@ -82,7 +82,10 @@ rmdir foo if [ -d foo ]; then fail "foo still exists" fi - +if grep -q "schedule:enable_event:sched:sched_switch" ../set_ftrace_filter; then + echo "Older kernel detected. 
Cleanup filter" + echo '!schedule:enable_event:sched:sched_switch' > ../set_ftrace_filter +fi instance_slam() { while :; do diff --git a/tools/testing/selftests/ftrace/test.d/kprobe/kretprobe_maxactive.tc b/tools/testing/selftests/ftrace/test.d/kprobe/kretprobe_maxactive.tc index 57abdf1caabf..7ec6f2639ad6 100644 --- a/tools/testing/selftests/ftrace/test.d/kprobe/kretprobe_maxactive.tc +++ b/tools/testing/selftests/ftrace/test.d/kprobe/kretprobe_maxactive.tc @@ -2,6 +2,7 @@ # description: Kretprobe dynamic event with maxactive [ -f kprobe_events ] || exit_unsupported # this is configurable +grep -q 'r\[maxactive\]' README || exit_unsupported # this is older version echo > kprobe_events diff --git a/tools/testing/selftests/futex/functional/futex_requeue_pi.c b/tools/testing/selftests/futex/functional/futex_requeue_pi.c index 3da06ad23996..d24ab7421e73 100644 --- a/tools/testing/selftests/futex/functional/futex_requeue_pi.c +++ b/tools/testing/selftests/futex/functional/futex_requeue_pi.c @@ -32,6 +32,7 @@ #include "futextest.h" #include "logging.h" +#define TEST_NAME "futex-requeue-pi" #define MAX_WAKE_ITERS 1000 #define THREAD_MAX 10 #define SIGNAL_PERIOD_US 100 @@ -404,6 +405,6 @@ int main(int argc, char *argv[]) */ ret = unit_test(broadcast, locked, owner, timeout_ns); - print_result(ret); + print_result(TEST_NAME, ret); return ret; } diff --git a/tools/testing/selftests/futex/functional/futex_requeue_pi_mismatched_ops.c b/tools/testing/selftests/futex/functional/futex_requeue_pi_mismatched_ops.c index d5e4f2c4da2a..e0a798ad0d21 100644 --- a/tools/testing/selftests/futex/functional/futex_requeue_pi_mismatched_ops.c +++ b/tools/testing/selftests/futex/functional/futex_requeue_pi_mismatched_ops.c @@ -30,6 +30,8 @@ #include "futextest.h" #include "logging.h" +#define TEST_NAME "futex-requeue-pi-mismatched-ops" + futex_t f1 = FUTEX_INITIALIZER; futex_t f2 = FUTEX_INITIALIZER; int child_ret = 0; @@ -130,6 +132,6 @@ int main(int argc, char *argv[]) out: /* If the kernel crashes, we shouldn't return at all. 
*/ - print_result(ret); + print_result(TEST_NAME, ret); return ret; } diff --git a/tools/testing/selftests/futex/functional/futex_requeue_pi_signal_restart.c b/tools/testing/selftests/futex/functional/futex_requeue_pi_signal_restart.c index 3d7dc6afc3f8..982f83577501 100644 --- a/tools/testing/selftests/futex/functional/futex_requeue_pi_signal_restart.c +++ b/tools/testing/selftests/futex/functional/futex_requeue_pi_signal_restart.c @@ -32,6 +32,7 @@ #include "futextest.h" #include "logging.h" +#define TEST_NAME "futex-requeue-pi-signal-restart" #define DELAY_US 100 futex_t f1 = FUTEX_INITIALIZER; @@ -218,6 +219,6 @@ int main(int argc, char *argv[]) if (ret == RET_PASS && waiter_ret) ret = waiter_ret; - print_result(ret); + print_result(TEST_NAME, ret); return ret; } diff --git a/tools/testing/selftests/futex/functional/futex_wait_private_mapped_file.c b/tools/testing/selftests/futex/functional/futex_wait_private_mapped_file.c index 5f687f247454..bdc48dc047e5 100644 --- a/tools/testing/selftests/futex/functional/futex_wait_private_mapped_file.c +++ b/tools/testing/selftests/futex/functional/futex_wait_private_mapped_file.c @@ -34,6 +34,7 @@ #include "logging.h" #include "futextest.h" +#define TEST_NAME "futex-wait-private-mapped-file" #define PAGE_SZ 4096 char pad[PAGE_SZ] = {1}; @@ -60,7 +61,7 @@ void *thr_futex_wait(void *arg) ret = futex_wait(&val, 1, &wait_timeout, 0); if (ret && errno != EWOULDBLOCK && errno != ETIMEDOUT) { error("futex error.\n", errno); - print_result(RET_ERROR); + print_result(TEST_NAME, RET_ERROR); exit(RET_ERROR); } @@ -120,6 +121,6 @@ int main(int argc, char **argv) pthread_join(thr, NULL); out: - print_result(ret); + print_result(TEST_NAME, ret); return ret; } diff --git a/tools/testing/selftests/futex/functional/futex_wait_timeout.c b/tools/testing/selftests/futex/functional/futex_wait_timeout.c index ab428ca894de..6aadd560366e 100644 --- a/tools/testing/selftests/futex/functional/futex_wait_timeout.c +++ b/tools/testing/selftests/futex/functional/futex_wait_timeout.c @@ -27,6 +27,8 @@ #include "futextest.h" #include "logging.h" +#define TEST_NAME "futex-wait-timeout" + static long timeout_ns = 100000; /* 100us default timeout */ void usage(char *prog) @@ -81,6 +83,6 @@ int main(int argc, char *argv[]) ret = RET_FAIL; } - print_result(ret); + print_result(TEST_NAME, ret); return ret; } diff --git a/tools/testing/selftests/futex/functional/futex_wait_uninitialized_heap.c b/tools/testing/selftests/futex/functional/futex_wait_uninitialized_heap.c index fe7aee96844b..d237a8b702f0 100644 --- a/tools/testing/selftests/futex/functional/futex_wait_uninitialized_heap.c +++ b/tools/testing/selftests/futex/functional/futex_wait_uninitialized_heap.c @@ -36,6 +36,7 @@ #include "logging.h" #include "futextest.h" +#define TEST_NAME "futex-wait-uninitialized-heap" #define WAIT_US 5000000 static int child_blocked = 1; @@ -119,6 +120,6 @@ int main(int argc, char **argv) } out: - print_result(ret); + print_result(TEST_NAME, ret); return ret; } diff --git a/tools/testing/selftests/futex/functional/futex_wait_wouldblock.c b/tools/testing/selftests/futex/functional/futex_wait_wouldblock.c index b6b027448825..9a2c56fa7305 100644 --- a/tools/testing/selftests/futex/functional/futex_wait_wouldblock.c +++ b/tools/testing/selftests/futex/functional/futex_wait_wouldblock.c @@ -28,6 +28,7 @@ #include "futextest.h" #include "logging.h" +#define TEST_NAME "futex-wait-wouldblock" #define timeout_ns 100000 void usage(char *prog) @@ -74,6 +75,6 @@ int main(int argc, char *argv[]) ret = 
RET_FAIL; } - print_result(ret); + print_result(TEST_NAME, ret); return ret; } diff --git a/tools/testing/selftests/futex/include/logging.h b/tools/testing/selftests/futex/include/logging.h index e14469103f07..4e7944984fbb 100644 --- a/tools/testing/selftests/futex/include/logging.h +++ b/tools/testing/selftests/futex/include/logging.h @@ -107,7 +107,7 @@ void log_verbosity(int level) * * print_result() is primarily intended for functional tests. */ -void print_result(int ret) +void print_result(const char *test_name, int ret) { const char *result = "Unknown return code"; @@ -124,7 +124,7 @@ void print_result(int ret) result = FAIL; break; } - printf("Result: %s\n", result); + printf("selftests: %s [%s]\n", test_name, result); } /* log level macros */ diff --git a/tools/testing/selftests/intel_pstate/.gitignore b/tools/testing/selftests/intel_pstate/.gitignore new file mode 100644 index 000000000000..3bfcbae5fa13 --- /dev/null +++ b/tools/testing/selftests/intel_pstate/.gitignore @@ -0,0 +1,2 @@ +aperf +msr diff --git a/tools/testing/selftests/intel_pstate/Makefile b/tools/testing/selftests/intel_pstate/Makefile index 19678e90efb2..849a90ffe8dd 100644 --- a/tools/testing/selftests/intel_pstate/Makefile +++ b/tools/testing/selftests/intel_pstate/Makefile @@ -1,5 +1,5 @@ CFLAGS := $(CFLAGS) -Wall -D_GNU_SOURCE -LDFLAGS := $(LDFLAGS) -lm +LDLIBS := $(LDLIBS) -lm TEST_GEN_FILES := msr aperf diff --git a/tools/testing/selftests/kselftest.h b/tools/testing/selftests/kselftest.h index ef1c80d67ac7..08e90c2cc5cb 100644 --- a/tools/testing/selftests/kselftest.h +++ b/tools/testing/selftests/kselftest.h @@ -12,6 +12,7 @@ #include #include +#include /* define kselftest exit codes */ #define KSFT_PASS 0 @@ -31,38 +32,125 @@ struct ksft_count { static struct ksft_count ksft_cnt; +static inline int ksft_test_num(void) +{ + return ksft_cnt.ksft_pass + ksft_cnt.ksft_fail + + ksft_cnt.ksft_xfail + ksft_cnt.ksft_xpass + + ksft_cnt.ksft_xskip; +} + static inline void ksft_inc_pass_cnt(void) { ksft_cnt.ksft_pass++; } static inline void ksft_inc_fail_cnt(void) { ksft_cnt.ksft_fail++; } static inline void ksft_inc_xfail_cnt(void) { ksft_cnt.ksft_xfail++; } static inline void ksft_inc_xpass_cnt(void) { ksft_cnt.ksft_xpass++; } static inline void ksft_inc_xskip_cnt(void) { ksft_cnt.ksft_xskip++; } +static inline void ksft_print_header(void) +{ + printf("TAP version 13\n"); +} + static inline void ksft_print_cnts(void) { - printf("Pass: %d Fail: %d Xfail: %d Xpass: %d, Xskip: %d\n", - ksft_cnt.ksft_pass, ksft_cnt.ksft_fail, - ksft_cnt.ksft_xfail, ksft_cnt.ksft_xpass, - ksft_cnt.ksft_xskip); + printf("1..%d\n", ksft_test_num()); +} + +static inline void ksft_print_msg(const char *msg, ...) +{ + va_list args; + + va_start(args, msg); + printf("# "); + vprintf(msg, args); + va_end(args); +} + +static inline void ksft_test_result_pass(const char *msg, ...) +{ + va_list args; + + ksft_cnt.ksft_pass++; + + va_start(args, msg); + printf("ok %d ", ksft_test_num()); + vprintf(msg, args); + va_end(args); +} + +static inline void ksft_test_result_fail(const char *msg, ...) +{ + va_list args; + + ksft_cnt.ksft_fail++; + + va_start(args, msg); + printf("not ok %d ", ksft_test_num()); + vprintf(msg, args); + va_end(args); +} + +static inline void ksft_test_result_skip(const char *msg, ...) 
+{ + va_list args; + + ksft_cnt.ksft_xskip++; + + va_start(args, msg); + printf("ok %d # skip ", ksft_test_num()); + vprintf(msg, args); + va_end(args); } static inline int ksft_exit_pass(void) { + ksft_print_cnts(); exit(KSFT_PASS); } + static inline int ksft_exit_fail(void) { + printf("Bail out!\n"); + ksft_print_cnts(); exit(KSFT_FAIL); } + +static inline int ksft_exit_fail_msg(const char *msg, ...) +{ + va_list args; + + va_start(args, msg); + printf("Bail out! "); + vprintf(msg, args); + va_end(args); + + ksft_print_cnts(); + exit(KSFT_FAIL); +} + static inline int ksft_exit_xfail(void) { + ksft_print_cnts(); exit(KSFT_XFAIL); } + static inline int ksft_exit_xpass(void) { + ksft_print_cnts(); exit(KSFT_XPASS); } -static inline int ksft_exit_skip(void) + +static inline int ksft_exit_skip(const char *msg, ...) { + if (msg) { + va_list args; + + va_start(args, msg); + printf("1..%d # Skipped: ", ksft_test_num()); + vprintf(msg, args); + va_end(args); + } else { + ksft_print_cnts(); + } exit(KSFT_SKIP); } diff --git a/tools/testing/selftests/seccomp/test_harness.h b/tools/testing/selftests/kselftest_harness.h similarity index 52% rename from tools/testing/selftests/seccomp/test_harness.h rename to tools/testing/selftests/kselftest_harness.h index a786c69c7584..c56f72e07cd7 100644 --- a/tools/testing/selftests/seccomp/test_harness.h +++ b/tools/testing/selftests/kselftest_harness.h @@ -2,44 +2,53 @@ * Copyright (c) 2012 The Chromium OS Authors. All rights reserved. * Use of this source code is governed by the GPLv2 license. * - * test_harness.h: simple C unit test helper. + * kselftest_harness.h: simple C unit test helper. * - * Usage: - * #include "test_harness.h" - * TEST(standalone_test) { - * do_some_stuff; - * EXPECT_GT(10, stuff) { - * stuff_state_t state; - * enumerate_stuff_state(&state); - * TH_LOG("expectation failed with state: %s", state.msg); - * } - * more_stuff; - * ASSERT_NE(some_stuff, NULL) TH_LOG("how did it happen?!"); - * last_stuff; - * EXPECT_EQ(0, last_stuff); - * } - * - * FIXTURE(my_fixture) { - * mytype_t *data; - * int awesomeness_level; - * }; - * FIXTURE_SETUP(my_fixture) { - * self->data = mytype_new(); - * ASSERT_NE(NULL, self->data); - * } - * FIXTURE_TEARDOWN(my_fixture) { - * mytype_free(self->data); - * } - * TEST_F(my_fixture, data_is_good) { - * EXPECT_EQ(1, is_my_data_good(self->data)); - * } - * - * TEST_HARNESS_MAIN + * See documentation in Documentation/dev-tools/kselftest.rst * * API inspired by code.google.com/p/googletest */ -#ifndef TEST_HARNESS_H_ -#define TEST_HARNESS_H_ + +/** + * DOC: example + * + * .. 
code-block:: c + * + * #include "../kselftest_harness.h" + * + * TEST(standalone_test) { + * do_some_stuff; + * EXPECT_GT(10, stuff) { + * stuff_state_t state; + * enumerate_stuff_state(&state); + * TH_LOG("expectation failed with state: %s", state.msg); + * } + * more_stuff; + * ASSERT_NE(some_stuff, NULL) TH_LOG("how did it happen?!"); + * last_stuff; + * EXPECT_EQ(0, last_stuff); + * } + * + * FIXTURE(my_fixture) { + * mytype_t *data; + * int awesomeness_level; + * }; + * FIXTURE_SETUP(my_fixture) { + * self->data = mytype_new(); + * ASSERT_NE(NULL, self->data); + * } + * FIXTURE_TEARDOWN(my_fixture) { + * mytype_free(self->data); + * } + * TEST_F(my_fixture, data_is_good) { + * EXPECT_EQ(1, is_my_data_good(self->data)); + * } + * + * TEST_HARNESS_MAIN + */ + +#ifndef __KSELFTEST_HARNESS_H +#define __KSELFTEST_HARNESS_H #define _GNU_SOURCE #include @@ -50,147 +59,6 @@ #include #include -/* All exported functionality should be declared through this macro. */ -#define TEST_API(x) _##x - -/* - * Exported APIs - */ - -/* TEST(name) { implementation } - * Defines a test by name. - * Names must be unique and tests must not be run in parallel. The - * implementation containing block is a function and scoping should be treated - * as such. Returning early may be performed with a bare "return;" statement. - * - * EXPECT_* and ASSERT_* are valid in a TEST() { } context. - */ -#define TEST TEST_API(TEST) - -/* TEST_SIGNAL(name, signal) { implementation } - * Defines a test by name and the expected term signal. - * Names must be unique and tests must not be run in parallel. The - * implementation containing block is a function and scoping should be treated - * as such. Returning early may be performed with a bare "return;" statement. - * - * EXPECT_* and ASSERT_* are valid in a TEST() { } context. - */ -#define TEST_SIGNAL TEST_API(TEST_SIGNAL) - -/* FIXTURE(datatype name) { - * type property1; - * ... - * }; - * Defines the data provided to TEST_F()-defined tests as |self|. It should be - * populated and cleaned up using FIXTURE_SETUP and FIXTURE_TEARDOWN. - */ -#define FIXTURE TEST_API(FIXTURE) - -/* FIXTURE_DATA(datatype name) - * This call may be used when the type of the fixture data - * is needed. In general, this should not be needed unless - * the |self| is being passed to a helper directly. - */ -#define FIXTURE_DATA TEST_API(FIXTURE_DATA) - -/* FIXTURE_SETUP(fixture name) { implementation } - * Populates the required "setup" function for a fixture. An instance of the - * datatype defined with _FIXTURE_DATA will be exposed as |self| for the - * implementation. - * - * ASSERT_* are valid for use in this context and will prempt the execution - * of any dependent fixture tests. - * - * A bare "return;" statement may be used to return early. - */ -#define FIXTURE_SETUP TEST_API(FIXTURE_SETUP) - -/* FIXTURE_TEARDOWN(fixture name) { implementation } - * Populates the required "teardown" function for a fixture. An instance of the - * datatype defined with _FIXTURE_DATA will be exposed as |self| for the - * implementation to clean up. - * - * A bare "return;" statement may be used to return early. - */ -#define FIXTURE_TEARDOWN TEST_API(FIXTURE_TEARDOWN) - -/* TEST_F(fixture, name) { implementation } - * Defines a test that depends on a fixture (e.g., is part of a test case). - * Very similar to TEST() except that |self| is the setup instance of fixture's - * datatype exposed for use by the implementation. 
- */ -#define TEST_F TEST_API(TEST_F) - -#define TEST_F_SIGNAL TEST_API(TEST_F_SIGNAL) - -/* Use once to append a main() to the test file. E.g., - * TEST_HARNESS_MAIN - */ -#define TEST_HARNESS_MAIN TEST_API(TEST_HARNESS_MAIN) - -/* - * Operators for use in TEST and TEST_F. - * ASSERT_* calls will stop test execution immediately. - * EXPECT_* calls will emit a failure warning, note it, and continue. - */ - -/* ASSERT_EQ(expected, measured): expected == measured */ -#define ASSERT_EQ TEST_API(ASSERT_EQ) -/* ASSERT_NE(expected, measured): expected != measured */ -#define ASSERT_NE TEST_API(ASSERT_NE) -/* ASSERT_LT(expected, measured): expected < measured */ -#define ASSERT_LT TEST_API(ASSERT_LT) -/* ASSERT_LE(expected, measured): expected <= measured */ -#define ASSERT_LE TEST_API(ASSERT_LE) -/* ASSERT_GT(expected, measured): expected > measured */ -#define ASSERT_GT TEST_API(ASSERT_GT) -/* ASSERT_GE(expected, measured): expected >= measured */ -#define ASSERT_GE TEST_API(ASSERT_GE) -/* ASSERT_NULL(measured): NULL == measured */ -#define ASSERT_NULL TEST_API(ASSERT_NULL) -/* ASSERT_TRUE(measured): measured != 0 */ -#define ASSERT_TRUE TEST_API(ASSERT_TRUE) -/* ASSERT_FALSE(measured): measured == 0 */ -#define ASSERT_FALSE TEST_API(ASSERT_FALSE) -/* ASSERT_STREQ(expected, measured): !strcmp(expected, measured) */ -#define ASSERT_STREQ TEST_API(ASSERT_STREQ) -/* ASSERT_STRNE(expected, measured): strcmp(expected, measured) */ -#define ASSERT_STRNE TEST_API(ASSERT_STRNE) -/* EXPECT_EQ(expected, measured): expected == measured */ -#define EXPECT_EQ TEST_API(EXPECT_EQ) -/* EXPECT_NE(expected, measured): expected != measured */ -#define EXPECT_NE TEST_API(EXPECT_NE) -/* EXPECT_LT(expected, measured): expected < measured */ -#define EXPECT_LT TEST_API(EXPECT_LT) -/* EXPECT_LE(expected, measured): expected <= measured */ -#define EXPECT_LE TEST_API(EXPECT_LE) -/* EXPECT_GT(expected, measured): expected > measured */ -#define EXPECT_GT TEST_API(EXPECT_GT) -/* EXPECT_GE(expected, measured): expected >= measured */ -#define EXPECT_GE TEST_API(EXPECT_GE) -/* EXPECT_NULL(measured): NULL == measured */ -#define EXPECT_NULL TEST_API(EXPECT_NULL) -/* EXPECT_TRUE(measured): 0 != measured */ -#define EXPECT_TRUE TEST_API(EXPECT_TRUE) -/* EXPECT_FALSE(measured): 0 == measured */ -#define EXPECT_FALSE TEST_API(EXPECT_FALSE) -/* EXPECT_STREQ(expected, measured): !strcmp(expected, measured) */ -#define EXPECT_STREQ TEST_API(EXPECT_STREQ) -/* EXPECT_STRNE(expected, measured): strcmp(expected, measured) */ -#define EXPECT_STRNE TEST_API(EXPECT_STRNE) - -/* TH_LOG(format, ...) - * Optional debug logging function available for use in tests. - * Logging may be enabled or disabled by defining TH_LOG_ENABLED. - * E.g., #define TH_LOG_ENABLED 1 - * If no definition is provided, logging is enabled by default. - */ -#define TH_LOG TEST_API(TH_LOG) - -/* - * Internal implementation. - * - */ /* Utilities exposed to the test definitions */ #ifndef TH_LOG_STREAM @@ -201,7 +69,23 @@ # define TH_LOG_ENABLED 1 #endif -#define _TH_LOG(fmt, ...) do { \ +/** + * TH_LOG(fmt, ...) + * + * @fmt: format string + * @...: optional arguments + * + * .. code-block:: c + * + * TH_LOG(format, ...) + * + * Optional debug logging function available for use in tests. + * Logging may be enabled or disabled by defining TH_LOG_ENABLED. + * E.g., #define TH_LOG_ENABLED 1 + * + * If no definition is provided, logging is enabled by default. + */ +#define TH_LOG(fmt, ...) 
do { \ if (TH_LOG_ENABLED) \ __TH_LOG(fmt, ##__VA_ARGS__); \ } while (0) @@ -211,10 +95,43 @@ fprintf(TH_LOG_STREAM, "%s:%d:%s:" fmt "\n", \ __FILE__, __LINE__, _metadata->name, ##__VA_ARGS__) -/* Defines the test function and creates the registration stub. */ -#define _TEST(test_name) __TEST_IMPL(test_name, -1) +/** + * TEST(test_name) - Defines the test function and creates the registration + * stub + * + * @test_name: test name + * + * .. code-block:: c + * + * TEST(name) { implementation } + * + * Defines a test by name. + * Names must be unique and tests must not be run in parallel. The + * implementation containing block is a function and scoping should be treated + * as such. Returning early may be performed with a bare "return;" statement. + * + * EXPECT_* and ASSERT_* are valid in a TEST() { } context. + */ +#define TEST(test_name) __TEST_IMPL(test_name, -1) -#define _TEST_SIGNAL(test_name, signal) __TEST_IMPL(test_name, signal) +/** + * TEST_SIGNAL(test_name, signal) + * + * @test_name: test name + * @signal: signal number + * + * .. code-block:: c + * + * TEST_SIGNAL(name, signal) { implementation } + * + * Defines a test by name and the expected term signal. + * Names must be unique and tests must not be run in parallel. The + * implementation containing block is a function and scoping should be treated + * as such. Returning early may be performed with a bare "return;" statement. + * + * EXPECT_* and ASSERT_* are valid in a TEST() { } context. + */ +#define TEST_SIGNAL(test_name, signal) __TEST_IMPL(test_name, signal) #define __TEST_IMPL(test_name, _signal) \ static void test_name(struct __test_metadata *_metadata); \ @@ -228,50 +145,121 @@ static void test_name( \ struct __test_metadata __attribute__((unused)) *_metadata) -/* Wraps the struct name so we have one less argument to pass around. */ -#define _FIXTURE_DATA(fixture_name) struct _test_data_##fixture_name +/** + * FIXTURE_DATA(datatype_name) - Wraps the struct name so we have one less + * argument to pass around + * + * @datatype_name: datatype name + * + * .. code-block:: c + * + * FIXTURE_DATA(datatype name) + * + * This call may be used when the type of the fixture data + * is needed. In general, this should not be needed unless + * the *self* is being passed to a helper directly. + */ +#define FIXTURE_DATA(datatype_name) struct _test_data_##datatype_name -/* Called once per fixture to setup the data and register. */ -#define _FIXTURE(fixture_name) \ +/** + * FIXTURE(fixture_name) - Called once per fixture to setup the data and + * register + * + * @fixture_name: fixture name + * + * .. code-block:: c + * + * FIXTURE(datatype name) { + * type property1; + * ... + * }; + * + * Defines the data provided to TEST_F()-defined tests as *self*. It should be + * populated and cleaned up using FIXTURE_SETUP() and FIXTURE_TEARDOWN(). + */ +#define FIXTURE(fixture_name) \ static void __attribute__((constructor)) \ _register_##fixture_name##_data(void) \ { \ __fixture_count++; \ } \ - _FIXTURE_DATA(fixture_name) + FIXTURE_DATA(fixture_name) -/* Prepares the setup function for the fixture. |_metadata| is included - * so that ASSERT_* work as a convenience. +/** + * FIXTURE_SETUP(fixture_name) - Prepares the setup function for the fixture. + * *_metadata* is included so that ASSERT_* work as a convenience + * + * @fixture_name: fixture name + * + * .. code-block:: c + * + * FIXTURE_SETUP(fixture name) { implementation } + * + * Populates the required "setup" function for a fixture. 
An instance of the + * datatype defined with FIXTURE_DATA() will be exposed as *self* for the + * implementation. + * + * ASSERT_* are valid for use in this context and will prempt the execution + * of any dependent fixture tests. + * + * A bare "return;" statement may be used to return early. */ -#define _FIXTURE_SETUP(fixture_name) \ +#define FIXTURE_SETUP(fixture_name) \ void fixture_name##_setup( \ struct __test_metadata __attribute__((unused)) *_metadata, \ - _FIXTURE_DATA(fixture_name) __attribute__((unused)) *self) -#define _FIXTURE_TEARDOWN(fixture_name) \ + FIXTURE_DATA(fixture_name) __attribute__((unused)) *self) +/** + * FIXTURE_TEARDOWN(fixture_name) + * + * @fixture_name: fixture name + * + * .. code-block:: c + * + * FIXTURE_TEARDOWN(fixture name) { implementation } + * + * Populates the required "teardown" function for a fixture. An instance of the + * datatype defined with FIXTURE_DATA() will be exposed as *self* for the + * implementation to clean up. + * + * A bare "return;" statement may be used to return early. + */ +#define FIXTURE_TEARDOWN(fixture_name) \ void fixture_name##_teardown( \ struct __test_metadata __attribute__((unused)) *_metadata, \ - _FIXTURE_DATA(fixture_name) __attribute__((unused)) *self) + FIXTURE_DATA(fixture_name) __attribute__((unused)) *self) -/* Emits test registration and helpers for fixture-based test - * cases. - * TODO(wad) register fixtures on dedicated test lists. +/** + * TEST_F(fixture_name, test_name) - Emits test registration and helpers for + * fixture-based test cases + * + * @fixture_name: fixture name + * @test_name: test name + * + * .. code-block:: c + * + * TEST_F(fixture, name) { implementation } + * + * Defines a test that depends on a fixture (e.g., is part of a test case). + * Very similar to TEST() except that *self* is the setup instance of fixture's + * datatype exposed for use by the implementation. */ -#define _TEST_F(fixture_name, test_name) \ +/* TODO(wad) register fixtures on dedicated test lists. */ +#define TEST_F(fixture_name, test_name) \ __TEST_F_IMPL(fixture_name, test_name, -1) -#define _TEST_F_SIGNAL(fixture_name, test_name, signal) \ +#define TEST_F_SIGNAL(fixture_name, test_name, signal) \ __TEST_F_IMPL(fixture_name, test_name, signal) #define __TEST_F_IMPL(fixture_name, test_name, signal) \ static void fixture_name##_##test_name( \ struct __test_metadata *_metadata, \ - _FIXTURE_DATA(fixture_name) *self); \ + FIXTURE_DATA(fixture_name) *self); \ static inline void wrapper_##fixture_name##_##test_name( \ struct __test_metadata *_metadata) \ { \ /* fixture data is alloced, setup, and torn down per call. */ \ - _FIXTURE_DATA(fixture_name) self; \ - memset(&self, 0, sizeof(_FIXTURE_DATA(fixture_name))); \ + FIXTURE_DATA(fixture_name) self; \ + memset(&self, 0, sizeof(FIXTURE_DATA(fixture_name))); \ fixture_name##_setup(_metadata, &self); \ /* Let setup failure terminate early. */ \ if (!_metadata->passed) \ @@ -292,10 +280,18 @@ } \ static void fixture_name##_##test_name( \ struct __test_metadata __attribute__((unused)) *_metadata, \ - _FIXTURE_DATA(fixture_name) __attribute__((unused)) *self) + FIXTURE_DATA(fixture_name) __attribute__((unused)) *self) -/* Exports a simple wrapper to run the test harness. */ -#define _TEST_HARNESS_MAIN \ +/** + * TEST_HARNESS_MAIN - Simple wrapper to run the test harness + * + * .. code-block:: c + * + * TEST_HARNESS_MAIN + * + * Use once to append a main() to the test file. 
+ */ +#define TEST_HARNESS_MAIN \ static void __attribute__((constructor)) \ __constructor_order_last(void) \ { \ @@ -306,54 +302,249 @@ return test_harness_run(argc, argv); \ } -#define _ASSERT_EQ(_expected, _seen) \ - __EXPECT(_expected, _seen, ==, 1) -#define _ASSERT_NE(_expected, _seen) \ - __EXPECT(_expected, _seen, !=, 1) -#define _ASSERT_LT(_expected, _seen) \ - __EXPECT(_expected, _seen, <, 1) -#define _ASSERT_LE(_expected, _seen) \ - __EXPECT(_expected, _seen, <=, 1) -#define _ASSERT_GT(_expected, _seen) \ - __EXPECT(_expected, _seen, >, 1) -#define _ASSERT_GE(_expected, _seen) \ - __EXPECT(_expected, _seen, >=, 1) -#define _ASSERT_NULL(_seen) \ - __EXPECT(NULL, _seen, ==, 1) +/** + * DOC: operators + * + * Operators for use in TEST() and TEST_F(). + * ASSERT_* calls will stop test execution immediately. + * EXPECT_* calls will emit a failure warning, note it, and continue. + */ -#define _ASSERT_TRUE(_seen) \ - _ASSERT_NE(0, _seen) -#define _ASSERT_FALSE(_seen) \ - _ASSERT_EQ(0, _seen) -#define _ASSERT_STREQ(_expected, _seen) \ - __EXPECT_STR(_expected, _seen, ==, 1) -#define _ASSERT_STRNE(_expected, _seen) \ - __EXPECT_STR(_expected, _seen, !=, 1) +/** + * ASSERT_EQ(expected, seen) + * + * @expected: expected value + * @seen: measured value + * + * ASSERT_EQ(expected, measured): expected == measured + */ +#define ASSERT_EQ(expected, seen) \ + __EXPECT(expected, seen, ==, 1) -#define _EXPECT_EQ(_expected, _seen) \ - __EXPECT(_expected, _seen, ==, 0) -#define _EXPECT_NE(_expected, _seen) \ - __EXPECT(_expected, _seen, !=, 0) -#define _EXPECT_LT(_expected, _seen) \ - __EXPECT(_expected, _seen, <, 0) -#define _EXPECT_LE(_expected, _seen) \ - __EXPECT(_expected, _seen, <=, 0) -#define _EXPECT_GT(_expected, _seen) \ - __EXPECT(_expected, _seen, >, 0) -#define _EXPECT_GE(_expected, _seen) \ - __EXPECT(_expected, _seen, >=, 0) +/** + * ASSERT_NE(expected, seen) + * + * @expected: expected value + * @seen: measured value + * + * ASSERT_NE(expected, measured): expected != measured + */ +#define ASSERT_NE(expected, seen) \ + __EXPECT(expected, seen, !=, 1) -#define _EXPECT_NULL(_seen) \ - __EXPECT(NULL, _seen, ==, 0) -#define _EXPECT_TRUE(_seen) \ - _EXPECT_NE(0, _seen) -#define _EXPECT_FALSE(_seen) \ - _EXPECT_EQ(0, _seen) +/** + * ASSERT_LT(expected, seen) + * + * @expected: expected value + * @seen: measured value + * + * ASSERT_LT(expected, measured): expected < measured + */ +#define ASSERT_LT(expected, seen) \ + __EXPECT(expected, seen, <, 1) -#define _EXPECT_STREQ(_expected, _seen) \ - __EXPECT_STR(_expected, _seen, ==, 0) -#define _EXPECT_STRNE(_expected, _seen) \ - __EXPECT_STR(_expected, _seen, !=, 0) +/** + * ASSERT_LE(expected, seen) + * + * @expected: expected value + * @seen: measured value + * + * ASSERT_LE(expected, measured): expected <= measured + */ +#define ASSERT_LE(expected, seen) \ + __EXPECT(expected, seen, <=, 1) + +/** + * ASSERT_GT(expected, seen) + * + * @expected: expected value + * @seen: measured value + * + * ASSERT_GT(expected, measured): expected > measured + */ +#define ASSERT_GT(expected, seen) \ + __EXPECT(expected, seen, >, 1) + +/** + * ASSERT_GE(expected, seen) + * + * @expected: expected value + * @seen: measured value + * + * ASSERT_GE(expected, measured): expected >= measured + */ +#define ASSERT_GE(expected, seen) \ + __EXPECT(expected, seen, >=, 1) + +/** + * ASSERT_NULL(seen) + * + * @seen: measured value + * + * ASSERT_NULL(measured): NULL == measured + */ +#define ASSERT_NULL(seen) \ + __EXPECT(NULL, seen, ==, 1) + +/** + * ASSERT_TRUE(seen) + 
* + * @seen: measured value + * + * ASSERT_TRUE(measured): measured != 0 + */ +#define ASSERT_TRUE(seen) \ + ASSERT_NE(0, seen) + +/** + * ASSERT_FALSE(seen) + * + * @seen: measured value + * + * ASSERT_FALSE(measured): measured == 0 + */ +#define ASSERT_FALSE(seen) \ + ASSERT_EQ(0, seen) + +/** + * ASSERT_STREQ(expected, seen) + * + * @expected: expected value + * @seen: measured value + * + * ASSERT_STREQ(expected, measured): !strcmp(expected, measured) + */ +#define ASSERT_STREQ(expected, seen) \ + __EXPECT_STR(expected, seen, ==, 1) + +/** + * ASSERT_STRNE(expected, seen) + * + * @expected: expected value + * @seen: measured value + * + * ASSERT_STRNE(expected, measured): strcmp(expected, measured) + */ +#define ASSERT_STRNE(expected, seen) \ + __EXPECT_STR(expected, seen, !=, 1) + +/** + * EXPECT_EQ(expected, seen) + * + * @expected: expected value + * @seen: measured value + * + * EXPECT_EQ(expected, measured): expected == measured + */ +#define EXPECT_EQ(expected, seen) \ + __EXPECT(expected, seen, ==, 0) + +/** + * EXPECT_NE(expected, seen) + * + * @expected: expected value + * @seen: measured value + * + * EXPECT_NE(expected, measured): expected != measured + */ +#define EXPECT_NE(expected, seen) \ + __EXPECT(expected, seen, !=, 0) + +/** + * EXPECT_LT(expected, seen) + * + * @expected: expected value + * @seen: measured value + * + * EXPECT_LT(expected, measured): expected < measured + */ +#define EXPECT_LT(expected, seen) \ + __EXPECT(expected, seen, <, 0) + +/** + * EXPECT_LE(expected, seen) + * + * @expected: expected value + * @seen: measured value + * + * EXPECT_LE(expected, measured): expected <= measured + */ +#define EXPECT_LE(expected, seen) \ + __EXPECT(expected, seen, <=, 0) + +/** + * EXPECT_GT(expected, seen) + * + * @expected: expected value + * @seen: measured value + * + * EXPECT_GT(expected, measured): expected > measured + */ +#define EXPECT_GT(expected, seen) \ + __EXPECT(expected, seen, >, 0) + +/** + * EXPECT_GE(expected, seen) + * + * @expected: expected value + * @seen: measured value + * + * EXPECT_GE(expected, measured): expected >= measured + */ +#define EXPECT_GE(expected, seen) \ + __EXPECT(expected, seen, >=, 0) + +/** + * EXPECT_NULL(seen) + * + * @seen: measured value + * + * EXPECT_NULL(measured): NULL == measured + */ +#define EXPECT_NULL(seen) \ + __EXPECT(NULL, seen, ==, 0) + +/** + * EXPECT_TRUE(seen) + * + * @seen: measured value + * + * EXPECT_TRUE(measured): 0 != measured + */ +#define EXPECT_TRUE(seen) \ + EXPECT_NE(0, seen) + +/** + * EXPECT_FALSE(seen) + * + * @seen: measured value + * + * EXPECT_FALSE(measured): 0 == measured + */ +#define EXPECT_FALSE(seen) \ + EXPECT_EQ(0, seen) + +/** + * EXPECT_STREQ(expected, seen) + * + * @expected: expected value + * @seen: measured value + * + * EXPECT_STREQ(expected, measured): !strcmp(expected, measured) + */ +#define EXPECT_STREQ(expected, seen) \ + __EXPECT_STR(expected, seen, ==, 0) + +/** + * EXPECT_STRNE(expected, seen) + * + * @expected: expected value + * @seen: measured value + * + * EXPECT_STRNE(expected, measured): strcmp(expected, measured) + */ +#define EXPECT_STRNE(expected, seen) \ + __EXPECT_STR(expected, seen, !=, 0) #define ARRAY_SIZE(a) (sizeof(a) / sizeof(a[0])) @@ -532,4 +723,4 @@ static void __attribute__((constructor)) __constructor_order_first(void) __constructor_order = _CONSTRUCTOR_ORDER_FORWARD; } -#endif /* TEST_HARNESS_H_ */ +#endif /* __KSELFTEST_HARNESS_H */ diff --git a/tools/testing/selftests/lib/bitmap.sh b/tools/testing/selftests/lib/bitmap.sh index 
2da187b6ddad..b073c22a3435 100755 --- a/tools/testing/selftests/lib/bitmap.sh +++ b/tools/testing/selftests/lib/bitmap.sh @@ -1,5 +1,9 @@ #!/bin/sh # Runs bitmap infrastructure tests using test_bitmap kernel module +if ! /sbin/modprobe -q -n test_bitmap; then + echo "bitmap: [SKIP]" + exit 77 +fi if /sbin/modprobe -q test_bitmap; then /sbin/modprobe -q -r test_bitmap diff --git a/tools/testing/selftests/lib/printf.sh b/tools/testing/selftests/lib/printf.sh index 4fdc70fe6980..cbf3b124bd94 100755 --- a/tools/testing/selftests/lib/printf.sh +++ b/tools/testing/selftests/lib/printf.sh @@ -1,5 +1,9 @@ #!/bin/sh # Runs printf infrastructure using test_printf kernel module +if ! /sbin/modprobe -q -n test_printf; then + echo "printf: [SKIP]" + exit 77 +fi if /sbin/modprobe -q test_printf; then /sbin/modprobe -q -r test_printf diff --git a/tools/testing/selftests/membarrier/membarrier_test.c b/tools/testing/selftests/membarrier/membarrier_test.c index 535f0fef4d0b..21399fcf1a59 100644 --- a/tools/testing/selftests/membarrier/membarrier_test.c +++ b/tools/testing/selftests/membarrier/membarrier_test.c @@ -7,56 +7,63 @@ #include "../kselftest.h" -enum test_membarrier_status { - TEST_MEMBARRIER_PASS = 0, - TEST_MEMBARRIER_FAIL, - TEST_MEMBARRIER_SKIP, -}; - static int sys_membarrier(int cmd, int flags) { return syscall(__NR_membarrier, cmd, flags); } -static enum test_membarrier_status test_membarrier_cmd_fail(void) +static int test_membarrier_cmd_fail(void) { int cmd = -1, flags = 0; if (sys_membarrier(cmd, flags) != -1) { - printf("membarrier: Wrong command should fail but passed.\n"); - return TEST_MEMBARRIER_FAIL; + ksft_exit_fail_msg( + "sys membarrier invalid command test: command = %d, flags = %d. Should fail, but passed\n", + cmd, flags); } - return TEST_MEMBARRIER_PASS; + + ksft_test_result_pass( + "sys membarrier invalid command test: command = %d, flags = %d. Failed as expected\n", + cmd, flags); + return 0; } -static enum test_membarrier_status test_membarrier_flags_fail(void) +static int test_membarrier_flags_fail(void) { int cmd = MEMBARRIER_CMD_QUERY, flags = 1; if (sys_membarrier(cmd, flags) != -1) { - printf("membarrier: Wrong flags should fail but passed.\n"); - return TEST_MEMBARRIER_FAIL; + ksft_exit_fail_msg( + "sys membarrier MEMBARRIER_CMD_QUERY invalid flags test: flags = %d. Should fail, but passed\n", + flags); } - return TEST_MEMBARRIER_PASS; + + ksft_test_result_pass( + "sys membarrier MEMBARRIER_CMD_QUERY invalid flags test: flags = %d. Failed as expected\n", + flags); + return 0; } -static enum test_membarrier_status test_membarrier_success(void) +static int test_membarrier_success(void) { int cmd = MEMBARRIER_CMD_SHARED, flags = 0; + const char *test_name = "sys membarrier MEMBARRIER_CMD_SHARED\n"; if (sys_membarrier(cmd, flags) != 0) { - printf("membarrier: Executing MEMBARRIER_CMD_SHARED failed. 
%s.\n", - strerror(errno)); - return TEST_MEMBARRIER_FAIL; + ksft_exit_fail_msg( + "sys membarrier MEMBARRIER_CMD_SHARED test: flags = %d\n", + flags); } - printf("membarrier: MEMBARRIER_CMD_SHARED success.\n"); - return TEST_MEMBARRIER_PASS; + ksft_test_result_pass( + "sys membarrier MEMBARRIER_CMD_SHARED test: flags = %d\n", + flags); + return 0; } -static enum test_membarrier_status test_membarrier(void) +static int test_membarrier(void) { - enum test_membarrier_status status; + int status; status = test_membarrier_cmd_fail(); if (status) @@ -67,52 +74,38 @@ static enum test_membarrier_status test_membarrier(void) status = test_membarrier_success(); if (status) return status; - return TEST_MEMBARRIER_PASS; + return 0; } -static enum test_membarrier_status test_membarrier_query(void) +static int test_membarrier_query(void) { int flags = 0, ret; - printf("membarrier MEMBARRIER_CMD_QUERY "); ret = sys_membarrier(MEMBARRIER_CMD_QUERY, flags); if (ret < 0) { - printf("failed. %s.\n", strerror(errno)); - switch (errno) { - case ENOSYS: + if (errno == ENOSYS) { /* * It is valid to build a kernel with * CONFIG_MEMBARRIER=n. However, this skips the tests. */ - return TEST_MEMBARRIER_SKIP; - case EINVAL: - default: - return TEST_MEMBARRIER_FAIL; + ksft_exit_skip( + "sys membarrier (CONFIG_MEMBARRIER) is disabled.\n"); } + ksft_exit_fail_msg("sys_membarrier() failed\n"); } - if (!(ret & MEMBARRIER_CMD_SHARED)) { - printf("command MEMBARRIER_CMD_SHARED is not supported.\n"); - return TEST_MEMBARRIER_FAIL; - } - printf("syscall available.\n"); - return TEST_MEMBARRIER_PASS; + if (!(ret & MEMBARRIER_CMD_SHARED)) + ksft_exit_fail_msg("sys_membarrier is not supported.\n"); + + ksft_test_result_pass("sys_membarrier available\n"); + return 0; } int main(int argc, char **argv) { - switch (test_membarrier_query()) { - case TEST_MEMBARRIER_FAIL: - return ksft_exit_fail(); - case TEST_MEMBARRIER_SKIP: - return ksft_exit_skip(); - } - switch (test_membarrier()) { - case TEST_MEMBARRIER_FAIL: - return ksft_exit_fail(); - case TEST_MEMBARRIER_SKIP: - return ksft_exit_skip(); - } + ksft_print_header(); - printf("membarrier: tests done!\n"); - return ksft_exit_pass(); + test_membarrier_query(); + test_membarrier(); + + ksft_exit_pass(); } diff --git a/tools/testing/selftests/memfd/Makefile b/tools/testing/selftests/memfd/Makefile index 79891d033de1..ad8a0897e47f 100644 --- a/tools/testing/selftests/memfd/Makefile +++ b/tools/testing/selftests/memfd/Makefile @@ -7,7 +7,7 @@ TEST_PROGS := run_fuse_test.sh TEST_GEN_FILES := memfd_test fuse_mnt fuse_test fuse_mnt.o: CFLAGS += $(shell pkg-config fuse --cflags) -fuse_mnt: LDFLAGS += $(shell pkg-config fuse --libs) include ../lib.mk +$(OUTPUT)/fuse_mnt: LDLIBS += $(shell pkg-config fuse --libs) diff --git a/tools/testing/selftests/memory-hotplug/mem-on-off-test.sh b/tools/testing/selftests/memory-hotplug/mem-on-off-test.sh index 6cddde0b96f8..35025ce9ca66 100755 --- a/tools/testing/selftests/memory-hotplug/mem-on-off-test.sh +++ b/tools/testing/selftests/memory-hotplug/mem-on-off-test.sh @@ -22,6 +22,11 @@ prerequisite() echo $msg memory hotplug is not supported >&2 exit 0 fi + + if ! grep -q 1 $SYSFS/devices/system/memory/memory*/removable; then + echo $msg no hot-pluggable memory >&2 + exit 0 + fi } # @@ -39,7 +44,7 @@ hotpluggable_memory() done } -hotplaggable_offline_memory() +hotpluggable_offline_memory() { hotpluggable_memory offline } @@ -75,9 +80,12 @@ online_memory_expect_success() if ! 
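The membarrier conversion above replaces a local status enum with the ksft_* reporting helpers from kselftest.h. A rough sketch of that reporting pattern, assuming the same ../kselftest.h include path; the two static helpers are stand-ins invented here for illustration::

    /* Hypothetical ksft_* reporting skeleton; not part of this patch. */
    #include "../kselftest.h"

    /* Stand-ins for a real prerequisite probe and a real test body. */
    static int feature_is_available(void) { return 1; }
    static int run_one_check(void)        { return 0; }

    int main(void)
    {
        ksft_print_header();

        if (!feature_is_available())
            ksft_exit_skip("feature not available, skipping\n");

        if (run_one_check())
            ksft_exit_fail_msg("check failed\n");

        ksft_test_result_pass("check passed\n");
        ksft_exit_pass();
    }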
online_memory $memory; then echo $FUNCNAME $memory: unexpected fail >&2 + return 1 elif ! memory_is_online $memory; then echo $FUNCNAME $memory: unexpected offline >&2 + return 1 fi + return 0 } online_memory_expect_fail() @@ -86,9 +94,12 @@ online_memory_expect_fail() if online_memory $memory 2> /dev/null; then echo $FUNCNAME $memory: unexpected success >&2 + return 1 elif ! memory_is_offline $memory; then echo $FUNCNAME $memory: unexpected online >&2 + return 1 fi + return 0 } offline_memory_expect_success() @@ -97,9 +108,12 @@ offline_memory_expect_success() if ! offline_memory $memory; then echo $FUNCNAME $memory: unexpected fail >&2 + return 1 elif ! memory_is_offline $memory; then echo $FUNCNAME $memory: unexpected offline >&2 + return 1 fi + return 0 } offline_memory_expect_fail() @@ -108,14 +122,18 @@ offline_memory_expect_fail() if offline_memory $memory 2> /dev/null; then echo $FUNCNAME $memory: unexpected success >&2 + return 1 elif ! memory_is_online $memory; then echo $FUNCNAME $memory: unexpected offline >&2 + return 1 fi + return 0 } error=-12 priority=0 ratio=10 +retval=0 while getopts e:hp:r: opt; do case $opt in @@ -131,6 +149,10 @@ while getopts e:hp:r: opt; do ;; r) ratio=$OPTARG + if [ "$ratio" -gt 100 ] || [ "$ratio" -lt 0 ]; then + echo "The percentage should be an integer within 0~100 range" + exit 1 + fi ;; esac done @@ -143,35 +165,58 @@ fi prerequisite echo "Test scope: $ratio% hotplug memory" -echo -e "\t online all hotplug memory in offline state" -echo -e "\t offline $ratio% hotplug memory in online state" -echo -e "\t online all hotplug memory in offline state" # # Online all hot-pluggable memory # -for memory in `hotplaggable_offline_memory`; do - echo offline-online $memory - online_memory_expect_success $memory -done +hotpluggable_num=`hotpluggable_offline_memory | wc -l` +echo -e "\t online all hot-pluggable memory in offline state:" +if [ "$hotpluggable_num" -gt 0 ]; then + for memory in `hotpluggable_offline_memory`; do + echo "offline->online memory$memory" + if ! online_memory_expect_success $memory; then + retval=1 + fi + done +else + echo -e "\t\t SKIPPED - no hot-pluggable memory in offline state" +fi # # Offline $ratio percent of hot-pluggable memory # +hotpluggable_num=`hotpluggable_online_memory | wc -l` +target=`echo "a=$hotpluggable_num*$ratio; if ( a%100 ) a/100+1 else a/100" | bc` +echo -e "\t offline $ratio% hot-pluggable memory in online state" +echo -e "\t trying to offline $target out of $hotpluggable_num memory block(s):" for memory in `hotpluggable_online_memory`; do - if [ $((RANDOM % 100)) -lt $ratio ]; then - echo online-offline $memory - offline_memory_expect_success $memory + if [ "$target" -gt 0 ]; then + echo "online->offline memory$memory" + if offline_memory_expect_success $memory; then + target=$(($target - 1)) + fi fi done +if [ "$target" -gt 0 ]; then + retval=1 + echo -e "\t\t FAILED - unable to offline some memory blocks, device busy?" +fi # # Online all hot-pluggable memory again # -for memory in `hotplaggable_offline_memory`; do - echo offline-online $memory - online_memory_expect_success $memory -done +hotpluggable_num=`hotpluggable_offline_memory | wc -l` +echo -e "\t online all hot-pluggable memory in offline state:" +if [ "$hotpluggable_num" -gt 0 ]; then + for memory in `hotpluggable_offline_memory`; do + echo "offline->online memory$memory" + if ! 
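The hotplug hunk above sizes the offline pass with bc: it rounds hotpluggable_num * ratio / 100 up, so a non-zero ratio always tries to offline at least one block. The same ceiling division written out in C for clarity; the variable names and example figures are illustrative only::

    /* Ceiling division used to pick how many blocks to offline. */
    #include <stdio.h>

    int main(void)
    {
        unsigned long hotpluggable_num = 7;   /* example block count */
        unsigned long ratio = 10;             /* example percentage  */

        /* Equivalent to: a = num*ratio; if (a % 100) a/100+1 else a/100 */
        unsigned long target = (hotpluggable_num * ratio + 99) / 100;

        printf("trying to offline %lu out of %lu memory block(s)\n",
               target, hotpluggable_num);
        return 0;
    }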
online_memory_expect_success $memory; then + retval=1 + fi + done +else + echo -e "\t\t SKIPPED - no hot-pluggable memory in offline state" +fi # # Test with memory notifier error injection @@ -189,15 +234,16 @@ prerequisite_extra() if [ ! -d "$DEBUGFS" ]; then echo $msg debugfs is not mounted >&2 - exit 0 + exit $retval fi if [ ! -d $NOTIFIER_ERR_INJECT_DIR ]; then echo $msg memory-notifier-error-inject module is not available >&2 - exit 0 + exit $retval fi } +echo -e "\t Test with memory notifier error injection" prerequisite_extra # @@ -214,7 +260,7 @@ done # Test memory hot-add error handling (offline => online) # echo $error > $NOTIFIER_ERR_INJECT_DIR/actions/MEM_GOING_ONLINE/error -for memory in `hotplaggable_offline_memory`; do +for memory in `hotpluggable_offline_memory`; do online_memory_expect_fail $memory done @@ -222,7 +268,7 @@ done # Online all hot-pluggable memory # echo 0 > $NOTIFIER_ERR_INJECT_DIR/actions/MEM_GOING_ONLINE/error -for memory in `hotplaggable_offline_memory`; do +for memory in `hotpluggable_offline_memory`; do online_memory_expect_success $memory done @@ -236,3 +282,5 @@ done echo 0 > $NOTIFIER_ERR_INJECT_DIR/actions/MEM_GOING_OFFLINE/error /sbin/modprobe -q -r memory-notifier-error-inject + +exit $retval diff --git a/tools/testing/selftests/net/Makefile b/tools/testing/selftests/net/Makefile index 35cbb4cba410..f6c9dbf478f8 100644 --- a/tools/testing/selftests/net/Makefile +++ b/tools/testing/selftests/net/Makefile @@ -3,8 +3,6 @@ CFLAGS = -Wall -Wl,--no-as-needed -O2 -g CFLAGS += -I../../../../usr/include/ -reuseport_bpf_numa: LDFLAGS += -lnuma - TEST_PROGS := run_netsocktests run_afpackettests test_bpf.sh netdevice.sh TEST_GEN_FILES = socket TEST_GEN_FILES += psock_fanout psock_tpacket @@ -13,3 +11,4 @@ TEST_GEN_FILES += reuseport_dualstack include ../lib.mk +$(OUTPUT)/reuseport_bpf_numa: LDFLAGS += -lnuma diff --git a/tools/testing/selftests/seccomp/Makefile b/tools/testing/selftests/seccomp/Makefile index 5fa6fd2246b1..aeb0c805f3ca 100644 --- a/tools/testing/selftests/seccomp/Makefile +++ b/tools/testing/selftests/seccomp/Makefile @@ -4,3 +4,5 @@ LDFLAGS += -lpthread include ../lib.mk +$(TEST_GEN_PROGS): seccomp_bpf.c ../kselftest_harness.h + $(CC) $(CFLAGS) $(LDFLAGS) $< -o $@ diff --git a/tools/testing/selftests/seccomp/seccomp_bpf.c b/tools/testing/selftests/seccomp/seccomp_bpf.c index 00a928b833d0..73f5ea6778ce 100644 --- a/tools/testing/selftests/seccomp/seccomp_bpf.c +++ b/tools/testing/selftests/seccomp/seccomp_bpf.c @@ -37,7 +37,7 @@ #include #include -#include "test_harness.h" +#include "../kselftest_harness.h" #ifndef PR_SET_PTRACER # define PR_SET_PTRACER 0x59616d61 @@ -1310,7 +1310,7 @@ void change_syscall(struct __test_metadata *_metadata, iov.iov_len = sizeof(regs); ret = ptrace(PTRACE_GETREGSET, tracee, NT_PRSTATUS, &iov); #endif - EXPECT_EQ(0, ret); + EXPECT_EQ(0, ret) {} #if defined(__x86_64__) || defined(__i386__) || defined(__powerpc__) || \ defined(__s390__) || defined(__hppa__) diff --git a/tools/testing/selftests/size/get_size.c b/tools/testing/selftests/size/get_size.c index 2d1af7cca463..d4b59ab979a0 100644 --- a/tools/testing/selftests/size/get_size.c +++ b/tools/testing/selftests/size/get_size.c @@ -75,26 +75,31 @@ void _start(void) int ccode; struct sysinfo info; unsigned long used; + static const char *test_name = " get runtime memory use\n"; - print("Testing system size.\n"); - print("1..1\n"); + print("TAP version 13\n"); + print("# Testing system size.\n"); ccode = sysinfo(&info); if (ccode < 0) { - print("not ok 1 get 
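The seccomp hunk above turns ``EXPECT_EQ(0, ret);`` into ``EXPECT_EQ(0, ret) {}`` because the harness checks accept an optional trailing statement or block that runs only when the check fails, commonly paired with TH_LOG(). This is an assumption based on the hunk and the harness helpers rather than something spelled out in this patch; a hedged sketch of the idiom::

    /* Hypothetical on-failure block; not part of this patch. */
    #include "../kselftest_harness.h"

    TEST(optional_failure_handler)
    {
        int ret = 0;   /* stand-in for a syscall return value */

        /* The block after EXPECT_EQ() runs only if the check fails. */
        EXPECT_EQ(0, ret) {
            TH_LOG("unexpected return value: %d", ret);
        }
    }

    TEST_HARNESS_MAIN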
runtime memory use\n"); - print("# could not get sysinfo\n"); + print("not ok 1"); + print(test_name); + print(" ---\n reason: \"could not get sysinfo\"\n ...\n"); _exit(ccode); } + print("ok 1"); + print(test_name); + /* ignore cache complexities for now */ used = info.totalram - info.freeram - info.bufferram; - print_k_value("ok 1 get runtime memory use # size = ", used, - info.mem_unit); - print("# System runtime memory report (units in Kilobytes):\n"); - print_k_value("# Total: ", info.totalram, info.mem_unit); - print_k_value("# Free: ", info.freeram, info.mem_unit); - print_k_value("# Buffer: ", info.bufferram, info.mem_unit); - print_k_value("# In use: ", used, info.mem_unit); + print(" ---\n"); + print_k_value(" Total: ", info.totalram, info.mem_unit); + print_k_value(" Free: ", info.freeram, info.mem_unit); + print_k_value(" Buffer: ", info.bufferram, info.mem_unit); + print_k_value(" In use: ", used, info.mem_unit); + print(" ...\n"); + print("1..1\n"); _exit(0); } diff --git a/tools/testing/selftests/sync/sync_test.c b/tools/testing/selftests/sync/sync_test.c index 9ea08d9f0b13..62fa666e501a 100644 --- a/tools/testing/selftests/sync/sync_test.c +++ b/tools/testing/selftests/sync/sync_test.c @@ -29,6 +29,7 @@ #include #include #include +#include #include #include "synctest.h" @@ -52,10 +53,22 @@ static int run_test(int (*test)(void), char *name) exit(test()); } +static int sync_api_supported(void) +{ + struct stat sbuf; + + return 0 == stat("/sys/kernel/debug/sync/sw_sync", &sbuf); +} + int main(void) { int err = 0; + if (!sync_api_supported()) { + printf("SKIP: Sync framework not supported by kernel\n"); + return 0; + } + printf("[RUN]\tTesting sync framework\n"); err += RUN_TEST(test_alloc_timeline); diff --git a/tools/testing/selftests/sysctl/common_tests b/tools/testing/selftests/sysctl/common_tests index 17d534b1b7b4..b6862322962f 100644 --- a/tools/testing/selftests/sysctl/common_tests +++ b/tools/testing/selftests/sysctl/common_tests @@ -24,6 +24,14 @@ verify() return 0 } +exit_test() +{ + if [ ! -z ${old_strict} ]; then + echo ${old_strict} > ${WRITES_STRICT} + fi + exit $rc +} + trap 'set_orig; rm -f "${TEST_FILE}"' EXIT rc=0 @@ -63,6 +71,20 @@ else echo "ok" fi +echo -n "Checking write strict setting ... " +WRITES_STRICT="${SYSCTL}/kernel/sysctl_writes_strict" +if [ ! -e ${WRITES_STRICT} ]; then + echo "FAIL, but skip in case of old kernel" >&2 +else + old_strict=$(cat ${WRITES_STRICT}) + if [ "$old_strict" = "1" ]; then + echo "ok" + else + echo "FAIL, strict value is 0 but force to 1 to continue" >&2 + echo "1" > ${WRITES_STRICT} + fi +fi + # Now that we've validated the sanity of "set_test" and "set_orig", # we can use those functions to set starting states before running # specific behavioral tests. diff --git a/tools/testing/selftests/sysctl/run_numerictests b/tools/testing/selftests/sysctl/run_numerictests index 8510f93f2d14..e6e76c93d948 100755 --- a/tools/testing/selftests/sysctl/run_numerictests +++ b/tools/testing/selftests/sysctl/run_numerictests @@ -7,4 +7,4 @@ TEST_STR=$(( $ORIG + 1 )) . 
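The get_size conversion above reports memory use in TAP 1.3 form using sysinfo(). The real test is freestanding and uses its own print helpers; as a rough standalone sketch of the same computation with ordinary libc calls, where only the message text mirrors the selftest::

    /* Hypothetical standalone version of the "in use" computation. */
    #include <stdio.h>
    #include <sys/sysinfo.h>

    int main(void)
    {
        struct sysinfo info;
        unsigned long used;

        if (sysinfo(&info) < 0) {
            printf("not ok 1 get runtime memory use\n");
            printf("1..1\n");
            return 1;
        }

        /* ignore cache complexities, as the selftest does */
        used = info.totalram - info.freeram - info.bufferram;
        printf("ok 1 get runtime memory use # used = %lu units of %u bytes\n",
               used, info.mem_unit);
        printf("1..1\n");
        return 0;
    }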
./common_tests -exit $rc +exit_test diff --git a/tools/testing/selftests/sysctl/run_stringtests b/tools/testing/selftests/sysctl/run_stringtests index 90a9293d520c..857ec667fb02 100755 --- a/tools/testing/selftests/sysctl/run_stringtests +++ b/tools/testing/selftests/sysctl/run_stringtests @@ -74,4 +74,4 @@ else echo "ok" fi -exit $rc +exit_test diff --git a/tools/testing/selftests/vm/virtual_address_range.c b/tools/testing/selftests/vm/virtual_address_range.c index 3b02aa6eb9da..1830d66a6f0e 100644 --- a/tools/testing/selftests/vm/virtual_address_range.c +++ b/tools/testing/selftests/vm/virtual_address_range.c @@ -10,7 +10,6 @@ #include #include #include -#include #include #include @@ -32,15 +31,33 @@ * different areas one below 128TB and one above 128TB * till it reaches 512TB. One with size 128TB and the * other being 384TB. + * + * On Arm64 the address space is 256TB and no high mappings + * are supported so far. */ + #define NR_CHUNKS_128TB 8192UL /* Number of 16GB chunks for 128TB */ -#define NR_CHUNKS_384TB 24576UL /* Number of 16GB chunks for 384TB */ +#define NR_CHUNKS_256TB (NR_CHUNKS_128TB * 2UL) +#define NR_CHUNKS_384TB (NR_CHUNKS_128TB * 3UL) #define ADDR_MARK_128TB (1UL << 47) /* First address beyond 128TB */ +#define ADDR_MARK_256TB (1UL << 48) /* First address beyond 256TB */ + +#ifdef __aarch64__ +#define HIGH_ADDR_MARK ADDR_MARK_256TB +#define HIGH_ADDR_SHIFT 49 +#define NR_CHUNKS_LOW NR_CHUNKS_256TB +#define NR_CHUNKS_HIGH 0 +#else +#define HIGH_ADDR_MARK ADDR_MARK_128TB +#define HIGH_ADDR_SHIFT 48 +#define NR_CHUNKS_LOW NR_CHUNKS_128TB +#define NR_CHUNKS_HIGH NR_CHUNKS_384TB +#endif static char *hind_addr(void) { - int bits = 48 + rand() % 15; + int bits = HIGH_ADDR_SHIFT + rand() % (63 - HIGH_ADDR_SHIFT); return (char *) (1UL << bits); } @@ -50,14 +67,14 @@ static int validate_addr(char *ptr, int high_addr) unsigned long addr = (unsigned long) ptr; if (high_addr) { - if (addr < ADDR_MARK_128TB) { + if (addr < HIGH_ADDR_MARK) { printf("Bad address %lx\n", addr); return 1; } return 0; } - if (addr > ADDR_MARK_128TB) { + if (addr > HIGH_ADDR_MARK) { printf("Bad address %lx\n", addr); return 1; } @@ -79,12 +96,12 @@ static int validate_lower_address_hint(void) int main(int argc, char *argv[]) { - char *ptr[NR_CHUNKS_128TB]; - char *hptr[NR_CHUNKS_384TB]; + char *ptr[NR_CHUNKS_LOW]; + char *hptr[NR_CHUNKS_HIGH]; char *hint; unsigned long i, lchunks, hchunks; - for (i = 0; i < NR_CHUNKS_128TB; i++) { + for (i = 0; i < NR_CHUNKS_LOW; i++) { ptr[i] = mmap(NULL, MAP_CHUNK_SIZE, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); @@ -99,7 +116,7 @@ int main(int argc, char *argv[]) } lchunks = i; - for (i = 0; i < NR_CHUNKS_384TB; i++) { + for (i = 0; i < NR_CHUNKS_HIGH; i++) { hint = hind_addr(); hptr[i] = mmap(hint, MAP_CHUNK_SIZE, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
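The NR_CHUNKS_* constants in the virtual_address_range hunk above follow directly from the 16GB chunk size: 128TB divided into 16GB mmap chunks gives 8192, and the 256TB/384TB figures are simple multiples of that. A quick standalone check of the arithmetic; the names mirror the selftest's macros but the program itself is illustrative::

    /* Sanity-check the chunk arithmetic used by the selftest. */
    #include <stdio.h>

    int main(void)
    {
        unsigned long chunk = 16UL << 30;    /* 16GB per mmap chunk */
        unsigned long low   = 128UL << 40;   /* 128TB low region    */

        /* 128TB / 16GB = 8192, i.e. NR_CHUNKS_128TB */
        printf("NR_CHUNKS_128TB = %lu\n", low / chunk);
        /* 256TB and 384TB are multiples of the 128TB count */
        printf("NR_CHUNKS_256TB = %lu\n", 2 * (low / chunk));
        printf("NR_CHUNKS_384TB = %lu\n", 3 * (low / chunk));
        return 0;
    }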