diff --git a/arch/sandbox/cpu/spl.c b/arch/sandbox/cpu/spl.c
index 0faf34cc00..09e3d10d6a 100644
--- a/arch/sandbox/cpu/spl.c
+++ b/arch/sandbox/cpu/spl.c
@@ -132,7 +132,7 @@ void spl_board_init(void)
 		int ret;
 
 		ret = ut_run_list("spl", NULL, tests, count,
-				  state->select_unittests, 1, false);
+				  state->select_unittests, 1, false, NULL);
 		/* continue execution into U-Boot */
 	}
 }
diff --git a/doc/develop/tests_sandbox.rst b/doc/develop/tests_sandbox.rst
index 8e42a32afb..bfd3bdb927 100644
--- a/doc/develop/tests_sandbox.rst
+++ b/doc/develop/tests_sandbox.rst
@@ -143,6 +143,75 @@ For example::
 
     Test dm_test_rtc_reset failed 3 times
 
+Isolating a test that breaks another
+------------------------------------
+
+When running unit tests, some may have side effects which cause a subsequent
+test to break. This can sometimes be seen when using 'ut dm' or similar.
+
+You can use the `-I` argument to the `ut` command to isolate this problem.
+First use `ut info` to see how many tests there are, then use a binary search
+to home in on the problem. Note that you might need to restart U-Boot after
+each iteration, so the `-c` argument to U-Boot is useful.
+
+For example, let's say that dm_test_host() is failing::
+
+    => ut dm
+    ...
+    Test: dm_test_get_stats: core.c
+    Test: dm_test_get_stats: core.c (flat tree)
+    Test: dm_test_host: host.c
+    test/dm/host.c:71, dm_test_host(): 0 == ut_check_delta(mem_start): Expected 0x0 (0), got 0xffffcbb0 (-13392)
+    Test: dm_test_host: host.c (flat tree)
+    Test failed 1 times
+    Test: dm_test_host_dup: host.c
+    Test: dm_test_host_dup: host.c (flat tree)
+    ...
+
+Check how many tests there are in the suite::
+
+    => ut info
+    Test suites: 21
+    Total tests: 645
+
+You can then tell U-Boot to run the failing test at different points in the
+sequence::
+
+    $ ./u-boot -T -c "ut dm -I300:dm_test_host"
+    ...
+    Test: dm_test_pinctrl_single: pinmux.c (flat tree)
+    Test: dm_test_host: host.c
+    test/dm/host.c:71, dm_test_host(): 0 == ut_check_delta(mem_start): Expected 0x0 (0), got 0xfffffdb0 (-592)
+    Test: dm_test_host: host.c (flat tree)
+    Test dm_test_host failed 1 times (position 300)
+    Failures: 4
+
+So it happened before position 300. Trying 150 shows it failing, so we try 75::
+
+    $ ./u-boot -T -c "ut dm -I75:dm_test_host"
+    ...
+    Test: dm_test_autoprobe: core.c
+    Test: dm_test_autoprobe: core.c (flat tree)
+    Test: dm_test_host: host.c
+    Test: dm_test_host: host.c (flat tree)
+    Failures: 0
+
+That succeeds, so we try 120, etc., until eventually we can figure out that
+the problem first happens at position 82::
+
+    $ ./u-boot -T -c "ut dm -I82:dm_test_host"
+    ...
+    Test: dm_test_blk_flags: blk.c
+    Test: dm_test_blk_flags: blk.c (flat tree)
+    Test: dm_test_host: host.c
+    test/dm/host.c:71, dm_test_host(): 0 == ut_check_delta(mem_start): Expected 0x0 (0), got 0xffffc960 (-13984)
+    Test: dm_test_host: host.c (flat tree)
+    Test dm_test_host failed 1 times (position 82)
+    Failures: 1
+
+From this we can deduce that `dm_test_blk_flags()` causes the problem with
+`dm_test_host()`.
+
 Running sandbox_spl tests directly
 ----------------------------------
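The isolation walkthrough added above can also be scripted from the host. The
sketch below is not part of this patch: it simply re-runs the documented
`./u-boot -T -c "ut dm -I<n>:<name>"` command and bisects on the
`Test <name> failed ... (position ...)` message that the new test-main.c code
prints. The binary path, suite name, starting positions and output scraping
are illustrative assumptions only::

    #include <stdio.h>
    #include <string.h>

    /*
     * Run './u-boot -T -c "ut dm -I<pos>:<name>"' and report whether the
     * isolated test failed, by looking for the "Test <name> failed" line.
     * Assumes the sandbox binary exits after running the -c command.
     */
    static int run_isolated(int pos, const char *name)
    {
    	char cmd[256], marker[128], line[512];
    	int failed = 0;
    	FILE *fp;

    	snprintf(cmd, sizeof(cmd),
    		 "./u-boot -T -c \"ut dm -I%d:%s\" 2>&1", pos, name);
    	snprintf(marker, sizeof(marker), "Test %s failed", name);
    	fp = popen(cmd, "r");
    	if (!fp)
    		return -1;
    	while (fgets(line, sizeof(line), fp)) {
    		if (strstr(line, marker))
    			failed = 1;
    	}
    	pclose(fp);

    	return failed;
    }

    int main(void)
    {
    	/* assumed: passes at position 0, known to fail at position 300 */
    	int lo = 0, hi = 300;

    	while (hi - lo > 1) {
    		int mid = (lo + hi) / 2;

    		/* narrow the interval: culprit runs somewhere in (lo, hi] */
    		if (run_isolated(mid, "dm_test_host"))
    			hi = mid;
    		else
    			lo = mid;
    	}
    	printf("first failing position: %d\n", hi);

    	return 0;
    }

Compile it with the host compiler and run it from the build directory that
contains the sandbox `u-boot` binary; it prints the first position at which
the isolated test fails.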
diff --git a/doc/usage/cmd/ut.rst b/doc/usage/cmd/ut.rst
index 11c64a1779..a3039634f2 100644
--- a/doc/usage/cmd/ut.rst
+++ b/doc/usage/cmd/ut.rst
@@ -8,10 +8,12 @@ Synopis
 
 ::
 
-    ut [-r] [-f] [<suite> [<test>]]
+    ut [-r] [-f] [-I<n>:<one_test>] [<suite> [<test>]]
 
 <runs>
     Number of times to run each test
 -f
     Force 'manual' tests to run as well
+<n>
+    Run <one_test> after <n> other tests have run
+<one_test>
+    Name of the 'one' test to run
 <suite>
     Test suite to run, or `all`
 <test>
     Name of single test to run
@@ -35,6 +37,13 @@ Manual tests are normally skipped by this command. Use `-f` to run them. See
 :ref:`develop/tests_writing:mixing python and c` for more information on
 manual tests.
 
+When running unit tests, some may have side effects which cause a subsequent
+test to break. This can sometimes be seen when using 'ut dm' or similar. To
+fix this, select the 'one' test which breaks. Then tell the 'ut' command to
+run this one test after a certain number of other tests have run. Using a
+binary search method with `-I` you can quickly figure out which test is causing
+the problem.
+
 Generally all tests in the suite are run. To run just a single test from the
 suite, provide the <test> argument.
diff --git a/include/test/ut.h b/include/test/ut.h
index e0e618b58c..4d00b4eeca 100644
--- a/include/test/ut.h
+++ b/include/test/ut.h
@@ -410,10 +410,15 @@ void test_set_state(struct unit_test_state *uts);
  *	then all tests are run
  * @runs_per_test: Number of times to run each test (typically 1)
  * @force_run: Run tests that are marked as manual-only (UT_TESTF_MANUAL)
+ * @test_insert: String describing a test to run after n other tests run, in the
+ *	format n:name where n is the number of tests to run before this one and
+ *	name is the name of the test to run. This is used to find which test causes
+ *	another test to fail. If the one test fails, testing stops immediately.
+ *	Pass NULL to disable this
  * Return: 0 if all tests passed, -1 if any failed
  */
 int ut_run_list(const char *name, const char *prefix,
 		struct unit_test *tests, int count, const char *select_name,
-		int runs_per_test, bool force_run);
+		int runs_per_test, bool force_run, const char *test_insert);
 
 #endif
diff --git a/test/cmd_ut.c b/test/cmd_ut.c
index 76e37f35c8..2736582f11 100644
--- a/test/cmd_ut.c
+++ b/test/cmd_ut.c
@@ -21,6 +21,7 @@ int cmd_ut_category(const char *name, const char *prefix,
 		    struct unit_test *tests, int n_ents,
 		    int argc, char *const argv[])
 {
+	const char *test_insert = NULL;
 	int runs_per_text = 1;
 	bool force_run = false;
 	int ret;
@@ -35,13 +36,17 @@ int cmd_ut_category(const char *name, const char *prefix,
 		case 'f':
 			force_run = true;
 			break;
+		case 'I':
+			test_insert = str + 2;
+			break;
 		}
 		argv++;
-		argc++;
+		argc--;
 	}
 
 	ret = ut_run_list(name, prefix, tests, n_ents,
-			  argc > 1 ? argv[1] : NULL, runs_per_text, force_run);
+			  argc > 1 ? argv[1] : NULL, runs_per_text, force_run,
+			  test_insert);
 
 	return ret ? CMD_RET_FAILURE : 0;
 }
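For reference, the `<n>:<name>` spec that `-I` takes (and that `ut_run_list()`
now receives via `test_insert`) can be split as below. This is a standalone
host-side illustration only, not U-Boot code: it substitutes the standard
`strtoul()` for the `dectoul()` call used in test-main.c below::

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* Split an "n:name" insertion spec such as "82:dm_test_host" */
    static int parse_insert_spec(const char *spec, int *posp, const char **namep)
    {
    	const char *colon;

    	*posp = strtoul(spec, NULL, 10);	/* stand-in for dectoul() */
    	colon = strchr(spec, ':');
    	if (!colon)
    		return -1;	/* no test name given */
    	*namep = colon + 1;

    	return 0;
    }

    int main(void)
    {
    	const char *name;
    	int pos;

    	if (!parse_insert_spec("82:dm_test_host", &pos, &name))
    		printf("run '%s' after %d other tests\n", name, pos);

    	return 0;
    }

The same two-step split (read the leading number, then take the text after the
colon) is what ut_run_tests() performs in the change below.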
diff --git a/test/test-main.c b/test/test-main.c
index ab3b00a3b3..5931e94a91 100644
--- a/test/test-main.c
+++ b/test/test-main.c
@@ -498,12 +498,29 @@ static int ut_run_test_live_flat(struct unit_test_state *uts,
  */
 static int ut_run_tests(struct unit_test_state *uts, const char *prefix,
 			struct unit_test *tests, int count,
-			const char *select_name)
+			const char *select_name, const char *test_insert)
 {
-	struct unit_test *test;
+	struct unit_test *test, *one;
 	int found = 0;
+	int pos = 0;
+	int upto;
 
-	for (test = tests; test < tests + count; test++) {
+	one = NULL;
+	if (test_insert) {
+		char *p;
+
+		pos = dectoul(test_insert, NULL);
+		p = strchr(test_insert, ':');
+		if (p)
+			p++;
+
+		for (test = tests; test < tests + count; test++) {
+			if (!strcmp(p, test->name))
+				one = test;
+		}
+	}
+
+	for (upto = 0, test = tests; test < tests + count; test++, upto++) {
 		const char *test_name = test->name;
 		int ret, i, old_fail_count;
 
@@ -534,6 +551,17 @@ static int ut_run_tests(struct unit_test_state *uts, const char *prefix,
 			}
 		}
 		old_fail_count = uts->fail_count;
+
+		if (one && upto == pos) {
+			ret = ut_run_test_live_flat(uts, one);
+			if (uts->fail_count != old_fail_count) {
+				printf("Test %s failed %d times (position %d)\n",
+				       one->name,
+				       uts->fail_count - old_fail_count, pos);
+			}
+			return -EBADF;
+		}
+
 		for (i = 0; i < uts->runs_per_test; i++)
 			ret = ut_run_test_live_flat(uts, test);
 		if (uts->fail_count != old_fail_count) {
@@ -554,7 +582,7 @@ static int ut_run_tests(struct unit_test_state *uts, const char *prefix,
 
 int ut_run_list(const char *category, const char *prefix,
 		struct unit_test *tests, int count, const char *select_name,
-		int runs_per_test, bool force_run)
+		int runs_per_test, bool force_run, const char *test_insert)
 {
 	struct unit_test_state uts = { .fail_count = 0 };
 	bool has_dm_tests = false;
@@ -589,7 +617,8 @@ int ut_run_list(const char *category, const char *prefix,
 		memcpy(uts.fdt_copy, gd->fdt_blob, uts.fdt_size);
 	}
 	uts.force_run = force_run;
-	ret = ut_run_tests(&uts, prefix, tests, count, select_name);
+	ret = ut_run_tests(&uts, prefix, tests, count, select_name,
+			   test_insert);
 
 	/* Best efforts only...ignore errors */
 	if (has_dm_tests)