Mirror of https://github.com/AsahiLinux/u-boot (synced 2025-02-26)
test: Support tests which can only be run manually
At present we normally write tests either in Python or in C. But most
Python tests end up doing a lot of checks which would be better done in C.
Checks done in C are orders of magnitude faster and give full access to
U-Boot's internal workings, rather than relying on the command line alone.

The model is to have a Python test set up some things and then use C code
(in a unit test) to check that they were done correctly. But we don't want
those checks to happen as part of normal test running, since each C unit
test depends on its associated Python test and so cannot run without it.

To achieve this, add a new UT_TESTF_MANUAL flag to use with the C 'check'
tests, so that they are skipped by default when the 'ut' command is used.
Require that such tests have a name ending in '_norun', so that pytest
knows to skip them too.

Signed-off-by: Simon Glass <sjg@chromium.org>
parent c43635bdbc
commit cbd71fad6d
8 changed files with 86 additions and 8 deletions
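
To make the model concrete, here is a minimal sketch of what such a
'checker' C test might look like. The test name, suite and environment
variable are illustrative assumptions, not code from this commit; only
UT_TESTF_MANUAL, the '_norun' naming rule and the ut_assert...() helpers
come from U-Boot itself.

#include <env.h>
#include <test/test.h>
#include <test/ut.h>

/*
 * Hypothetical checker: verifies state that the Python half of the test
 * is assumed to have set up (here, an environment variable). Because of
 * UT_TESTF_MANUAL it is skipped by a plain 'ut' run and only executes
 * when forced with 'ut -f <suite> example_test_check_norun'.
 */
static int example_test_check_norun(struct unit_test_state *uts)
{
	/* fail unless the Python side really set example_flag to "done" */
	ut_assertnonnull(env_get("example_flag"));
	ut_asserteq_str("done", env_get("example_flag"));

	return 0;
}
UNIT_TEST(example_test_check_norun, UT_TESTF_MANUAL, example_test);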
arch/sandbox/cpu/spl.c
@@ -89,7 +89,7 @@ void spl_board_init(void)
 		int ret;
 
 		ret = ut_run_list("spl", NULL, tests, count,
-				  state->select_unittests, 1);
+				  state->select_unittests, 1, false);
 		/* continue execution into U-Boot */
 	}
 }
doc/develop/tests_writing.rst
@@ -74,6 +74,33 @@ NOT rely on running with sandbox, but instead should function correctly on any
 board supported by U-Boot.
 
 
+Mixing Python and C
+-------------------
+
+The best of both worlds is sometimes to have a Python test set things up and
+perform some operations, with a 'checker' C unit test doing the checks
+afterwards. This can be achieved with these steps:
+
+- Add the `UT_TESTF_MANUAL` flag to the checker test so that the `ut` command
+  does not run it by default
+- Add a `_norun` suffix to the name so that pytest knows to skip it too
+
+In your Python test use the `-f` flag to the `ut` command to force the checker
+test to run, e.g.::
+
+   # Do the Python part
+   host load ...
+   bootm ...
+
+   # Run the checker to make sure that everything worked
+   ut -f bootstd vbe_test_fixup_norun
+
+Note that apart from the `UT_TESTF_MANUAL` flag, the code in a 'manual' C test
+is just like any other C test. It still uses ut_assert...() and other such
+constructs, in this case to check that the expected things happened in the
+Python test.
+
+
 How slow are Python tests?
 --------------------------
 
include/test/test.h
@@ -28,6 +28,7 @@
  * @other_fdt_size: Size of the other FDT (UT_TESTF_OTHER_FDT)
  * @of_other: Live tree for the other FDT
  * @runs_per_test: Number of times to run each test (typically 1)
+ * @force_run: true to run tests marked with the UT_TESTF_MANUAL flag
  * @expect_str: Temporary string used to hold expected string value
  * @actual_str: Temporary string used to hold actual string value
  */
@@ -48,6 +49,7 @@ struct unit_test_state {
 	int other_fdt_size;
 	struct device_node *of_other;
 	int runs_per_test;
+	bool force_run;
 	char expect_str[512];
 	char actual_str[512];
 };
@@ -63,6 +65,12 @@ enum {
 	/* do extra driver model init and uninit */
 	UT_TESTF_DM = BIT(6),
 	UT_TESTF_OTHER_FDT = BIT(7),	/* read in other device tree */
+	/*
+	 * Only run if explicitly requested with 'ut -f <suite> <test>'. The
+	 * test name must end in "_norun" so that pytest detects this also,
+	 * since it cannot access the flags.
+	 */
+	UT_TESTF_MANUAL = BIT(8),
 };
 
 /**
include/test/ut.h
@@ -409,9 +409,11 @@ void test_set_state(struct unit_test_state *uts);
  * @select_name: Name of a single test to run (from the list provided). If NULL
  *	then all tests are run
  * @runs_per_test: Number of times to run each test (typically 1)
+ * @force_run: Run tests that are marked as manual-only (UT_TESTF_MANUAL)
  * Return: 0 if all tests passed, -1 if any failed
  */
 int ut_run_list(const char *name, const char *prefix, struct unit_test *tests,
-		int count, const char *select_name, int runs_per_test);
+		int count, const char *select_name, int runs_per_test,
+		bool force_run);
 
 #endif
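
For existing callers the change is mechanical: one extra trailing argument.
A hedged usage sketch of a caller that opts in (the wrapper and category
name are hypothetical, not part of this commit):

#include <test/test.h>
#include <test/ut.h>

/*
 * Hypothetical wrapper: run a single test by name, forcing it to run even
 * if it carries UT_TESTF_MANUAL. The final 'true' is the new force_run
 * parameter added by this commit; normal callers pass false, as the
 * sandbox SPL and driver model hunks elsewhere in this commit do, so
 * manual tests stay skipped by default.
 */
static int run_one_manual(struct unit_test *tests, int count,
			  const char *select_name)
{
	return ut_run_list("example", NULL, tests, count, select_name, 1,
			   true);
}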
test/cmd_ut.c
@@ -19,16 +19,26 @@ int cmd_ut_category(const char *name, const char *prefix,
 		    int argc, char *const argv[])
 {
 	int runs_per_text = 1;
+	bool force_run = false;
 	int ret;
 
-	if (argc > 1 && !strncmp("-r", argv[1], 2)) {
-		runs_per_text = dectoul(argv[1] + 2, NULL);
+	while (argc > 1 && *argv[1] == '-') {
+		const char *str = argv[1];
+
+		switch (str[1]) {
+		case 'r':
+			runs_per_text = dectoul(str + 2, NULL);
+			break;
+		case 'f':
+			force_run = true;
+			break;
+		}
 		argv++;
 		argc--;
 	}
 
 	ret = ut_run_list(name, prefix, tests, n_ents,
-			  argc > 1 ? argv[1] : NULL, runs_per_text);
+			  argc > 1 ? argv[1] : NULL, runs_per_text, force_run);
 
 	return ret ? CMD_RET_FAILURE : 0;
 }
test/dm/test-dm.c
@@ -36,7 +36,7 @@ static int dm_test_run(const char *test_name, int runs_per_text)
 	int ret;
 
 	ret = ut_run_list("driver model", "dm_test_", tests, n_ents, test_name,
-			  runs_per_text);
+			  runs_per_text, false);
 
 	return ret ? CMD_RET_FAILURE : 0;
 }
test/py/conftest.py
@@ -289,7 +289,13 @@ def generate_ut_subtest(metafunc, fixture_name, sym_path):
         m = re_ut_test_list.search(l)
         if not m:
             continue
-        vals.append(m.group(1) + ' ' + m.group(2))
+        suite, name = m.groups()
+
+        # Tests marked with _norun should only be run manually using 'ut -f'
+        if name.endswith('_norun'):
+            continue
+
+        vals.append(f'{suite} {name}')
 
     ids = ['ut_' + s.replace(' ', '_') for s in vals]
     metafunc.parametrize(fixture_name, vals, ids=ids)
test/test-main.c
@@ -508,6 +508,30 @@ static int ut_run_tests(struct unit_test_state *uts, const char *prefix,
 
 		if (!test_matches(prefix, test_name, select_name))
 			continue;
+
+		if (test->flags & UT_TESTF_MANUAL) {
+			int len;
+
+			/*
+			 * manual tests must have a name ending "_norun" as this
+			 * is how pytest knows to skip them. See
+			 * generate_ut_subtest() for this check.
+			 */
+			len = strlen(test_name);
+			if (len < 6 || strcmp(test_name + len - 6, "_norun")) {
+				printf("Test %s is manual so must have a name ending in _norun\n",
+				       test_name);
+				uts->fail_count++;
+				return -EBADF;
+			}
+			if (!uts->force_run) {
+				if (select_name) {
+					printf("Test %s skipped as it is manual (use -f to run it)\n",
+					       test_name);
+				}
+				continue;
+			}
+		}
 		old_fail_count = uts->fail_count;
 		for (i = 0; i < uts->runs_per_test; i++)
 			ret = ut_run_test_live_flat(uts, test, select_name);
@@ -529,7 +553,7 @@ static int ut_run_tests(struct unit_test_state *uts, const char *prefix,
 
 int ut_run_list(const char *category, const char *prefix,
 		struct unit_test *tests, int count, const char *select_name,
-		int runs_per_test)
+		int runs_per_test, bool force_run)
 {
 	struct unit_test_state uts = { .fail_count = 0 };
 	bool has_dm_tests = false;
@@ -563,6 +587,7 @@ int ut_run_list(const char *category, const char *prefix,
 		}
 		memcpy(uts.fdt_copy, gd->fdt_blob, uts.fdt_size);
 	}
+	uts.force_run = force_run;
 	ret = ut_run_tests(&uts, prefix, tests, count, select_name);
 
 	/* Best efforts only...ignore errors */