mirror of
https://github.com/nushell/nushell
synced 2025-01-02 16:29:00 +00:00
0567407f85
Should close one of the tasks in #8450. # Description > **Note** > in order of appearance in the global diff -1b7497c419
adds the `std-tests` job to the CI which 1. installs `nushell` in the runner 2. run the `tests.nu` module > see `open .github/workflows/ci.yml | get jobs.std-tests | to yaml` - [`ec85b6fd`..`9c122115`](ec85b6fd3fc004cd94e3fada5c8e5fe2714fd629..9c12211564ca8ee90ed65ae45776dccb8f8e4ef1) is where all the magic happens => see below - 🧪799c7eb7fd
introduces some bugs and failing test to see how the CI behaves => see how the [tests failed](https://github.com/nushell/nushell/actions/runs/4460098237/jobs/7833018256) as expected ❌ - 🧪 andc3de1fafb5
reverts the failing tests, i.e. the previous commit, leaving a standard library whose tests all pass 🎉 => see the [tests passing](https://github.com/nushell/nushell/actions/runs/4460153434/jobs/7833110719?pr=8525#step:5:1) now ✔️ ## the changes to the runner > see [`ec85b6fd`..`9c122115`](ec85b6fd3fc004cd94e3fada5c8e5fe2714fd629..9c12211564ca8ee90ed65ae45776dccb8f8e4ef1) the issue with the previous runner was the following: the clever trick of using `nu -c "use ...; test"` did print the errors when occurring but they did not capture the true "failure", i.e. in all cases the `$env.LAST_EXIT_CODE` was set to `0`, never stopping the CI when a test failed 🤔 i first tried to `try` / `catch` the error inec85b6fd3f
which kinda worked but only threw a single error, the first one. i thought it was not the best and started thinking about a solution to have a complete report of all failing tests, at once, to avoid running the CI multiple times! the easiest solution i found was the one i implemented in9c12211564
> **Warning** > this changes the structure of the runner quite a bit, but the `for` loops were annoying to manipulate structured data and allow the runner to draw a complete report... now the runner does the following - compute the list of all available tests in a table with the `file`, `module` and `name` columns (first part of the pipe until `flatten` and `rename`) - run the tests one by one computing the new `pass` column - with a `log info` - captures the failing ones => puts `true` in `pass` if the test passes, `false` otherwise - if at least one test has failed, throw a single error with the list of failing tests ### hope you'll like it 😌 # User-Facing Changes ``` $nothing ``` # Tests + Formatting the standard tests now return a true error that will stop the CI # After Submitting ``` $nothing ```
87 lines
2.7 KiB
Text
87 lines
2.7 KiB
Text
use std.nu *
|
|
|
|
# show a test record in a pretty way
#
# `$in` must be a `record<file: string, module: string, name: string, pass: bool>`.
#
# the output would be like
# - "<indentation> x <module> <test>" all in red if failed
# - "<indentation>   <module> <test>" all in green if passed
def show-pretty-test [indent: int = 4] {
    let result = $in

    # pick the color and the leading marker from the test outcome
    let color = if $result.pass { ansi green } else { ansi red }
    let marker = if $result.pass { " " } else { char failed }

    $"(' ' * $indent)($color)($marker) ($result.module) ($result.name)(ansi reset)"
}
|
|
|
|
# Test executor
#
# It executes exported "test_*" commands in "test_*" modules
def main [
    --path: path,  # Path to look for tests. Default: directory of this file.
    --module: string,  # Module to run tests. Default: all test modules found.
    --command: string,  # Test command to run. Default: all test command found in the files.
    --list,  # list the selected tests without running them.
] {
    # discover every test: one row per exported "test_*" command, as a table
    # with `file`, `module` and `name` columns
    let tests = (
        ls ($path | default $env.FILE_PWD | path join "test_*.nu")
        # keep the file path and derive the module name from the file stem
        | each {|row| {file: $row.name name: ($row.name | path parse | get stem)}}
        | upsert test {|module|
            # spawn a fresh `nu` that imports the module and dumps the commands
            # in scope as nuon, then keep only the "test_*" commands that were
            # actually defined by this module
            nu -c $'use ($module.file) *; $nu.scope.commands | select name module_name | to nuon'
            | from nuon
            | where module_name == $module.name
            | where ($it.name | str starts-with "test_")
            | get name
        }
        # one row per (file, module, test-command) triple
        | flatten
        | rename file module name
    )

    # narrow the selection: a single command wins over a single module,
    # otherwise run everything that was found
    let tests_to_run = (if not ($command | is-empty) {
        $tests | where name == $command
    } else if not ($module | is-empty) {
        $tests | where module == $module
    } else {
        $tests
    })

    # with `--list`, only report what would run, without running anything
    if $list {
        return ($tests_to_run | select module name file)
    }

    # run each selected test in its own `nu` process and record the outcome
    # in a new `pass` column (shadows the discovery table above)
    let tests = (
        $tests_to_run
        | group-by module
        | transpose name tests
        | each {|module|
            log info $"Running tests in ($module.name)"
            $module.tests | each {|test|
                log debug $"Running test ($test.name)"
                # a failing test makes the child `nu` error out; `try`/`catch`
                # turns that into a boolean instead of aborting the runner
                let did_pass = (try {
                    nu -c $'use ($test.file) ($test.name); ($test.name)'
                    true
                } catch { false })

                $test | merge ({pass: $did_pass})
            }
        }
        | flatten
    )

    # if anything failed, throw a single error carrying the full pretty report
    # (all tests are listed, failures marked in red) so the CI stops
    if not ($tests | where not pass | is-empty) {
        let text = ([
            $"(ansi purple)some tests did not pass (char lparen)see complete errors above(char rparen):(ansi reset)"
            ""
            ($tests | each {|test| ($test | show-pretty-test 4)} | str join "\n")
            ""
        ] | str join "\n")

        error make --unspanned { msg: $text }
    }
}
|