std: refactor test-runner to no longer require tests to be exported (#9355)

# Description
The test runner now performs the following steps, in order, to run the tests:
* The module file is opened
* A public function with a random name is added to the source code; this function calls the
user-specified private test function
* The modified module file is saved under a random name in $nu.temp-path
* The modified module file is imported in a subprocess, and the injected function is called by
the test runner (see the sketch below)
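
For illustration, the wrapper appended to the temporary copy of the module looks roughly like
this (a sketch based on the new `run-test` command in this PR; the function name and the called
test command are placeholders for names generated at runtime):

```nushell
# appended to a copy of the test module saved under a random name in $nu.temp-path
export def xkqzvwtrah [] {
    # the runner substitutes the module's before-each here (or an empty record if there is none)
    let context = (before-each)
    try {
        $context | test_foo          # the user's private test command
        $context | after-each
    } catch { |err|
        $context | after-each
        if $err.msg == "ASSERT:SKIP" {
            exit 2                   # exit code 2 marks the test as skipped
        } else {
            $err | get raw
        }
    }
}
```

The runner then calls the temporary module in a subprocess, roughly
`nu -c "use <temp file> *; xkqzvwtrah | to nuon"`, and maps the exit code to pass (0), skip (2),
or fail (anything else).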
# User-Facing Changes
* Test functions no longer need to be exported
* Test functions no longer need to reside in separate test_* files
* setup and teardown have been renamed to before-each and after-each, respectively
* before-all and after-all functions were added; they run once before and after all tests in a
given module, which matches the behavior of test runners used by other languages such as
JUnit/TestNG or Mocha (see the example module below)
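
As a sketch, a test module under the new conventions might look like this (module contents and
names are illustrative, not taken from this PR):

```nushell
use std assert
use std log

def before-all [] {
    # runs once per module; its output record is merged into every test's context
    {shared: "computed once"}
}

def before-each [] {
    # runs before every test; the returned record becomes the test's pipeline input
    {msg: "fresh context for every test"}
}

def after-each [] {
    log debug $"cleaning up, context: ($in)"
}

def after-all [] {
    log debug "runs once after all tests in the module"
}

def test_addition [] {
    assert equal (1 + 2) 3
}

def test_skipped_on_purpose [] {
    assert skip
}
```

None of these commands need to be exported and the file no longer needs a test_ prefix; the
suite is run with `nu -c 'use std testing; testing run-tests --path crates/nu-std'` (as in the
CI change below) and can be narrowed with `--module` or `--test`, or listed with `--list`.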
# Tests + Formatting

# After Submitting

---------

Co-authored-by: Kamil <skelly37@protonmail.com>
Co-authored-by: amtoine <stevan.antoine@gmail.com>
Yethal 2023-06-10 20:16:17 +02:00 committed by GitHub
parent a86f34d0ad
commit 0bdc362e13
15 changed files with 334 additions and 255 deletions


@ -116,7 +116,7 @@ jobs:
run: cargo install --path . --locked --no-default-features
- name: Standard library tests
run: nu -c 'use std; std run-tests --path crates/nu-std'
run: nu -c 'use std testing; testing run-tests --path crates/nu-std'
- name: Setup Python
uses: actions/setup-python@v4


@ -18,6 +18,7 @@ pub fn load_standard_library(
let mut std_files = vec![
("mod.nu", include_str!("../std/mod.nu")),
("testing.nu", include_str!("../std/testing.nu")),
("dirs.nu", include_str!("../std/dirs.nu")),
("dt.nu", include_str!("../std/dt.nu")),
("help.nu", include_str!("../std/help.nu")),


@ -286,174 +286,3 @@ It's been this long since (ansi green)Nushell(ansi reset)'s first commit:
Startup Time: ($nu.startup-time)
"
}
# show a test record in a pretty way
#
# `$in` must be a `record<file: string, module: string, name: string, pass: bool>`.
#
# the output would be like
# - "<indentation> x <module> <test>" all in red if failed
# - "<indentation> s <module> <test>" all in yellow if skipped
# - "<indentation> <module> <test>" all in green if passed
def show-pretty-test [indent: int = 4] {
let test = $in
[
(" " * $indent)
(match $test.result {
"pass" => { ansi green },
"skip" => { ansi yellow },
_ => { ansi red }
})
(match $test.result {
"pass" => " ",
"skip" => "s",
_ => { char failed }
})
" "
$"($test.module) ($test.test)"
(ansi reset)
] | str join
}
def throw-error [error: record] {
error make {
msg: $"(ansi red)($error.msg)(ansi reset)"
label: {
text: ($error.label)
start: $error.span.start
end: $error.span.end
}
}
}
# Run Nushell tests
#
# It executes exported "test_*" commands in "test_*" modules
export def 'run-tests' [
--path: path, # Path to look for tests. Default: current directory.
--module: string, # Test module to run. Default: all test modules found.
--test: string, # Individual test to run. Default: all test command found in the files.
--list, # list the selected tests without running them.
] {
let module_search_pattern = ('**' | path join ({
stem: ($module | default "test_*")
extension: nu
} | path join))
let path = ($path | default $env.PWD)
if not ($path | path exists) {
throw-error {
msg: "directory_not_found"
label: "no such directory"
span: (metadata $path | get span)
}
}
if not ($module | is-empty) {
try { ls ($path | path join $module_search_pattern) | null } catch {
throw-error {
msg: "module_not_found"
label: $"no such module in ($path)"
span: (metadata $module | get span)
}
}
}
let tests = (
ls ($path | path join $module_search_pattern)
| each {|row| {file: $row.name name: ($row.name | path parse | get stem)}}
| upsert commands {|module|
^$nu.current-exe -c $'use `($module.file)` *; $nu.scope.commands | select name module_name | to nuon'
| from nuon
| where module_name == $module.name
| get name
}
| upsert test {|module| $module.commands | where ($it | str starts-with "test_") }
| upsert setup {|module| "setup" in $module.commands }
| upsert teardown {|module| "teardown" in $module.commands }
| reject commands
| flatten
| rename file module test
)
let tests_to_run = (if not ($test | is-empty) {
$tests | where test == $test
} else if not ($module | is-empty) {
$tests | where module == $module
} else {
$tests
})
if $list {
return ($tests_to_run | select module test file)
}
if ($tests_to_run | is-empty) {
error make --unspanned {msg: "no test to run"}
}
let tests = (
$tests_to_run
| group-by module
| transpose name tests
| each {|module|
log info $"Running tests in module ($module.name)"
$module.tests | each {|test|
log debug $"Running test ($test.test)"
let context_setup = if $test.setup {
$"use `($test.file)` setup; let context = \(setup\)"
} else {
"let context = {}"
}
let context_teardown = if $test.teardown {
$"use `($test.file)` teardown; $context | teardown"
} else {
""
}
let nu_script = $'
($context_setup)
use `($test.file)` ($test.test)
try {
$context | ($test.test)
($context_teardown)
} catch { |err|
($context_teardown)
if $err.msg == "ASSERT:SKIP" {
exit 2
} else {
$err | get raw
}
}
'
^$nu.current-exe -c $nu_script
let result = match $env.LAST_EXIT_CODE {
0 => "pass",
2 => "skip",
_ => "fail",
}
if $result == "skip" {
log warning $"Test case ($test.test) is skipped"
}
$test | merge ({result: $result})
}
}
| flatten
)
if not ($tests | where result == "fail" | is-empty) {
let text = ([
$"(ansi purple)some tests did not pass (char lparen)see complete errors above(char rparen):(ansi reset)"
""
($tests | each {|test| ($test | show-pretty-test 4)} | str join "\n")
""
] | str join "\n")
error make --unspanned { msg: $text }
}
}


@ -0,0 +1,249 @@
use log.nu
def throw-error [error: record] {
error make {
msg: $"(ansi red)($error.msg)(ansi reset)"
label: {
text: ($error.label)
start: $error.span.start
end: $error.span.end
}
}
}
# show a test record in a pretty way
#
# `$in` must be a `record<file: string, module: string, name: string, pass: bool>`.
#
# the output would be like
# - "<indentation> x <module> <test>" all in red if failed
# - "<indentation> s <module> <test>" all in yellow if skipped
# - "<indentation> <module> <test>" all in green if passed
def show-pretty-test [indent: int = 4] {
let test = $in
[
(" " * $indent)
(match $test.result {
"pass" => { ansi green },
"skip" => { ansi yellow },
_ => { ansi red }
})
(match $test.result {
"pass" => " ",
"skip" => "s",
_ => { char failed }
})
" "
$"($test.name) ($test.test)"
(ansi reset)
] | str join
}
def get-commands [
file: path
] {
^$nu.current-exe --ide-ast $file
| from json
| get content
| split list def
| skip 1
| each {get 0}
}
def run-test [
test: record
] {
let test_file_name = (random chars -l 10)
let test_function_name = (random chars -l 10)
let rendered_module_path = ({parent: ($test.file|path dirname), stem: $test_file_name, extension: nu}| path join)
let test_function = $"
export def ($test_function_name) [] {
($test.before-each)
try {
$context | ($test.test)
($test.after-each)
} catch { |err|
($test.after-each)
if $err.msg == "ASSERT:SKIP" {
exit 2
} else {
$err | get raw
}
}
}
"
open $test.file
| lines
| append ($test_function)
| str join (char nl)
| save $rendered_module_path
let result = (
^$nu.current-exe -c $"use ($rendered_module_path) *; ($test_function_name)|to nuon"
| complete
)
rm $rendered_module_path
return $result
}
def run-tests-for-module [
module: record
] {
let global_context = if $module.before-all {
log info $"Running before-all for module ($module.name)"
run-test {
file: $module.file,
before-each: 'let context = {}',
after-each: '',
test: 'before-all'
}
| if $in.exit_code == 0 {
$in.stdout
} else {
throw-error {
msg: "Before-all failed"
label: "Failure in test setup"
span: (metadata $in | get span)
}
}
} else {
{}
}
let tests = (
$module
| flatten
| rename -c [tests test]
| update before-each {|x|
if $module.before-each {
$"let context = \(($global_context)|merge \(before-each\)\)"
} else {
$"let context = ($global_context)"
}
}
| update after-each {|x|
if $module.after-each {
'$context | after-each'
} else {
''
}
}
| each {|test|
log info $"Running ($test.test) in module ($module.name) with context ($global_context)"
$test|insert result {|x|
run-test $test
| match $in.exit_code {
0 => "pass",
2 => "skip",
_ => "fail",
}
}
}
)
if $module.after-all {
log info $"Running after-all for module ($module.name)"
run-test {
file: $module.file,
before-each: $"let context = ($global_context)",
after-each: '',
test: 'after-all'
}
}
return $tests
}
export def run-tests [
--path: path, # Path to look for tests. Default: current directory.
--module: string, # Test module to run. Default: all test modules found.
--test: string, # Individual test to run. Default: all test command found in the files.
--list, # list the selected tests without running them.
] {
let module_search_pattern = ('**' | path join ({
stem: ($module | default "*")
extension: nu
} | path join))
let path = if $path == null {
$env.PWD
} else {
if not ($path | path exists) {
throw-error {
msg: "directory_not_found"
label: "no such directory"
span: (metadata $path | get span)
}
}
$path
}
if not ($module | is-empty) {
try { ls ($path | path join $module_search_pattern) | null } catch {
throw-error {
msg: "module_not_found"
label: $"no such module in ($path)"
span: (metadata $module | get span)
}
}
}
let modules = (
ls ($path | path join $module_search_pattern)
| each {|row| {file: $row.name name: ($row.name | path parse | get stem)}}
| upsert commands {|module|
get-commands $module.file
}
| upsert tests {|module| $module.commands|where $it starts-with "test_"}
| filter {|x| ($x.tests|length) > 0}
| filter {|x| if ($test|is-empty) {true} else {$test in $x.tests}}
| filter {|x| if ($module|is-empty) {true} else {$module == $x.name}}
| upsert before-each {|module| "before-each" in $module.commands}
| upsert before-all {|module| "before-all" in $module.commands}
| upsert after-each {|module| "after-each" in $module.commands}
| upsert after-all {|module| "after-all" in $module.commands}
| reject commands
| rename file name tests
| update tests {|x|
if ($test|is-empty) {
$x.tests
} else {
$x.tests
| where $it == $test
}
}
)
if $list {
return $modules
}
if ($modules | is-empty) {
error make --unspanned {msg: "no test to run"}
}
let results = (
$modules
| each {|module|
run-tests-for-module $module
}
| flatten
| select name test result
)
if not ($results | where result == "fail" | is-empty) {
let text = ([
$"(ansi purple)some tests did not pass (char lparen)see complete errors below(char rparen):(ansi reset)"
""
($results | each {|test| ($test | show-pretty-test 4)} | str join "\n")
""
] | str join "\n")
error make --unspanned { msg: $text }
}
}


@ -42,47 +42,47 @@ def "assert message short" [
assert str contains $output "test message"
}
export def test_critical [] {
def test_critical [] {
assert no message 99 critical
assert message CRITICAL critical CRT
}
export def test_critical_short [] {
def test_critical_short [] {
assert message short CRITICAL critical C
}
export def test_error [] {
assert no message CRITICAL error
def test_error [] {
assert no message CRITICAL error
assert message ERROR error ERR
}
export def test_error_short [] {
def test_error_short [] {
assert message short ERROR error E
}
export def test_warning [] {
assert no message ERROR warning
def test_warning [] {
assert no message ERROR warning
assert message WARNING warning WRN
}
export def test_warning_short [] {
def test_warning_short [] {
assert message short WARNING warning W
}
export def test_info [] {
assert no message WARNING info
def test_info [] {
assert no message WARNING info
assert message INFO info "INF" # INF has to be quoted, otherwise it is the `inf` float
}
export def test_info_short [] {
def test_info_short [] {
assert message short INFO info I
}
export def test_debug [] {
assert no message INFO debug
def test_debug [] {
assert no message INFO debug
assert message DEBUG debug DBG
}
export def test_debug_short [] {
def test_debug_short [] {
assert message short DEBUG debug D
}
}


@ -22,20 +22,20 @@ def run-command [
} | complete | get --ignore-errors stderr
}
export def test_errors_during_deduction [] {
def test_errors_during_deduction [] {
assert str contains (run-command "DEBUG" "msg" "%MSG%" 25) "Cannot deduce log level prefix for given log level"
assert str contains (run-command "DEBUG" "msg" "%MSG%" 25 --ansi (ansi red)) "Cannot deduce log level prefix for given log level"
assert str contains (run-command "DEBUG" "msg" "%MSG%" 25 --level-prefix "abc") "Cannot deduce ansi for given log level"
}
export def test_valid_calls [] {
def test_valid_calls [] {
assert equal (run-command "DEBUG" "msg" "%MSG%" 25 --level-prefix "abc" --ansi (ansi default) | str trim --right) "msg"
assert equal (run-command "DEBUG" "msg" "%LEVEL% %MSG%" 20 | str trim --right) $"($env.LOG_PREFIX.INFO) msg"
assert equal (run-command "DEBUG" "msg" "%LEVEL% %MSG%" --level-prefix "abc" 20 | str trim --right) "abc msg"
assert equal (run-command "INFO" "msg" "%ANSI_START%%LEVEL% %MSG%%ANSI_STOP%" $env.LOG_LEVEL.CRITICAL | str trim --right) $"($env.LOG_ANSI.CRITICAL)CRT msg(ansi reset)"
}
export def test_log_level_handling [] {
def test_log_level_handling [] {
assert equal (run-command "DEBUG" "msg" "%LEVEL% %MSG%" 20 | str trim --right) $"($env.LOG_PREFIX.INFO) msg"
assert equal (run-command "WARNING" "msg" "%LEVEL% %MSG%" 20 | str trim --right) ""
}
}


@ -39,7 +39,7 @@ def "assert formatted" [
assert equal ($output | str trim --right) (format-message $message $format $prefix $ansi)
}
export def "test_format_flag" [] {
def "test_format_flag" [] {
assert formatted "test" "25 %MSG% %ANSI_START% %LEVEL%%ANSI_STOP%" critical
assert formatted "test" "25 %MSG% %ANSI_START% %LEVEL%%ANSI_STOP%" error
assert formatted "test" "25 %MSG% %ANSI_START% %LEVEL%%ANSI_STOP%" warning
@ -50,4 +50,4 @@ export def "test_format_flag" [] {
assert formatted --short "test" "TEST %ANSI_START% %MSG%%ANSI_STOP%" warning
assert formatted --short "test" "TEST %ANSI_START% %MSG%%ANSI_STOP%" info
assert formatted --short "test" "TEST %ANSI_START% %MSG%%ANSI_STOP%" debug
}
}


@ -1,6 +1,6 @@
use std *
export def "test_env_log_ansi" [] {
def test_env_log_ansi [] {
assert equal $env.LOG_ANSI.CRITICAL (ansi red_bold)
assert equal $env.LOG_ANSI.ERROR (ansi red)
assert equal $env.LOG_ANSI.WARNING (ansi yellow)
@ -8,7 +8,7 @@ export def "test_env_log_ansi" [] {
assert equal $env.LOG_ANSI.DEBUG (ansi default_dimmed)
}
export def "test_env_log_level" [] {
def test_env_log_level [] {
assert equal $env.LOG_LEVEL.CRITICAL 50
assert equal $env.LOG_LEVEL.ERROR 40
assert equal $env.LOG_LEVEL.WARNING 30
@ -16,7 +16,7 @@ export def "test_env_log_level" [] {
assert equal $env.LOG_LEVEL.DEBUG 10
}
export def "test_env_log_prefix" [] {
def test_env_log_prefix [] {
assert equal $env.LOG_PREFIX.CRITICAL "CRT"
assert equal $env.LOG_PREFIX.ERROR "ERR"
assert equal $env.LOG_PREFIX.WARNING "WRN"
@ -24,7 +24,7 @@ export def "test_env_log_prefix" [] {
assert equal $env.LOG_PREFIX.DEBUG "DBG"
}
export def "test_env_log_short_prefix" [] {
def test_env_log_short_prefix [] {
assert equal $env.LOG_SHORT_PREFIX.CRITICAL "C"
assert equal $env.LOG_SHORT_PREFIX.ERROR "E"
assert equal $env.LOG_SHORT_PREFIX.WARNING "W"
@ -32,6 +32,6 @@ export def "test_env_log_short_prefix" [] {
assert equal $env.LOG_SHORT_PREFIX.DEBUG "D"
}
export def "test_env_log_format" [] {
def test_env_log_format [] {
assert equal $env.LOG_FORMAT $"%ANSI_START%%DATE%|%LEVEL%|(ansi u)%MSG%%ANSI_STOP%"
}
}


@ -1,34 +1,34 @@
use std *
export def test_assert [] {
def test_assert [] {
assert true
assert (1 + 2 == 3)
assert error { assert false }
assert error { assert (1 + 2 == 4) }
}
export def test_assert_not [] {
def test_assert_not [] {
assert not false
assert not (1 + 2 == 4)
assert error { assert not true }
assert error { assert not (1 + 2 == 3) }
}
export def test_assert_equal [] {
def test_assert_equal [] {
assert equal (1 + 2) 3
assert equal (0.1 + 0.2 | into string | into decimal) 0.3 # 0.30000000000000004 == 0.3
assert error { assert equal 1 "foo" }
assert error { assert equal (1 + 2) 4 }
}
export def test_assert_not_equal [] {
def test_assert_not_equal [] {
assert not equal (1 + 2) 4
assert not equal 1 "foo"
assert not equal (1 + 2) "3"
assert error { assert not equal 1 1 }
}
export def test_assert_error [] {
def test_assert_error [] {
let failing_code = {|| missing_code_to_run}
assert error $failing_code
@ -37,33 +37,33 @@ export def test_assert_error [] {
assert $assert_error_raised "The assert error should raise an error if there is no error in the executed code."
}
export def test_assert_less [] {
def test_assert_less [] {
assert less 1 2
assert error { assert less 1 1 }
}
export def test_assert_less_or_equal [] {
def test_assert_less_or_equal [] {
assert less or equal 1 2
assert less or equal 1 1
assert error { assert less or equal 1 0 }
}
export def test_assert_greater [] {
def test_assert_greater [] {
assert greater 2 1
assert error { assert greater 2 2 }
}
export def test_assert_greater_or_equal [] {
def test_assert_greater_or_equal [] {
assert greater or equal 1 1
assert greater or equal 2 1
assert error { assert greater or equal 0 1 }
}
export def test_assert_length [] {
def test_assert_length [] {
assert length [0, 0, 0] 3
assert error { assert length [0, 0] 3 }
}
export def test_assert_skip [] {
def test_assert_skip [] {
assert skip # This test case is skipped on purpose
}


@ -3,10 +3,10 @@ use std assert
use std log
# A couple of nuances to understand when testing module that exports environment:
# Each 'use' for that module in the test script will execute the export def-env block.
# Each 'use' for that module in the test script will execute the def-env block.
# PWD at the time of the `use` will be what the export def-env block will see.
export def setup [] {
def before-each [] {
# need some directories to play with
let base_path = ($nu.temp-path | path join $"test_dirs_(random uuid)")
let path_a = ($base_path | path join "a")
@ -17,7 +17,7 @@ export def setup [] {
{base_path: $base_path, path_a:$path_a, path_b: $path_b}
}
export def teardown [] {
def after-each [] {
let base_path = $in.base_path
cd $base_path
cd ..
@ -34,16 +34,16 @@ def cur_ring_check [expect_dir:string, expect_position: int scenario:string] {
assert equal $expect_position $env.DIRS_POSITION $"position in ring after ($scenario)"
}
export def test_dirs_command [] {
def test_dirs_command [] {
# careful with order of these statements!
# must capture value of $in before executing `use`s
let $c = $in
# must set PWD *before* doing `use` that will run the export def-env block in dirs module.
# must set PWD *before* doing `use` that will run the def-env block in dirs module.
cd $c.base_path
# must execute these uses for the UOT commands *after* the test and *not* just put them at top of test module.
# the export def-env gets messed up
# the def-env gets messed up
use std dirs
assert equal [$c.base_path] $env.DIRS_LIST "list is just pwd after initialization"
@ -74,10 +74,10 @@ export def test_dirs_command [] {
assert equal (dirs show) [[active path]; [false $c.base_path] [true $c.path_b]] "show table contains expected information"
}
export def test_dirs_next [] {
def test_dirs_next [] {
# must capture value of $in before executing `use`s
let $c = $in
# must set PWD *before* doing `use` that will run the export def-env block in dirs module.
# must set PWD *before* doing `use` that will run the def-env block in dirs module.
cd $c.base_path
assert equal $env.PWD $c.base_path "test setup"
@ -95,10 +95,10 @@ export def test_dirs_next [] {
}
export def test_dirs_cd [] {
def test_dirs_cd [] {
# must capture value of $in before executing `use`s
let $c = $in
# must set PWD *before* doing `use` that will run the export def-env block in dirs module.
# must set PWD *before* doing `use` that will run the def-env block in dirs module.
cd $c.base_path
use std dirs


@ -1,6 +1,6 @@
use std *
export def test_iter_find [] {
def test_iter_find [] {
let hastack1 = [1 2 3 4 5 6 7]
let hastack2 = [nushell rust shell iter std]
let hastack3 = [nu 69 2023-04-20 "std"]
@ -18,7 +18,7 @@ export def test_iter_find [] {
assert equal $res null
}
export def test_iter_intersperse [] {
def test_iter_intersperse [] {
let res = ([1 2 3 4] | iter intersperse 0)
assert equal $res [1 0 2 0 3 0 4]
@ -38,7 +38,7 @@ export def test_iter_intersperse [] {
assert equal $res [4]
}
export def test_iter_scan [] {
def test_iter_scan [] {
let scanned = ([1 2 3] | iter scan 0 {|x, y| $x + $y} -n)
assert equal $scanned [1, 3, 6]
@ -49,18 +49,18 @@ export def test_iter_scan [] {
assert equal $scanned ["a" "ab" "abc" "abcd"]
}
export def test_iter_filter_map [] {
def test_iter_filter_map [] {
let res = ([2 5 "4" 7] | iter filter-map {|it| $it ** 2})
assert equal $res [4 25 49]
let res = (
["3" "42" "69" "n" "x" ""]
["3" "42" "69" "n" "x" ""]
| iter filter-map {|it| $it | into int}
)
assert equal $res [3 42 69]
}
export def test_iter_find_index [] {
def test_iter_find_index [] {
let res = (
["iter", "abc", "shell", "around", "nushell", "std"]
| iter find-index {|x| $x starts-with 's'}
@ -75,7 +75,7 @@ export def test_iter_find_index [] {
assert equal $res 0
}
export def test_iter_zip_with [] {
def test_iter_zip_with [] {
let res = (
[1 2 3] | iter zip-with [2 3 4] {|a, b| $a + $b }
)
@ -98,10 +98,10 @@ export def test_iter_zip_with [] {
[name repo position];
[rust github 1]
[haskell gitlab 2]
]
]
}
export def test_iter_flat_map [] {
def test_iter_flat_map [] {
let res = (
[[1 2 3] [2 3 4] [5 6 7]] | iter flat-map {|it| $it | math sum}
)


@ -1,30 +1,30 @@
use std log
use std assert
export def setup [] {
def before-each [] {
log debug "Setup is running"
{msg: "This is the context"}
}
export def teardown [] {
def after-each [] {
log debug $"Teardown is running. Context: ($in)"
}
export def test_assert_pass [] {
def test_assert_pass [] {
log debug $"Assert is running. Context: ($in)"
}
export def test_assert_skip [] {
def test_assert_skip [] {
log debug $"Assert is running. Context: ($in)"
assert skip
}
export def test_assert_fail_skipped_by_default [] {
def test_assert_fail_skipped_by_default [] {
assert skip # Comment this line if you want to see what happens if a test fails
log debug $"Assert is running. Context: ($in)"
assert false
}
export def unrelated [] {
def unrelated [] {
log error "This should not run"
}


@ -1,13 +1,13 @@
use std
export def test_path_add [] {
def test_path_add [] {
use std assert
let path_name = if "PATH" in $env { "PATH" } else { "Path" }
with-env [$path_name []] {
def get_path [] { $env | get $path_name }
assert equal (get_path) []
std path add "/foo/"
@ -27,14 +27,14 @@ export def test_path_add [] {
let-env $path_name = []
let target_paths = {linux: "foo", windows: "bar", macos: "baz"}
std path add $target_paths
assert equal (get_path) [($target_paths | get $nu.os-info.name)]
}
}
export def test_banner [] {
def test_banner [] {
std assert ((std banner | lines | length) == 15)
}


@ -3,7 +3,7 @@ use std xml xupdate
use std xml xinsert
use std assert
export def setup [] {
def before-each [] {
{sample_xml: ('
<a>
<b>
@ -18,7 +18,7 @@ export def setup [] {
}
}
export def test_xml_xaccess [] {
def test_xml_xaccess [] {
let sample_xml = $in.sample_xml
assert equal ($sample_xml | xaccess [a]) [$sample_xml]
@ -28,7 +28,7 @@ export def test_xml_xaccess [] {
assert equal ($sample_xml | xaccess [* * * {|e| $e.attributes != {}}]) [[tag, attributes, content]; [c, {a: b}, []]]
}
export def test_xml_xupdate [] {
def test_xml_xupdate [] {
let sample_xml = $in.sample_xml
assert equal ($sample_xml | xupdate [*] {|x| $x | update attributes {i: j}}) ('<a i="j"><b><c a="b"></c></b><c></c><d><e>z</e><e>x</e></d></a>' | from xml)
@ -36,7 +36,7 @@ export def test_xml_xupdate [] {
assert equal ($sample_xml | xupdate [* * * {|e| $e.attributes != {}}] {|x| $x | update content ['xml']}) {tag: a, attributes: {}, content: [[tag, attributes, content]; [b, {}, [[tag, attributes, content]; [c, {a: b}, [xml]]]], [c, {}, []], [d, {}, [[tag, attributes, content]; [e, {}, [[tag, attributes, content]; [null, null, z]]], [e, {}, [[tag, attributes, content]; [null, null, x]]]]]]}
}
export def test_xml_xinsert [] {
def test_xml_xinsert [] {
let sample_xml = $in.sample_xml
assert equal ($sample_xml | xinsert [a] {tag: b attributes:{} content: []}) ('<a><b><c a="b"></c></b><c></c><d><e>z</e><e>x</e></d><b></b></a>' | from xml)


@ -66,7 +66,7 @@ export def test [
# run the tests for the standard library
export def "test stdlib" [] {
cargo run -- -c "use std; std run-tests --path crates/nu-std"
cargo run -- -c "use std testing; testing run-tests --path crates/nu-std"
}
# print the pipe input inside backticks, dimmed and italic, as a pretty command
@ -218,10 +218,10 @@ export def "check pr" [
}
try {
if $dataframe {
clippy --dataframe --verbose
} else {
clippy --verbose
if $dataframe {
clippy --dataframe --verbose
} else {
clippy --verbose
}
} catch {
return (report --fail-clippy)
@ -229,12 +229,12 @@ export def "check pr" [
print $"running ('toolkit test' | pretty-print-command)"
try {
if $fast and $dataframe {
test --fast --dataframe
} else if $fast {
test --fast
} else {
test
if $fast and $dataframe {
test --fast --dataframe
} else if $fast {
test --fast
} else {
test
}
} catch {
return (report --fail-test)