use nu_cli::{eval_source, evaluate_commands};
use nu_plugin_core::{Encoder, EncodingType};
use nu_plugin_protocol::{PluginCallResponse, PluginOutput};
use nu_protocol::{
    engine::{EngineState, Stack},
    PipelineData, Signals, Span, Spanned, Value,
};
use nu_std::load_standard_library;
use nu_utils::{get_default_config, get_default_env};
use std::{
    rc::Rc,
    sync::{atomic::AtomicBool, Arc},
};
use std::hint::black_box;
use tango_bench::{benchmark_fn, tango_benchmarks, tango_main, IntoBenchmarks};
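
/// Build an `EngineState` with the default language commands plus the full
/// shell command set, which is what the benchmarks evaluate against.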
fn load_bench_commands() -> EngineState {
    nu_command::add_shell_command_context(nu_cmd_lang::create_default_context())
}
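
/// Create a benchmark engine: default commands, `PWD` set to the current
/// directory, and the `$nu` constant generated.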
fn setup_engine() -> EngineState {
    let mut engine_state = load_bench_commands();
    let cwd = std::env::current_dir()
        .unwrap()
        .into_os_string()
        .into_string()
        .unwrap();

    // parsing config.nu breaks without PWD set, so set a valid path
    engine_state.add_env_var("PWD".into(), Value::string(cwd, Span::test_data()));

    engine_state.generate_nu_constant();

    engine_state
}
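
/// Evaluate `command` once against a fresh engine and stack and return the
/// resulting state, so benchmarks can reuse pre-built variables such as
/// `$record` or `$table`.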
fn setup_stack_and_engine_from_command(command: &str) -> (Stack, EngineState) {
    let mut engine = setup_engine();
    let commands = Spanned {
        span: Span::unknown(),
        item: command.to_string(),
    };

    let mut stack = Stack::new();

    // Support running benchmarks without IR mode
    stack.use_ir = std::env::var_os("NU_DISABLE_IR").is_none();

    evaluate_commands(
        &commands,
        &mut engine,
        &mut stack,
        PipelineData::empty(),
        Default::default(),
    )
    .unwrap();

    (stack, engine)
}

// Generate test table data with `row_cnt` rows and `col_cnt` columns.
fn encoding_test_data(row_cnt: usize, col_cnt: usize) -> Value {
    let record = Value::test_record(
        (0..col_cnt)
            .map(|x| (format!("col_{x}"), Value::test_int(x as i64)))
            .collect(),
    );

    Value::list(vec![record; row_cnt], Span::test_data())
}
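
/// Build a Tango benchmark that evaluates `command` on every iteration,
/// cloning the prepared stack and engine so each run starts from the same state.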
fn bench_command(
    name: &str,
    command: &str,
    stack: Stack,
    engine: EngineState,
) -> impl IntoBenchmarks {
    let commands = Spanned {
        span: Span::unknown(),
        item: command.to_string(),
    };
    [benchmark_fn(name, move |b| {
        let commands = commands.clone();
        let stack = stack.clone();
        let engine = engine.clone();
        b.iter(move || {
            let mut stack = stack.clone();
            let mut engine = engine.clone();
            #[allow(clippy::unit_arg)]
            black_box(
                evaluate_commands(
                    &commands,
                    &mut engine,
                    &mut stack,
                    PipelineData::empty(),
                    Default::default(),
                )
                .unwrap(),
            );
        })
    })]
}
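
/// Build a Tango benchmark that parses and evaluates raw `source` bytes via
/// `eval_source`, as used for the default config and env files.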
fn bench_eval_source(
    name: &str,
    fname: String,
    source: Vec<u8>,
    stack: Stack,
    engine: EngineState,
) -> impl IntoBenchmarks {
    [benchmark_fn(name, move |b| {
        let stack = stack.clone();
        let engine = engine.clone();
        let fname = fname.clone();
        let source = source.clone();
        b.iter(move || {
            let mut stack = stack.clone();
            let mut engine = engine.clone();
            let fname: &str = &fname.clone();
            let source: &[u8] = &source.clone();
            black_box(eval_source(
                &mut engine,
                &mut stack,
                source,
                fname,
                PipelineData::empty(),
                false,
            ));
        })
    })]
}

/// Load the standard library into the engine.
fn bench_load_standard_lib() -> impl IntoBenchmarks {
    [benchmark_fn("load_standard_lib", move |b| {
        let engine = setup_engine();
        b.iter(move || {
            let mut engine = engine.clone();
            load_standard_library(&mut engine)
        })
    })]
}
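
/// Build a Nushell snippet defining a flat record with `n` columns, e.g.
/// `create_flat_record_string(3)` yields
/// `let record = {col_0: 0, col_1: 1, col_2: 2}`.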
fn create_flat_record_string(n: i32) -> String {
    let mut s = String::from("let record = {");
    for i in 0..n {
        s.push_str(&format!("col_{}: {}", i, i));
        if i < n - 1 {
            s.push_str(", ");
        }
    }
    s.push('}');
    s
}
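
/// Build a Nushell snippet defining a record nested `depth` levels deep, e.g.
/// `create_nested_record_string(2)` yields
/// `let record = {col: {col: {col_final: 0}}}`.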
fn create_nested_record_string(depth: i32) -> String {
    let mut s = String::from("let record = {");
    for _ in 0..depth {
        s.push_str("col: {");
    }
    s.push_str("col_final: 0");
    for _ in 0..depth {
        s.push('}');
    }
    s.push('}');
    s
}
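
/// Build a Nushell snippet defining a three-column table with `n` rows, e.g.
/// `create_example_table_nrows(2)` yields
/// `let table = [[foo bar baz]; [0, 1, 0], [0, 1, 1]]`.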
fn create_example_table_nrows(n: i32) -> String {
    let mut s = String::from("let table = [[foo bar baz]; ");
    for i in 0..n {
        s.push_str(&format!("[0, 1, {i}]"));
        if i < n - 1 {
            s.push_str(", ");
        }
    }
    s.push(']');
    s
}
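
/// Measure creating a flat record with `n` columns from a literal.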
fn bench_record_create(n: i32) -> impl IntoBenchmarks {
    bench_command(
        &format!("record_create_{n}"),
        &create_flat_record_string(n),
        Stack::new(),
        setup_engine(),
    )
}
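
/// Measure accessing a single column of a pre-built flat record with `n` columns.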
fn bench_record_flat_access(n: i32) -> impl IntoBenchmarks {
    let setup_command = create_flat_record_string(n);
    let (stack, engine) = setup_stack_and_engine_from_command(&setup_command);
    bench_command(
        &format!("record_flat_access_{n}"),
        "$record.col_0 | ignore",
        stack,
        engine,
    )
}
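
/// Measure following a chain of `n` nested `col` accesses on a pre-built
/// nested record.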
fn bench_record_nested_access(n: i32) -> impl IntoBenchmarks {
    let setup_command = create_nested_record_string(n);
    let (stack, engine) = setup_stack_and_engine_from_command(&setup_command);
    let nested_access = ".col".repeat(n as usize);
    bench_command(
        &format!("record_nested_access_{n}"),
        &format!("$record{} | ignore", nested_access),
        stack,
        engine,
    )
}

fn bench_table_create(n: i32) -> impl IntoBenchmarks {
    bench_command(
        &format!("table_create_{n}"),
        &create_example_table_nrows(n),
        Stack::new(),
        setup_engine(),
    )
}

fn bench_table_get(n: i32) -> impl IntoBenchmarks {
    let setup_command = create_example_table_nrows(n);
    let (stack, engine) = setup_stack_and_engine_from_command(&setup_command);
    bench_command(
        &format!("table_get_{n}"),
        "$table | get bar | math sum | ignore",
        stack,
        engine,
    )
}

fn bench_table_select(n: i32) -> impl IntoBenchmarks {
    let setup_command = create_example_table_nrows(n);
    let (stack, engine) = setup_stack_and_engine_from_command(&setup_command);
    bench_command(
        &format!("table_select_{n}"),
        "$table | select foo baz | ignore",
        stack,
        engine,
    )
}

fn bench_eval_interleave(n: i32) -> impl IntoBenchmarks {
    let engine = setup_engine();
    let stack = Stack::new();
    bench_command(
        &format!("eval_interleave_{n}"),
        &format!("seq 1 {n} | wrap a | interleave {{ seq 1 {n} | wrap b }} | ignore"),
        stack,
        engine,
    )
}
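
/// Same pipeline as `bench_eval_interleave`, but with a `Signals` interrupt
/// handle installed so the interrupt check is part of the measured work.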
fn bench_eval_interleave_with_interrupt(n: i32) -> impl IntoBenchmarks {
    let mut engine = setup_engine();
    engine.set_signals(Signals::new(Arc::new(AtomicBool::new(false))));
    let stack = Stack::new();
    bench_command(
        &format!("eval_interleave_with_interrupt_{n}"),
        &format!("seq 1 {n} | wrap a | interleave {{ seq 1 {n} | wrap b }} | ignore"),
        stack,
        engine,
    )
}

fn bench_eval_for(n: i32) -> impl IntoBenchmarks {
    let engine = setup_engine();
    let stack = Stack::new();
    bench_command(
        &format!("eval_for_{n}"),
        &format!("(for $x in (1..{n}) {{ 1 }}) | ignore"),
        stack,
        engine,
    )
}

fn bench_eval_each(n: i32) -> impl IntoBenchmarks {
    let engine = setup_engine();
    let stack = Stack::new();
    bench_command(
        &format!("eval_each_{n}"),
        &format!("(1..{n}) | each {{|_| 1 }} | ignore"),
        stack,
        engine,
    )
}

fn bench_eval_par_each(n: i32) -> impl IntoBenchmarks {
    let engine = setup_engine();
    let stack = Stack::new();
    bench_command(
        &format!("eval_par_each_{n}"),
        &format!("(1..{}) | par-each -t 2 {{|_| 1 }} | ignore", n),
        stack,
        engine,
    )
}
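
/// Measure parsing and evaluating the bundled default `config.nu`.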
fn bench_eval_default_config() -> impl IntoBenchmarks {
    let default_env = get_default_config().as_bytes().to_vec();
    let fname = "default_config.nu".to_string();
    bench_eval_source(
        "eval_default_config",
        fname,
        default_env,
        Stack::new(),
        setup_engine(),
    )
}

fn bench_eval_default_env() -> impl IntoBenchmarks {
    let default_env = get_default_env().as_bytes().to_vec();
    let fname = "default_env.nu".to_string();
    bench_eval_source(
        "eval_default_env",
        fname,
        default_env,
        Stack::new(),
        setup_engine(),
    )
}
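
/// Measure JSON-encoding a plugin call response that wraps a generated
/// `row_cnt` x `col_cnt` table.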
fn encode_json(row_cnt: usize, col_cnt: usize) -> impl IntoBenchmarks {
    let test_data = Rc::new(PluginOutput::CallResponse(
        0,
        PluginCallResponse::value(encoding_test_data(row_cnt, col_cnt)),
    ));
    let encoder = Rc::new(EncodingType::try_from_bytes(b"json").unwrap());

    [benchmark_fn(
        format!("encode_json_{}_{}", row_cnt, col_cnt),
        move |b| {
            let encoder = encoder.clone();
            let test_data = test_data.clone();
            b.iter(move || {
                let mut res = Vec::new();
                encoder.encode(&*test_data, &mut res).unwrap();
            })
        },
    )]
}

fn encode_msgpack(row_cnt: usize, col_cnt: usize) -> impl IntoBenchmarks {
    let test_data = Rc::new(PluginOutput::CallResponse(
        0,
        PluginCallResponse::value(encoding_test_data(row_cnt, col_cnt)),
    ));
    let encoder = Rc::new(EncodingType::try_from_bytes(b"msgpack").unwrap());

    [benchmark_fn(
        format!("encode_msgpack_{}_{}", row_cnt, col_cnt),
        move |b| {
            let encoder = encoder.clone();
            let test_data = test_data.clone();
            b.iter(move || {
                let mut res = Vec::new();
                encoder.encode(&*test_data, &mut res).unwrap();
            })
        },
    )]
}
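
/// Measure JSON-decoding a plugin call response: the payload is encoded once
/// up front, then decoded from an in-memory cursor on every iteration.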
fn decode_json(row_cnt: usize, col_cnt: usize) -> impl IntoBenchmarks {
    let test_data = PluginOutput::CallResponse(
        0,
        PluginCallResponse::value(encoding_test_data(row_cnt, col_cnt)),
    );
    let encoder = EncodingType::try_from_bytes(b"json").unwrap();
    let mut res = vec![];
    encoder.encode(&test_data, &mut res).unwrap();

    [benchmark_fn(
        format!("decode_json_{}_{}", row_cnt, col_cnt),
        move |b| {
            let res = res.clone();
            b.iter(move || {
                let mut binary_data = std::io::Cursor::new(res.clone());
                binary_data.set_position(0);
                let _: Result<Option<PluginOutput>, _> =
                    black_box(encoder.decode(&mut binary_data));
            })
        },
    )]
}

fn decode_msgpack(row_cnt: usize, col_cnt: usize) -> impl IntoBenchmarks {
    let test_data = PluginOutput::CallResponse(
        0,
        PluginCallResponse::value(encoding_test_data(row_cnt, col_cnt)),
    );
    let encoder = EncodingType::try_from_bytes(b"msgpack").unwrap();
    let mut res = vec![];
    encoder.encode(&test_data, &mut res).unwrap();

    [benchmark_fn(
        format!("decode_msgpack_{}_{}", row_cnt, col_cnt),
        move |b| {
            let res = res.clone();
            b.iter(move || {
                let mut binary_data = std::io::Cursor::new(res.clone());
                binary_data.set_position(0);
                let _: Result<Option<PluginOutput>, _> =
                    black_box(encoder.decode(&mut binary_data));
            })
        },
    )]
}
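
// Register every benchmark group with the Tango harness.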
tango_benchmarks!(
    bench_load_standard_lib(),
    // Data types
    // Record
    bench_record_create(1),
    bench_record_create(10),
    bench_record_create(100),
    bench_record_create(1_000),
    bench_record_flat_access(1),
    bench_record_flat_access(10),
    bench_record_flat_access(100),
    bench_record_flat_access(1_000),
    bench_record_nested_access(1),
    bench_record_nested_access(2),
    bench_record_nested_access(4),
    bench_record_nested_access(8),
    bench_record_nested_access(16),
    bench_record_nested_access(32),
    bench_record_nested_access(64),
    bench_record_nested_access(128),
    // Table
    bench_table_create(1),
    bench_table_create(10),
    bench_table_create(100),
    bench_table_create(1_000),
    bench_table_get(1),
    bench_table_get(10),
    bench_table_get(100),
    bench_table_get(1_000),
    bench_table_select(1),
    bench_table_select(10),
    bench_table_select(100),
    bench_table_select(1_000),
    // Eval
    // Interleave
    bench_eval_interleave(100),
    bench_eval_interleave(1_000),
    bench_eval_interleave(10_000),
    bench_eval_interleave_with_interrupt(100),
    bench_eval_interleave_with_interrupt(1_000),
    bench_eval_interleave_with_interrupt(10_000),
    // For
    bench_eval_for(1),
    bench_eval_for(10),
    bench_eval_for(100),
    bench_eval_for(1_000),
    bench_eval_for(10_000),
    // Each
    bench_eval_each(1),
    bench_eval_each(10),
    bench_eval_each(100),
    bench_eval_each(1_000),
    bench_eval_each(10_000),
    // Par-Each
    bench_eval_par_each(1),
    bench_eval_par_each(10),
    bench_eval_par_each(100),
    bench_eval_par_each(1_000),
    bench_eval_par_each(10_000),
    // Config
    bench_eval_default_config(),
    // Env
    bench_eval_default_env(),
    // Encode
    // Json
    encode_json(100, 5),
    encode_json(10000, 15),
    // MsgPack
    encode_msgpack(100, 5),
    encode_msgpack(10000, 15),
    // Decode
    // Json
    decode_json(100, 5),
    decode_json(10000, 15),
    // MsgPack
    decode_msgpack(100, 5),
    decode_msgpack(10000, 15)
);
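
// `tango_main!` generates the entry point for the Tango benchmark executable.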
tango_main!();