nushell/src/main.rs

mod command;
mod config_files;
mod logger;
mod signals;
mod terminal;
mod test_bins;
#[cfg(test)]
mod tests;
#[cfg(feature = "plugin")]
use crate::config_files::NUSHELL_FOLDER;
use crate::{
    command::parse_commandline_args,
    config_files::{set_config_path, setup_config},
    logger::{configure, logger},
    terminal::acquire_terminal,
};
use command::gather_commandline_args;
use log::Level;
use miette::Result;
#[cfg(feature = "plugin")]
use nu_cli::read_plugin_file;
use nu_cli::{
    evaluate_commands, evaluate_file, evaluate_repl, gather_parent_env_vars, get_init_cwd,
    report_error_new,
};
use nu_command::create_default_context;
use nu_protocol::{util::BufferedReader, PipelineData, RawStream};
use nu_utils::utils::perf;
use signals::{ctrlc_protection, sigquit_protection};
use std::{
    io::BufReader,
    str::FromStr,
    sync::{atomic::AtomicBool, Arc},
};

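// Entry point: build the default engine state, parse the command line, and then
// dispatch to `-c` commands, a script file, or the interactive REPL.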
fn main() -> Result<()> {
    let entire_start_time = std::time::Instant::now();
    let mut start_time = std::time::Instant::now();
    let miette_hook = std::panic::take_hook();
    std::panic::set_hook(Box::new(move |x| {
        crossterm::terminal::disable_raw_mode().expect("unable to disable raw mode");
        miette_hook(x);
    }));

    // Get initial current working directory.
    let init_cwd = get_init_cwd();
    let mut engine_state = create_default_context();

    // Custom additions
    let delta = {
        let mut working_set = nu_protocol::engine::StateWorkingSet::new(&engine_state);
        working_set.add_decl(Box::new(nu_cli::NuHighlight));
        working_set.add_decl(Box::new(nu_cli::Print));
        working_set.render()
    };

    if let Err(err) = engine_state.merge_delta(delta) {
        report_error_new(&engine_state, &err);
    }

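    // Install Ctrl-C / SIGQUIT protection before any user code runs, then split the
    // command line into nushell's own flags, the script name, and the script's arguments.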
    let ctrlc = Arc::new(AtomicBool::new(false));
    // TODO: make this conditional in the future
    ctrlc_protection(&mut engine_state, &ctrlc);
    sigquit_protection(&mut engine_state);

    let (args_to_nushell, script_name, args_to_script) = gather_commandline_args();
    let parsed_nu_cli_args = parse_commandline_args(&args_to_nushell.join(" "), &mut engine_state)
        .unwrap_or_else(|_| std::process::exit(1));

    let use_color = engine_state.get_config().use_ansi_coloring;
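    // When --log-level is given, configure the logger; the perf() timings below are
    // emitted through it at the info level.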
    if let Some(level) = parsed_nu_cli_args.log_level.map(|level| level.item) {
        let level = if Level::from_str(&level).is_ok() {
            level
        } else {
            eprintln!(
                "ERROR: log library did not recognize log level '{level}', using default 'info'"
            );
            "info".to_string()
        };

        let target = parsed_nu_cli_args
            .log_target
            .map(|target| target.item)
            .unwrap_or_else(|| "stderr".to_string());

        logger(|builder| configure(&level, &target, builder))?;
        // info!("start logging {}:{}:{}", file!(), line!(), column!());
        perf(
            "start logging",
            start_time,
            file!(),
            line!(),
            column!(),
            use_color,
        );
    }

    start_time = std::time::Instant::now();
    set_config_path(
        &mut engine_state,
        &init_cwd,
        "config.nu",
        "config-path",
        &parsed_nu_cli_args.config_file,
    );

    set_config_path(
        &mut engine_state,
        &init_cwd,
        "env.nu",
        "env-path",
        &parsed_nu_cli_args.env_file,
    );
    perf(
        "set_config_path",
        start_time,
        file!(),
        line!(),
        column!(),
        use_color,
    );
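    // Take ownership of the terminal only when this will be an interactive session
    // (no -c commands and either no script or an explicit --interactive flag).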
    start_time = std::time::Instant::now();
    // keep this condition in sync with the branches below
    acquire_terminal(
        parsed_nu_cli_args.commands.is_none()
            && (script_name.is_empty() || parsed_nu_cli_args.interactive_shell.is_some()),
    );
    perf(
        "acquire_terminal",
        start_time,
        file!(),
        line!(),
        column!(),
        use_color,
    );
    start_time = std::time::Instant::now();
    if let Some(t) = parsed_nu_cli_args.threads {
        // 0 means to let rayon decide how many threads to use
        let threads = t.as_i64().unwrap_or(0);
        rayon::ThreadPoolBuilder::new()
            .num_threads(threads as usize)
            .build_global()
            .expect("error setting number of threads");
    }
    perf(
        "set rayon threads",
        start_time,
        file!(),
        line!(),
        column!(),
        use_color,
    );
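    // --testbin runs one of the small helper binaries used by the test suite
    // and exits without starting a shell.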
    start_time = std::time::Instant::now();
    if let Some(testbin) = &parsed_nu_cli_args.testbin {
        // Call out to the correct testbin
        match testbin.item.as_str() {
            "echo_env" => test_bins::echo_env(true),
            "echo_env_stderr" => test_bins::echo_env(false),
            "cococo" => test_bins::cococo(),
            "meow" => test_bins::meow(),
            "meowb" => test_bins::meowb(),
            "relay" => test_bins::relay(),
            "iecho" => test_bins::iecho(),
            "fail" => test_bins::fail(),
            "nonu" => test_bins::nonu(),
            "chop" => test_bins::chop(),
            "repeater" => test_bins::repeater(),
            "nu_repl" => test_bins::nu_repl(),
            _ => std::process::exit(1),
        }
        std::process::exit(0)
    }
    perf(
        "run test_bins",
        start_time,
        file!(),
        line!(),
        column!(),
        use_color,
    );
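    // With --stdin, wrap this process's stdin as an external stream so it becomes the
    // pipeline input; otherwise start from an empty pipeline.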
    start_time = std::time::Instant::now();
    let input = if let Some(redirect_stdin) = &parsed_nu_cli_args.redirect_stdin {
        let stdin = std::io::stdin();
        let buf_reader = BufReader::new(stdin);

        PipelineData::ExternalStream {
            stdout: Some(RawStream::new(
                Box::new(BufferedReader::new(buf_reader)),
                Some(ctrlc),
                redirect_stdin.span,
                None,
            )),
            stderr: None,
            exit_code: None,
            span: redirect_stdin.span,
            metadata: None,
            trim_end_newline: false,
        }
    } else {
        PipelineData::empty()
    };
    perf(
        "redirect stdin",
        start_time,
        file!(),
        line!(),
        column!(),
        use_color,
    );
    start_time = std::time::Instant::now();
    // First, set up env vars as strings only
    gather_parent_env_vars(&mut engine_state, &init_cwd);
    perf(
        "gather env vars",
        start_time,
        file!(),
        line!(),
        column!(),
        use_color,
    );

    let mut stack = nu_protocol::engine::Stack::new();

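    // Dispatch on how nushell was invoked: -c commands, a script file, or the
    // interactive REPL.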
    if let Some(commands) = &parsed_nu_cli_args.commands {
        start_time = std::time::Instant::now();
        #[cfg(feature = "plugin")]
        read_plugin_file(
            &mut engine_state,
            &mut stack,
            parsed_nu_cli_args.plugin_file,
            NUSHELL_FOLDER,
        );
        perf(
            "read plugins",
            start_time,
            file!(),
            line!(),
            column!(),
            use_color,
        );
        start_time = std::time::Instant::now();
        // only want to load config and env if the respective argument is provided.
        if parsed_nu_cli_args.env_file.is_some() {
            config_files::read_config_file(
                &mut engine_state,
                &mut stack,
                parsed_nu_cli_args.env_file,
                true,
            );
        } else {
            config_files::read_default_env_file(&mut engine_state, &mut stack)
        }
        perf(
            "read env.nu",
            start_time,
            file!(),
            line!(),
            column!(),
            use_color,
        );
        start_time = std::time::Instant::now();
        if parsed_nu_cli_args.config_file.is_some() {
            config_files::read_config_file(
                &mut engine_state,
                &mut stack,
                parsed_nu_cli_args.config_file,
                false,
            );
        }
        perf(
            "read config.nu",
            start_time,
            file!(),
            line!(),
            column!(),
            use_color,
        );
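        // Evaluate the -c commands against the prepared engine state and exit with
        // whatever code they produce.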
        start_time = std::time::Instant::now();
        let ret_val = evaluate_commands(
            commands,
            &mut engine_state,
            &mut stack,
            input,
            parsed_nu_cli_args.table_mode,
        );
        perf(
            "evaluate_commands",
            start_time,
            file!(),
            line!(),
            column!(),
            use_color,
        );
        match ret_val {
            Ok(Some(exit_code)) => std::process::exit(exit_code as i32),
            Ok(None) => Ok(()),
            Err(e) => Err(e),
        }
    } else if !script_name.is_empty() && parsed_nu_cli_args.interactive_shell.is_none() {
        start_time = std::time::Instant::now();
        #[cfg(feature = "plugin")]
        read_plugin_file(
            &mut engine_state,
            &mut stack,
            parsed_nu_cli_args.plugin_file,
            NUSHELL_FOLDER,
        );
        perf(
            "read plugins",
            start_time,
            file!(),
            line!(),
            column!(),
            use_color,
        );
        start_time = std::time::Instant::now();
        // only want to load config and env if the respective argument is provided.
        if parsed_nu_cli_args.env_file.is_some() {
            config_files::read_config_file(
                &mut engine_state,
                &mut stack,
                parsed_nu_cli_args.env_file,
                true,
            );
        } else {
            config_files::read_default_env_file(&mut engine_state, &mut stack)
        }
        perf(
            "read env.nu",
            start_time,
            file!(),
            line!(),
            column!(),
            use_color,
        );
        start_time = std::time::Instant::now();
        if parsed_nu_cli_args.config_file.is_some() {
            config_files::read_config_file(
                &mut engine_state,
                &mut stack,
                parsed_nu_cli_args.config_file,
                false,
            );
        }
        perf(
            "read config.nu",
            start_time,
            file!(),
            line!(),
            column!(),
            use_color,
        );
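        // Run the script file, forwarding any arguments that followed it on the
        // command line.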
        start_time = std::time::Instant::now();
        let ret_val = evaluate_file(
            script_name,
            &args_to_script,
            &mut engine_state,
            &mut stack,
            input,
        );
        perf(
            "evaluate_file",
            start_time,
            file!(),
            line!(),
            column!(),
            use_color,
        );
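        // If the script left a non-zero LAST_EXIT_CODE behind, exit with that code.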
        start_time = std::time::Instant::now();
        let last_exit_code = stack.get_env_var(&engine_state, "LAST_EXIT_CODE");
        if let Some(last_exit_code) = last_exit_code {
            let value = last_exit_code.as_integer();
            if let Ok(value) = value {
                if value != 0 {
                    std::process::exit(value as i32);
                }
            }
        }
        perf(
            "get exit code",
            start_time,
            file!(),
            line!(),
            column!(),
            use_color,
        );

        ret_val
    } else {
        start_time = std::time::Instant::now();
        setup_config(
            &mut engine_state,
            &mut stack,
            #[cfg(feature = "plugin")]
            parsed_nu_cli_args.plugin_file,
            parsed_nu_cli_args.config_file,
            parsed_nu_cli_args.env_file,
            parsed_nu_cli_args.login_shell.is_some(),
        );

        // Reload use_color from config in case it's different from the default value
        let use_color = engine_state.get_config().use_ansi_coloring;
        perf(
            "setup_config",
            start_time,
            file!(),
            line!(),
            column!(),
            use_color,
        );
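        // Enter the read-eval-print loop; entire_start_time is passed along so the
        // REPL can report how long startup took.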
        start_time = std::time::Instant::now();
        let ret_val = evaluate_repl(
            &mut engine_state,
            &mut stack,
            config_files::NUSHELL_FOLDER,
            parsed_nu_cli_args.execute,
            entire_start_time,
        );
        perf(
            "evaluate_repl",
            start_time,
            file!(),
            line!(),
            column!(),
            use_color,
        );

        ret_val
    }
}