2019-12-02 16:14:05 +00:00
|
|
|
use crate::commands::classified::pipeline::run_pipeline;
|
2019-11-24 22:19:12 +00:00
|
|
|
use crate::commands::classified::ClassifiedInputStream;
|
2019-07-04 03:06:43 +00:00
|
|
|
use crate::commands::plugin::JsonRpc;
|
2019-08-09 07:54:21 +00:00
|
|
|
use crate::commands::plugin::{PluginCommand, PluginSink};
|
2019-08-15 05:02:02 +00:00
|
|
|
use crate::commands::whole_stream_command;
|
2019-05-23 04:30:43 +00:00
|
|
|
use crate::context::Context;
|
Extract core stuff into own crates
This commit extracts five new crates:
- nu-source, which contains the core source-code handling logic in Nu,
including Text, Span, and also the pretty.rs-based debug logic
- nu-parser, which is the parser and expander logic
- nu-protocol, which is the bulk of the types and basic conveniences
used by plugins
- nu-errors, which contains ShellError, ParseError and error handling
conveniences
- nu-textview, which is the textview plugin extracted into a crate
One of the major consequences of this refactor is that it's no longer
possible to `impl X for Spanned<Y>` outside of the `nu-source` crate, so
a lot of types became more concrete (Value became a concrete type
instead of Spanned<Value>, for example).
This also turned a number of inherent methods in the main nu crate into
plain functions (impl Value {} became a bunch of functions in the
`value` namespace in `crate::data::value`).
2019-11-26 02:30:48 +00:00
|
|
|
use crate::data::config;
|
2019-10-08 20:50:28 +00:00
|
|
|
#[cfg(not(feature = "starship-prompt"))]
|
2019-07-03 17:37:09 +00:00
|
|
|
use crate::git::current_branch;
|
|
|
|
use crate::prelude::*;
|
Extract core stuff into own crates
This commit extracts five new crates:
- nu-source, which contains the core source-code handling logic in Nu,
including Text, Span, and also the pretty.rs-based debug logic
- nu-parser, which is the parser and expander logic
- nu-protocol, which is the bulk of the types and basic conveniences
used by plugins
- nu-errors, which contains ShellError, ParseError and error handling
conveniences
- nu-textview, which is the textview plugin extracted into a crate
One of the major consequences of this refactor is that it's no longer
possible to `impl X for Spanned<Y>` outside of the `nu-source` crate, so
a lot of types became more concrete (Value became a concrete type
instead of Spanned<Value>, for example).
This also turned a number of inherent methods in the main nu crate into
plain functions (impl Value {} became a bunch of functions in the
`value` namespace in `crate::data::value`).
2019-11-26 02:30:48 +00:00
|
|
|
use nu_errors::ShellError;
|
|
|
|
use nu_parser::{
|
|
|
|
expand_syntax, hir, ClassifiedCommand, ClassifiedPipeline, InternalCommand, PipelineShape,
|
|
|
|
TokenNode, TokensIterator,
|
|
|
|
};
|
2019-12-02 16:14:05 +00:00
|
|
|
use nu_protocol::{Signature, UntaggedValue, Value};
|
2019-05-23 04:30:43 +00:00
|
|
|
|
2019-10-28 14:46:50 +00:00
|
|
|
use log::{debug, log_enabled, trace};
|
2019-05-23 04:30:43 +00:00
|
|
|
use rustyline::error::ReadlineError;
|
2019-08-30 18:27:15 +00:00
|
|
|
use rustyline::{self, config::Configurer, config::EditMode, ColorMode, Config, Editor};
|
2019-05-23 04:30:43 +00:00
|
|
|
use std::error::Error;
|
2019-07-03 17:37:09 +00:00
|
|
|
use std::io::{BufRead, BufReader, Write};
|
2019-05-24 07:29:16 +00:00
|
|
|
use std::iter::Iterator;
|
2019-09-19 20:28:48 +00:00
|
|
|
use std::path::PathBuf;
|
2019-10-13 04:12:43 +00:00
|
|
|
use std::sync::atomic::Ordering;
|
2019-05-23 04:30:43 +00:00
|
|
|
|
2019-07-04 03:06:43 +00:00
|
|
|
fn load_plugin(path: &std::path::Path, context: &mut Context) -> Result<(), ShellError> {
|
|
|
|
let mut child = std::process::Command::new(path)
|
2019-07-03 17:37:09 +00:00
|
|
|
.stdin(std::process::Stdio::piped())
|
|
|
|
.stdout(std::process::Stdio::piped())
|
|
|
|
.spawn()
|
|
|
|
.expect("Failed to spawn child process");
|
|
|
|
|
|
|
|
let stdin = child.stdin.as_mut().expect("Failed to open stdin");
|
|
|
|
let stdout = child.stdout.as_mut().expect("Failed to open stdout");
|
|
|
|
|
|
|
|
let mut reader = BufReader::new(stdout);
|
|
|
|
|
|
|
|
let request = JsonRpc::new("config", Vec::<Value>::new());
|
2019-08-17 03:53:39 +00:00
|
|
|
let request_raw = serde_json::to_string(&request)?;
|
2019-07-03 17:37:09 +00:00
|
|
|
stdin.write(format!("{}\n", request_raw).as_bytes())?;
|
2019-08-17 03:53:39 +00:00
|
|
|
let path = dunce::canonicalize(path)?;
|
2019-07-03 17:37:09 +00:00
|
|
|
|
|
|
|
let mut input = String::new();
|
2019-08-31 21:19:59 +00:00
|
|
|
let result = match reader.read_line(&mut input) {
|
Add support for ~ expansion
This ended up being a bit of a yak shave. The basic idea in this commit is to
expand `~` in paths, but only in paths.
The way this is accomplished is by doing the expansion inside of the code that
parses literal syntax for `SyntaxType::Path`.
As a quick refresher: every command is entitled to expand its arguments in a
custom way. While this could in theory be used for general-purpose macros,
today the expansion facility is limited to syntactic hints.
For example, the syntax `where cpu > 0` expands under the hood to
`where { $it.cpu > 0 }`. This happens because the first argument to `where`
is defined as a `SyntaxType::Block`, and the parser coerces binary expressions
whose left-hand-side looks like a member into a block when the command is
expecting one.
This is mildly more magical than what most programming languages would do,
but we believe that it makes sense to allow commands to fine-tune the syntax
because of the domain nushell is in (command-line shells).
The syntactic expansions supported by this facility are relatively limited.
For example, we don't allow `$it` to become a bare word, simply because the
command asks for a string in the relevant position. That would quickly
become more confusing than it's worth.
This PR adds a new `SyntaxType` rule: `SyntaxType::Path`. When a command
declares a parameter as a `SyntaxType::Path`, string literals and bare
words passed as an argument to that parameter are processed using the
path expansion rules. Right now, that only means that `~` is expanded into
the home directory, but additional rules are possible in the future.
By restricting this expansion to a syntactic expansion when passed as an
argument to a command expecting a path, we avoid making `~` a generally
reserved character. This will also allow us to give good tab completion
for paths with `~` characters in them when a command is expecting a path.
In order to accomplish the above, this commit changes the parsing functions
to take a `Context` instead of just a `CommandRegistry`. From the perspective
of macro expansion, you can think of the `CommandRegistry` as a dictionary
of in-scope macros, and the `Context` as the compile-time state used in
expansion. This could gain additional functionality over time as we find
more uses for the expansion system.
2019-08-26 19:21:03 +00:00
|
|
|
Ok(count) => {
|
|
|
|
trace!("processing response ({} bytes)", count);
|
2019-09-14 17:48:24 +00:00
|
|
|
trace!("response: {}", input);
|
Add support for ~ expansion
This ended up being a bit of a yak shave. The basic idea in this commit is to
expand `~` in paths, but only in paths.
The way this is accomplished is by doing the expansion inside of the code that
parses literal syntax for `SyntaxType::Path`.
As a quick refresher: every command is entitled to expand its arguments in a
custom way. While this could in theory be used for general-purpose macros,
today the expansion facility is limited to syntactic hints.
For example, the syntax `where cpu > 0` expands under the hood to
`where { $it.cpu > 0 }`. This happens because the first argument to `where`
is defined as a `SyntaxType::Block`, and the parser coerces binary expressions
whose left-hand-side looks like a member into a block when the command is
expecting one.
This is mildly more magical than what most programming languages would do,
but we believe that it makes sense to allow commands to fine-tune the syntax
because of the domain nushell is in (command-line shells).
The syntactic expansions supported by this facility are relatively limited.
For example, we don't allow `$it` to become a bare word, simply because the
command asks for a string in the relevant position. That would quickly
become more confusing than it's worth.
This PR adds a new `SyntaxType` rule: `SyntaxType::Path`. When a command
declares a parameter as a `SyntaxType::Path`, string literals and bare
words passed as an argument to that parameter are processed using the
path expansion rules. Right now, that only means that `~` is expanded into
the home directory, but additional rules are possible in the future.
By restricting this expansion to a syntactic expansion when passed as an
argument to a command expecting a path, we avoid making `~` a generally
reserved character. This will also allow us to give good tab completion
for paths with `~` characters in them when a command is expecting a path.
In order to accomplish the above, this commit changes the parsing functions
to take a `Context` instead of just a `CommandRegistry`. From the perspective
of macro expansion, you can think of the `CommandRegistry` as a dictionary
of in-scope macros, and the `Context` as the compile-time state used in
expansion. This could gain additional functionality over time as we find
more uses for the expansion system.
2019-08-26 19:21:03 +00:00
|
|
|
|
2019-08-02 19:15:07 +00:00
|
|
|
let response = serde_json::from_str::<JsonRpc<Result<Signature, ShellError>>>(&input);
|
2019-07-03 17:37:09 +00:00
|
|
|
match response {
|
|
|
|
Ok(jrpc) => match jrpc.params {
|
|
|
|
Ok(params) => {
|
2019-07-04 03:06:43 +00:00
|
|
|
let fname = path.to_string_lossy();
|
Add support for ~ expansion
This ended up being a bit of a yak shave. The basic idea in this commit is to
expand `~` in paths, but only in paths.
The way this is accomplished is by doing the expansion inside of the code that
parses literal syntax for `SyntaxType::Path`.
As a quick refresher: every command is entitled to expand its arguments in a
custom way. While this could in theory be used for general-purpose macros,
today the expansion facility is limited to syntactic hints.
For example, the syntax `where cpu > 0` expands under the hood to
`where { $it.cpu > 0 }`. This happens because the first argument to `where`
is defined as a `SyntaxType::Block`, and the parser coerces binary expressions
whose left-hand-side looks like a member into a block when the command is
expecting one.
This is mildly more magical than what most programming languages would do,
but we believe that it makes sense to allow commands to fine-tune the syntax
because of the domain nushell is in (command-line shells).
The syntactic expansions supported by this facility are relatively limited.
For example, we don't allow `$it` to become a bare word, simply because the
command asks for a string in the relevant position. That would quickly
become more confusing than it's worth.
This PR adds a new `SyntaxType` rule: `SyntaxType::Path`. When a command
declares a parameter as a `SyntaxType::Path`, string literals and bare
words passed as an argument to that parameter are processed using the
path expansion rules. Right now, that only means that `~` is expanded into
the home directory, but additional rules are possible in the future.
By restricting this expansion to a syntactic expansion when passed as an
argument to a command expecting a path, we avoid making `~` a generally
reserved character. This will also allow us to give good tab completion
for paths with `~` characters in them when a command is expecting a path.
In order to accomplish the above, this commit changes the parsing functions
to take a `Context` instead of just a `CommandRegistry`. From the perspective
of macro expansion, you can think of the `CommandRegistry` as a dictionary
of in-scope macros, and the `Context` as the compile-time state used in
expansion. This could gain additional functionality over time as we find
more uses for the expansion system.
2019-08-26 19:21:03 +00:00
|
|
|
|
|
|
|
trace!("processing {:?}", params);
|
|
|
|
|
2019-09-23 22:01:40 +00:00
|
|
|
let name = params.name.clone();
|
|
|
|
let fname = fname.to_string();
|
|
|
|
|
Overhaul the expansion system
The main thrust of this (very large) commit is an overhaul of the
expansion system.
The parsing pipeline is:
- Lightly parse the source file for atoms, basic delimiters and pipeline
structure into a token tree
- Expand the token tree into a HIR (high-level intermediate
representation) based upon the baseline syntax rules for expressions
and the syntactic shape of commands.
Somewhat non-traditionally, nu doesn't have an AST at all. It goes
directly from the token tree, which doesn't represent many important
distinctions (like the difference between `hello` and `5KB`) directly
into a high-level representation that doesn't have a direct
correspondence to the source code.
At a high level, nu commands work like macros, in the sense that the
syntactic shape of the invocation of a command depends on the
definition of a command.
However, commands do not have the ability to perform unrestricted
expansions of the token tree. Instead, they describe their arguments in
terms of syntactic shapes, and the expander expands the token tree into
HIR based upon that definition.
For example, the `where` command says that it takes a block as its first
required argument, and the description of the block syntactic shape
expands the syntax `cpu > 10` into HIR that represents
`{ $it.cpu > 10 }`.
This commit overhauls that system so that the syntactic shapes are
described in terms of a few new traits (`ExpandSyntax` and
`ExpandExpression` are the primary ones) that are more composable than
the previous system.
The first big win of this new system is the addition of the `ColumnPath`
shape, which looks like `cpu."max ghz"` or `package.version`.
Previously, while a variable path could look like `$it.cpu."max ghz"`,
the tail of a variable path could not be easily reused in other
contexts. Now, that tail is its own syntactic shape, and it can be used
as part of a command's signature.
This cleans up commands like `inc`, `add` and `edit` as well as
shorthand blocks, which can now look like `| where cpu."max ghz" > 10`
2019-09-17 22:26:27 +00:00
|
|
|
if let Some(_) = context.get_command(&name) {
|
2019-09-23 22:01:40 +00:00
|
|
|
trace!("plugin {:?} already loaded.", &name);
|
2019-07-03 17:37:09 +00:00
|
|
|
} else {
|
2019-09-23 22:01:40 +00:00
|
|
|
if params.is_filter {
|
|
|
|
context.add_commands(vec![whole_stream_command(
|
|
|
|
PluginCommand::new(name, fname, params),
|
|
|
|
)]);
|
|
|
|
} else {
|
|
|
|
context.add_commands(vec![whole_stream_command(PluginSink::new(
|
|
|
|
name, fname, params,
|
|
|
|
))]);
|
|
|
|
};
|
2019-07-03 17:37:09 +00:00
|
|
|
}
|
2019-09-23 22:01:40 +00:00
|
|
|
Ok(())
|
2019-07-03 17:37:09 +00:00
|
|
|
}
|
|
|
|
Err(e) => Err(e),
|
|
|
|
},
|
2019-09-23 22:27:18 +00:00
|
|
|
Err(e) => {
|
|
|
|
trace!("incompatible plugin {:?}", input);
|
Overhaul the coloring system
This commit replaces the previous naive coloring system with a coloring
system that is more aligned with the parser.
The main benefit of this change is that it allows us to use parsing
rules to decide how to color tokens.
For example, consider the following syntax:
```
$ ps | where cpu > 10
```
Ideally, we could color `cpu` like a column name and not a string,
because `cpu > 10` is a shorthand block syntax that expands to
`{ $it.cpu > 10 }`.
The way that we know that it's a shorthand block is that the `where`
command declares that its first parameter is a `SyntaxShape::Block`,
which allows the shorthand block form.
In order to accomplish this, we need to color the tokens in a way that
corresponds to their expanded semantics, which means that high-fidelity
coloring requires expansion.
This commit adds a `ColorSyntax` trait that corresponds to the
`ExpandExpression` trait. The semantics are fairly similar, with a few
differences.
First `ExpandExpression` consumes N tokens and returns a single
`hir::Expression`. `ColorSyntax` consumes N tokens and writes M
`FlatShape` tokens to the output.
Concretely, for syntax like `[1 2 3]`
- `ExpandExpression` takes a single token node and produces a single
`hir::Expression`
- `ColorSyntax` takes the same token node and emits 7 `FlatShape`s
(open delimiter, int, whitespace, int, whitespace, int, close
delimiter)
Second, `ColorSyntax` is more willing to plow through failures than
`ExpandExpression`.
In particular, consider syntax like
```
$ ps | where cpu >
```
In this case
- `ExpandExpression` will see that the `where` command is expecting a
block, see that it's not a literal block and try to parse it as a
shorthand block. It will successfully find a member followed by an
infix operator, but not a following expression. That means that the
entire pipeline part fails to parse and is a syntax error.
- `ColorSyntax` will also try to parse it as a shorthand block and
ultimately fail, but it will fall back to "backoff coloring mode",
which parsing any unidentified tokens in an unfallible, simple way. In
this case, `cpu` will color as a string and `>` will color as an
operator.
Finally, it's very important that coloring a pipeline infallibly colors
the entire string, doesn't fail, and doesn't get stuck in an infinite
loop.
In order to accomplish this, this PR separates `ColorSyntax`, which is
infallible from `FallibleColorSyntax`, which might fail. This allows the
type system to let us know if our coloring rules bottom out at at an
infallible rule.
It's not perfect: it's still possible for the coloring process to get
stuck or consume tokens non-atomically. I intend to reduce the
opportunity for those problems in a future commit. In the meantime, the
current system catches a number of mistakes (like trying to use a
fallible coloring rule in a loop without thinking about the possibility
that it will never terminate).
2019-10-06 20:22:50 +00:00
|
|
|
Err(ShellError::untagged_runtime_error(format!(
|
|
|
|
"Error: {:?}",
|
|
|
|
e
|
|
|
|
)))
|
2019-09-23 22:27:18 +00:00
|
|
|
}
|
2019-07-03 17:37:09 +00:00
|
|
|
}
|
|
|
|
}
|
Overhaul the coloring system
This commit replaces the previous naive coloring system with a coloring
system that is more aligned with the parser.
The main benefit of this change is that it allows us to use parsing
rules to decide how to color tokens.
For example, consider the following syntax:
```
$ ps | where cpu > 10
```
Ideally, we could color `cpu` like a column name and not a string,
because `cpu > 10` is a shorthand block syntax that expands to
`{ $it.cpu > 10 }`.
The way that we know that it's a shorthand block is that the `where`
command declares that its first parameter is a `SyntaxShape::Block`,
which allows the shorthand block form.
In order to accomplish this, we need to color the tokens in a way that
corresponds to their expanded semantics, which means that high-fidelity
coloring requires expansion.
This commit adds a `ColorSyntax` trait that corresponds to the
`ExpandExpression` trait. The semantics are fairly similar, with a few
differences.
First `ExpandExpression` consumes N tokens and returns a single
`hir::Expression`. `ColorSyntax` consumes N tokens and writes M
`FlatShape` tokens to the output.
Concretely, for syntax like `[1 2 3]`
- `ExpandExpression` takes a single token node and produces a single
`hir::Expression`
- `ColorSyntax` takes the same token node and emits 7 `FlatShape`s
(open delimiter, int, whitespace, int, whitespace, int, close
delimiter)
Second, `ColorSyntax` is more willing to plow through failures than
`ExpandExpression`.
In particular, consider syntax like
```
$ ps | where cpu >
```
In this case
- `ExpandExpression` will see that the `where` command is expecting a
block, see that it's not a literal block and try to parse it as a
shorthand block. It will successfully find a member followed by an
infix operator, but not a following expression. That means that the
entire pipeline part fails to parse and is a syntax error.
- `ColorSyntax` will also try to parse it as a shorthand block and
ultimately fail, but it will fall back to "backoff coloring mode",
which parsing any unidentified tokens in an unfallible, simple way. In
this case, `cpu` will color as a string and `>` will color as an
operator.
Finally, it's very important that coloring a pipeline infallibly colors
the entire string, doesn't fail, and doesn't get stuck in an infinite
loop.
In order to accomplish this, this PR separates `ColorSyntax`, which is
infallible from `FallibleColorSyntax`, which might fail. This allows the
type system to let us know if our coloring rules bottom out at at an
infallible rule.
It's not perfect: it's still possible for the coloring process to get
stuck or consume tokens non-atomically. I intend to reduce the
opportunity for those problems in a future commit. In the meantime, the
current system catches a number of mistakes (like trying to use a
fallible coloring rule in a loop without thinking about the possibility
that it will never terminate).
2019-10-06 20:22:50 +00:00
|
|
|
Err(e) => Err(ShellError::untagged_runtime_error(format!(
|
|
|
|
"Error: {:?}",
|
|
|
|
e
|
|
|
|
))),
|
2019-08-31 21:19:59 +00:00
|
|
|
};
|
|
|
|
|
2019-09-01 16:45:30 +00:00
|
|
|
let _ = child.wait();
|
|
|
|
|
2019-08-31 21:19:59 +00:00
|
|
|
result
|
2019-07-03 17:37:09 +00:00
|
|
|
}
|
|
|
|
|
2019-09-12 03:20:42 +00:00
|
|
|
fn search_paths() -> Vec<std::path::PathBuf> {
|
|
|
|
let mut search_paths = Vec::new();
|
2019-07-04 03:06:43 +00:00
|
|
|
|
2019-08-27 01:46:38 +00:00
|
|
|
#[cfg(debug_assertions)]
|
|
|
|
{
|
|
|
|
// Use our debug plugins in debug mode
|
|
|
|
let mut path = std::path::PathBuf::from(".");
|
|
|
|
path.push("target");
|
|
|
|
path.push("debug");
|
2019-09-12 23:49:29 +00:00
|
|
|
|
|
|
|
if path.exists() {
|
|
|
|
search_paths.push(path);
|
|
|
|
}
|
2019-08-27 01:46:38 +00:00
|
|
|
}
|
2019-07-03 17:37:09 +00:00
|
|
|
|
2019-08-27 01:46:38 +00:00
|
|
|
#[cfg(not(debug_assertions))]
|
|
|
|
{
|
2019-11-10 03:44:05 +00:00
|
|
|
use std::env;
|
|
|
|
|
|
|
|
match env::var_os("PATH") {
|
|
|
|
Some(paths) => {
|
|
|
|
search_paths = env::split_paths(&paths).collect::<Vec<_>>();
|
|
|
|
}
|
|
|
|
None => println!("PATH is not defined in the environment."),
|
|
|
|
}
|
|
|
|
|
2019-08-27 01:46:38 +00:00
|
|
|
// Use our release plugins in release mode
|
|
|
|
let mut path = std::path::PathBuf::from(".");
|
|
|
|
path.push("target");
|
|
|
|
path.push("release");
|
2019-09-12 23:49:29 +00:00
|
|
|
|
|
|
|
if path.exists() {
|
|
|
|
search_paths.push(path);
|
|
|
|
}
|
2019-09-12 03:20:42 +00:00
|
|
|
}
|
|
|
|
|
2019-09-12 23:49:29 +00:00
|
|
|
// permit Nu finding and picking up development plugins
|
|
|
|
// if there are any first.
|
|
|
|
search_paths.reverse();
|
2019-09-12 03:20:42 +00:00
|
|
|
search_paths
|
|
|
|
}
|
|
|
|
|
|
|
|
fn load_plugins(context: &mut Context) -> Result<(), ShellError> {
|
|
|
|
let opts = glob::MatchOptions {
|
|
|
|
case_sensitive: false,
|
|
|
|
require_literal_separator: false,
|
|
|
|
require_literal_leading_dot: false,
|
|
|
|
};
|
|
|
|
|
2019-11-02 03:41:58 +00:00
|
|
|
set_env_from_config();
|
2019-11-02 00:36:21 +00:00
|
|
|
|
2019-09-12 03:20:42 +00:00
|
|
|
for path in search_paths() {
|
2019-09-12 03:34:47 +00:00
|
|
|
let mut pattern = path.to_path_buf();
|
|
|
|
|
2019-09-12 07:11:38 +00:00
|
|
|
pattern.push(std::path::Path::new("nu_plugin_[a-z]*"));
|
2019-09-12 03:20:42 +00:00
|
|
|
|
|
|
|
match glob::glob_with(&pattern.to_string_lossy(), opts) {
|
|
|
|
Err(_) => {}
|
|
|
|
Ok(binaries) => {
|
|
|
|
for bin in binaries.filter_map(Result::ok) {
|
|
|
|
if !bin.is_file() {
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
|
2019-09-12 03:34:47 +00:00
|
|
|
let bin_name = {
|
|
|
|
if let Some(name) = bin.file_name() {
|
|
|
|
match name.to_str() {
|
|
|
|
Some(raw) => raw,
|
|
|
|
None => continue,
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
};
|
2019-09-12 03:20:42 +00:00
|
|
|
|
2019-09-12 07:11:38 +00:00
|
|
|
let is_valid_name = {
|
|
|
|
#[cfg(windows)]
|
|
|
|
{
|
|
|
|
bin_name
|
|
|
|
.chars()
|
|
|
|
.all(|c| c.is_ascii_alphabetic() || c == '_' || c == '.')
|
|
|
|
}
|
|
|
|
|
|
|
|
#[cfg(not(windows))]
|
|
|
|
{
|
|
|
|
bin_name
|
|
|
|
.chars()
|
|
|
|
.all(|c| c.is_ascii_alphabetic() || c == '_')
|
|
|
|
}
|
|
|
|
};
|
2019-09-12 03:20:42 +00:00
|
|
|
|
|
|
|
let is_executable = {
|
|
|
|
#[cfg(windows)]
|
|
|
|
{
|
2019-09-12 07:11:38 +00:00
|
|
|
bin_name.ends_with(".exe") || bin_name.ends_with(".bat")
|
2019-09-12 03:20:42 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
#[cfg(not(windows))]
|
|
|
|
{
|
|
|
|
true
|
|
|
|
}
|
|
|
|
};
|
|
|
|
|
|
|
|
if is_valid_name && is_executable {
|
2019-09-12 03:34:47 +00:00
|
|
|
trace!("Trying {:?}", bin.display());
|
2019-09-23 22:27:18 +00:00
|
|
|
|
|
|
|
// we are ok if this plugin load fails
|
|
|
|
let _ = load_plugin(&bin, context);
|
2019-09-12 03:20:42 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
2019-08-27 01:46:38 +00:00
|
|
|
}
|
2019-07-04 22:17:18 +00:00
|
|
|
|
2019-07-03 17:37:09 +00:00
|
|
|
Ok(())
|
|
|
|
}
|
|
|
|
|
2019-09-19 21:10:29 +00:00
|
|
|
pub struct History;
|
2019-09-19 20:28:48 +00:00
|
|
|
|
|
|
|
impl History {
|
|
|
|
pub fn path() -> PathBuf {
|
|
|
|
const FNAME: &str = "history.txt";
|
|
|
|
config::user_data()
|
|
|
|
.map(|mut p| {
|
|
|
|
p.push(FNAME);
|
|
|
|
p
|
|
|
|
})
|
|
|
|
.unwrap_or(PathBuf::from(FNAME))
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-06-03 03:48:58 +00:00
|
|
|
pub async fn cli() -> Result<(), Box<dyn Error>> {
|
2019-05-23 04:30:43 +00:00
|
|
|
let mut context = Context::basic()?;
|
|
|
|
|
|
|
|
{
|
|
|
|
use crate::commands::*;
|
|
|
|
|
|
|
|
context.add_commands(vec![
|
2019-09-07 15:49:15 +00:00
|
|
|
whole_stream_command(PWD),
|
2019-08-19 05:16:39 +00:00
|
|
|
whole_stream_command(LS),
|
|
|
|
whole_stream_command(CD),
|
|
|
|
whole_stream_command(Size),
|
|
|
|
whole_stream_command(Nth),
|
|
|
|
whole_stream_command(Next),
|
|
|
|
whole_stream_command(Previous),
|
|
|
|
whole_stream_command(Shells),
|
|
|
|
whole_stream_command(SplitColumn),
|
|
|
|
whole_stream_command(SplitRow),
|
|
|
|
whole_stream_command(Lines),
|
|
|
|
whole_stream_command(Reject),
|
2019-08-25 16:14:17 +00:00
|
|
|
whole_stream_command(Reverse),
|
2019-10-30 06:54:06 +00:00
|
|
|
whole_stream_command(Append),
|
|
|
|
whole_stream_command(Prepend),
|
2019-08-19 05:16:39 +00:00
|
|
|
whole_stream_command(Trim),
|
2019-08-26 14:16:34 +00:00
|
|
|
whole_stream_command(ToBSON),
|
2019-08-19 05:16:39 +00:00
|
|
|
whole_stream_command(ToCSV),
|
|
|
|
whole_stream_command(ToJSON),
|
2019-08-27 21:45:18 +00:00
|
|
|
whole_stream_command(ToSQLite),
|
2019-08-31 01:30:41 +00:00
|
|
|
whole_stream_command(ToDB),
|
2019-08-19 05:16:39 +00:00
|
|
|
whole_stream_command(ToTOML),
|
2019-08-29 09:02:16 +00:00
|
|
|
whole_stream_command(ToTSV),
|
2019-09-19 04:25:29 +00:00
|
|
|
whole_stream_command(ToURL),
|
2019-08-19 05:16:39 +00:00
|
|
|
whole_stream_command(ToYAML),
|
|
|
|
whole_stream_command(SortBy),
|
2019-10-20 23:42:07 +00:00
|
|
|
whole_stream_command(GroupBy),
|
2019-08-19 05:16:39 +00:00
|
|
|
whole_stream_command(Tags),
|
2019-10-15 10:19:06 +00:00
|
|
|
whole_stream_command(Count),
|
2019-08-19 05:16:39 +00:00
|
|
|
whole_stream_command(First),
|
2019-08-24 18:32:48 +00:00
|
|
|
whole_stream_command(Last),
|
2019-09-16 07:52:58 +00:00
|
|
|
whole_stream_command(Env),
|
2019-08-19 05:16:39 +00:00
|
|
|
whole_stream_command(FromCSV),
|
2019-08-29 09:02:16 +00:00
|
|
|
whole_stream_command(FromTSV),
|
2019-10-13 20:50:45 +00:00
|
|
|
whole_stream_command(FromSSV),
|
2019-08-19 05:16:39 +00:00
|
|
|
whole_stream_command(FromINI),
|
2019-08-24 18:19:22 +00:00
|
|
|
whole_stream_command(FromBSON),
|
2019-08-19 05:16:39 +00:00
|
|
|
whole_stream_command(FromJSON),
|
2019-08-31 01:30:41 +00:00
|
|
|
whole_stream_command(FromDB),
|
2019-08-27 21:45:18 +00:00
|
|
|
whole_stream_command(FromSQLite),
|
2019-08-19 05:16:39 +00:00
|
|
|
whole_stream_command(FromTOML),
|
2019-09-19 04:25:29 +00:00
|
|
|
whole_stream_command(FromURL),
|
2019-11-17 03:18:41 +00:00
|
|
|
whole_stream_command(FromXLSX),
|
2019-08-19 05:16:39 +00:00
|
|
|
whole_stream_command(FromXML),
|
|
|
|
whole_stream_command(FromYAML),
|
2019-08-29 03:53:45 +00:00
|
|
|
whole_stream_command(FromYML),
|
2019-08-19 05:16:39 +00:00
|
|
|
whole_stream_command(Pick),
|
2019-08-15 05:02:02 +00:00
|
|
|
whole_stream_command(Get),
|
2019-11-12 08:38:55 +00:00
|
|
|
whole_stream_command(Histogram),
|
2019-08-15 05:02:02 +00:00
|
|
|
per_item_command(Remove),
|
2019-09-03 06:04:46 +00:00
|
|
|
per_item_command(Fetch),
|
2019-08-15 05:02:02 +00:00
|
|
|
per_item_command(Open),
|
2019-08-30 18:27:15 +00:00
|
|
|
per_item_command(Post),
|
2019-08-14 17:02:39 +00:00
|
|
|
per_item_command(Where),
|
2019-09-07 23:43:53 +00:00
|
|
|
per_item_command(Echo),
|
2019-08-15 05:02:02 +00:00
|
|
|
whole_stream_command(Config),
|
2019-11-23 23:57:12 +00:00
|
|
|
whole_stream_command(Compact),
|
2019-11-24 09:20:08 +00:00
|
|
|
whole_stream_command(Default),
|
2019-08-15 05:02:02 +00:00
|
|
|
whole_stream_command(SkipWhile),
|
2019-08-14 17:02:39 +00:00
|
|
|
per_item_command(Enter),
|
2019-08-29 22:52:32 +00:00
|
|
|
per_item_command(Help),
|
2019-10-27 16:58:39 +00:00
|
|
|
per_item_command(History),
|
2019-08-15 05:02:02 +00:00
|
|
|
whole_stream_command(Exit),
|
|
|
|
whole_stream_command(Autoview),
|
2019-09-17 07:07:11 +00:00
|
|
|
whole_stream_command(Pivot),
|
2019-08-15 05:02:02 +00:00
|
|
|
per_item_command(Cpy),
|
|
|
|
whole_stream_command(Date),
|
|
|
|
per_item_command(Mkdir),
|
|
|
|
per_item_command(Move),
|
|
|
|
whole_stream_command(Save),
|
2019-11-12 08:38:55 +00:00
|
|
|
whole_stream_command(SplitBy),
|
2019-08-15 05:02:02 +00:00
|
|
|
whole_stream_command(Table),
|
2019-08-19 01:30:29 +00:00
|
|
|
whole_stream_command(Version),
|
2019-11-04 15:47:03 +00:00
|
|
|
whole_stream_command(What),
|
2019-08-15 05:02:02 +00:00
|
|
|
whole_stream_command(Which),
|
2019-11-22 08:31:58 +00:00
|
|
|
whole_stream_command(Debug),
|
2019-12-02 19:15:14 +00:00
|
|
|
whole_stream_command(Range),
|
2019-06-07 07:50:26 +00:00
|
|
|
]);
|
2019-08-23 03:29:08 +00:00
|
|
|
|
2019-11-04 01:55:34 +00:00
|
|
|
cfg_if::cfg_if! {
|
|
|
|
if #[cfg(data_processing_primitives)] {
|
|
|
|
context.add_commands(vec![
|
|
|
|
whole_stream_command(ReduceBy),
|
2019-11-12 07:07:43 +00:00
|
|
|
whole_stream_command(EvaluateBy),
|
|
|
|
whole_stream_command(TSortBy),
|
|
|
|
whole_stream_command(MapMaxBy),
|
2019-11-04 01:55:34 +00:00
|
|
|
]);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-08-23 03:29:08 +00:00
|
|
|
#[cfg(feature = "clipboard")]
|
|
|
|
{
|
|
|
|
context.add_commands(vec![whole_stream_command(
|
|
|
|
crate::commands::clip::clipboard::Clip,
|
|
|
|
)]);
|
|
|
|
}
|
2019-05-23 04:30:43 +00:00
|
|
|
}
|
Overhaul the coloring system
This commit replaces the previous naive coloring system with a coloring
system that is more aligned with the parser.
The main benefit of this change is that it allows us to use parsing
rules to decide how to color tokens.
For example, consider the following syntax:
```
$ ps | where cpu > 10
```
Ideally, we could color `cpu` like a column name and not a string,
because `cpu > 10` is a shorthand block syntax that expands to
`{ $it.cpu > 10 }`.
The way that we know that it's a shorthand block is that the `where`
command declares that its first parameter is a `SyntaxShape::Block`,
which allows the shorthand block form.
In order to accomplish this, we need to color the tokens in a way that
corresponds to their expanded semantics, which means that high-fidelity
coloring requires expansion.
This commit adds a `ColorSyntax` trait that corresponds to the
`ExpandExpression` trait. The semantics are fairly similar, with a few
differences.
First `ExpandExpression` consumes N tokens and returns a single
`hir::Expression`. `ColorSyntax` consumes N tokens and writes M
`FlatShape` tokens to the output.
Concretely, for syntax like `[1 2 3]`
- `ExpandExpression` takes a single token node and produces a single
`hir::Expression`
- `ColorSyntax` takes the same token node and emits 7 `FlatShape`s
(open delimiter, int, whitespace, int, whitespace, int, close
delimiter)
Second, `ColorSyntax` is more willing to plow through failures than
`ExpandExpression`.
In particular, consider syntax like
```
$ ps | where cpu >
```
In this case
- `ExpandExpression` will see that the `where` command is expecting a
block, see that it's not a literal block and try to parse it as a
shorthand block. It will successfully find a member followed by an
infix operator, but not a following expression. That means that the
entire pipeline part fails to parse and is a syntax error.
- `ColorSyntax` will also try to parse it as a shorthand block and
ultimately fail, but it will fall back to "backoff coloring mode",
which parsing any unidentified tokens in an unfallible, simple way. In
this case, `cpu` will color as a string and `>` will color as an
operator.
Finally, it's very important that coloring a pipeline infallibly colors
the entire string, doesn't fail, and doesn't get stuck in an infinite
loop.
In order to accomplish this, this PR separates `ColorSyntax`, which is
infallible from `FallibleColorSyntax`, which might fail. This allows the
type system to let us know if our coloring rules bottom out at an
infallible rule.
It's not perfect: it's still possible for the coloring process to get
stuck or consume tokens non-atomically. I intend to reduce the
opportunity for those problems in a future commit. In the meantime, the
current system catches a number of mistakes (like trying to use a
fallible coloring rule in a loop without thinking about the possibility
that it will never terminate).
2019-10-06 20:22:50 +00:00
|
|
|
|
2019-07-04 03:06:43 +00:00
|
|
|
let _ = load_plugins(&mut context);
|
2019-05-23 04:30:43 +00:00
|
|
|
|
2019-05-26 06:54:41 +00:00
|
|
|
let config = Config::builder().color_mode(ColorMode::Forced).build();
|
2019-08-07 17:49:11 +00:00
|
|
|
let mut rl: Editor<_> = Editor::with_config(config);
|
2019-05-26 06:54:41 +00:00
|
|
|
|
|
|
|
#[cfg(windows)]
|
|
|
|
{
|
|
|
|
let _ = ansi_term::enable_ansi_support();
|
|
|
|
}
|
|
|
|
|
2019-08-26 22:41:57 +00:00
|
|
|
// we are ok if history does not exist
|
2019-09-19 20:28:48 +00:00
|
|
|
let _ = rl.load_history(&History::path());
|
2019-05-26 06:54:41 +00:00
|
|
|
|
2019-10-13 04:12:43 +00:00
|
|
|
let cc = context.ctrl_c.clone();
|
2019-06-07 00:31:22 +00:00
|
|
|
ctrlc::set_handler(move || {
|
|
|
|
cc.store(true, Ordering::SeqCst);
|
|
|
|
})
|
|
|
|
.expect("Error setting Ctrl-C handler");
|
2019-06-15 18:36:17 +00:00
|
|
|
let mut ctrlcbreak = false;
|
2019-05-23 04:30:43 +00:00
|
|
|
loop {
|
2019-10-13 04:12:43 +00:00
|
|
|
if context.ctrl_c.load(Ordering::SeqCst) {
|
|
|
|
context.ctrl_c.store(false, Ordering::SeqCst);
|
2019-06-07 00:31:22 +00:00
|
|
|
continue;
|
|
|
|
}
|
|
|
|
|
2019-08-07 17:49:11 +00:00
|
|
|
let cwd = context.shell_manager.path();
|
|
|
|
|
Overhaul the coloring system
This commit replaces the previous naive coloring system with a coloring
system that is more aligned with the parser.
The main benefit of this change is that it allows us to use parsing
rules to decide how to color tokens.
For example, consider the following syntax:
```
$ ps | where cpu > 10
```
Ideally, we could color `cpu` like a column name and not a string,
because `cpu > 10` is a shorthand block syntax that expands to
`{ $it.cpu > 10 }`.
The way that we know that it's a shorthand block is that the `where`
command declares that its first parameter is a `SyntaxShape::Block`,
which allows the shorthand block form.
In order to accomplish this, we need to color the tokens in a way that
corresponds to their expanded semantics, which means that high-fidelity
coloring requires expansion.
This commit adds a `ColorSyntax` trait that corresponds to the
`ExpandExpression` trait. The semantics are fairly similar, with a few
differences.
First `ExpandExpression` consumes N tokens and returns a single
`hir::Expression`. `ColorSyntax` consumes N tokens and writes M
`FlatShape` tokens to the output.
Concretely, for syntax like `[1 2 3]`
- `ExpandExpression` takes a single token node and produces a single
`hir::Expression`
- `ColorSyntax` takes the same token node and emits 7 `FlatShape`s
(open delimiter, int, whitespace, int, whitespace, int, close
delimiter)
Second, `ColorSyntax` is more willing to plow through failures than
`ExpandExpression`.
In particular, consider syntax like
```
$ ps | where cpu >
```
In this case
- `ExpandExpression` will see that the `where` command is expecting a
block, see that it's not a literal block and try to parse it as a
shorthand block. It will successfully find a member followed by an
infix operator, but not a following expression. That means that the
entire pipeline part fails to parse and is a syntax error.
- `ColorSyntax` will also try to parse it as a shorthand block and
ultimately fail, but it will fall back to "backoff coloring mode",
which parses any unidentified tokens in an infallible, simple way. In
this case, `cpu` will color as a string and `>` will color as an
operator.
Finally, it's very important that coloring a pipeline infallibly colors
the entire string, doesn't fail, and doesn't get stuck in an infinite
loop.
In order to accomplish this, this PR separates `ColorSyntax`, which is
infallible from `FallibleColorSyntax`, which might fail. This allows the
type system to let us know if our coloring rules bottom out at an
infallible rule.
It's not perfect: it's still possible for the coloring process to get
stuck or consume tokens non-atomically. I intend to reduce the
opportunity for those problems in a future commit. In the meantime, the
current system catches a number of mistakes (like trying to use a
fallible coloring rule in a loop without thinking about the possibility
that it will never terminate).
2019-10-06 20:22:50 +00:00
|
|
|
rl.set_helper(Some(crate::shell::Helper::new(context.clone())));
|
2019-08-07 17:49:11 +00:00
|
|
|
|
2019-09-19 20:28:48 +00:00
|
|
|
let edit_mode = config::config(Tag::unknown())?
|
2019-08-25 05:12:23 +00:00
|
|
|
.get("edit_mode")
|
2019-11-21 14:33:14 +00:00
|
|
|
.map(|s| match s.value.expect_string() {
|
2019-08-25 05:12:23 +00:00
|
|
|
"vi" => EditMode::Vi,
|
|
|
|
"emacs" => EditMode::Emacs,
|
|
|
|
_ => EditMode::Emacs,
|
|
|
|
})
|
|
|
|
.unwrap_or(EditMode::Emacs);
|
|
|
|
|
|
|
|
rl.set_edit_mode(edit_mode);
|
2019-08-07 17:49:11 +00:00
|
|
|
|
2019-11-16 20:02:26 +00:00
|
|
|
let colored_prompt = {
|
|
|
|
#[cfg(feature = "starship-prompt")]
|
|
|
|
{
|
2019-11-29 19:38:44 +00:00
|
|
|
std::env::set_var("STARSHIP_SHELL", "");
|
2019-11-16 20:02:26 +00:00
|
|
|
starship::print::get_prompt(starship::context::Context::new_with_dir(
|
2019-10-08 13:47:30 +00:00
|
|
|
clap::ArgMatches::default(),
|
|
|
|
cwd,
|
|
|
|
))
|
2019-07-16 19:10:25 +00:00
|
|
|
}
|
2019-10-08 13:47:30 +00:00
|
|
|
#[cfg(not(feature = "starship-prompt"))]
|
|
|
|
{
|
2019-11-16 20:02:26 +00:00
|
|
|
format!(
|
2019-11-16 20:42:35 +00:00
|
|
|
"\x1b[32m{}{}\x1b[m> ",
|
2019-10-08 13:47:30 +00:00
|
|
|
cwd,
|
|
|
|
match current_branch() {
|
|
|
|
Some(s) => format!("({})", s),
|
|
|
|
None => "".to_string(),
|
|
|
|
}
|
|
|
|
)
|
|
|
|
}
|
|
|
|
};
|
2019-11-16 20:02:26 +00:00
|
|
|
|
2019-11-16 20:42:35 +00:00
|
|
|
let prompt = {
|
|
|
|
let bytes = strip_ansi_escapes::strip(&colored_prompt).unwrap();
|
|
|
|
|
|
|
|
String::from_utf8_lossy(&bytes).to_string()
|
|
|
|
};
|
|
|
|
|
2019-11-16 20:02:26 +00:00
|
|
|
rl.helper_mut().expect("No helper").colored_prompt = colored_prompt;
|
2019-09-17 22:21:39 +00:00
|
|
|
let mut initial_command = Some(String::new());
|
|
|
|
let mut readline = Err(ReadlineError::Eof);
|
|
|
|
while let Some(ref cmd) = initial_command {
|
2019-11-16 20:02:26 +00:00
|
|
|
readline = rl.readline_with_initial(&prompt, (&cmd, ""));
|
2019-11-22 08:25:09 +00:00
|
|
|
initial_command = None;
|
2019-09-17 22:21:39 +00:00
|
|
|
}
|
2019-05-23 04:30:43 +00:00
|
|
|
|
2019-11-04 15:47:03 +00:00
|
|
|
let line = process_line(readline, &mut context).await;
|
|
|
|
|
|
|
|
match line {
|
2019-05-23 04:30:43 +00:00
|
|
|
LineResult::Success(line) => {
|
|
|
|
rl.add_history_entry(line.clone());
|
2019-10-27 16:58:39 +00:00
|
|
|
let _ = rl.save_history(&History::path());
|
2019-11-04 15:47:03 +00:00
|
|
|
context.maybe_print_errors(Text::from(line));
|
|
|
|
}
|
|
|
|
|
|
|
|
LineResult::Error(line, err) => {
|
|
|
|
rl.add_history_entry(line.clone());
|
|
|
|
let _ = rl.save_history(&History::path());
|
|
|
|
|
|
|
|
context.with_host(|host| {
|
|
|
|
print_err(err, host, &Text::from(line.clone()));
|
|
|
|
});
|
|
|
|
|
|
|
|
context.maybe_print_errors(Text::from(line.clone()));
|
2019-05-23 04:30:43 +00:00
|
|
|
}
|
|
|
|
|
2019-06-15 18:36:17 +00:00
|
|
|
LineResult::CtrlC => {
|
2019-09-25 01:01:38 +00:00
|
|
|
let config_ctrlc_exit = config::config(Tag::unknown())?
|
|
|
|
.get("ctrlc_exit")
|
2019-11-21 14:33:14 +00:00
|
|
|
.map(|s| match s.value.expect_string() {
|
2019-09-25 01:01:38 +00:00
|
|
|
"true" => true,
|
|
|
|
_ => false,
|
|
|
|
})
|
|
|
|
.unwrap_or(false); // default behavior is to allow CTRL-C spamming similar to other shells
|
|
|
|
|
|
|
|
if !config_ctrlc_exit {
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
|
2019-06-15 18:36:17 +00:00
|
|
|
if ctrlcbreak {
|
2019-09-19 20:55:53 +00:00
|
|
|
let _ = rl.save_history(&History::path());
|
2019-06-15 18:36:17 +00:00
|
|
|
std::process::exit(0);
|
|
|
|
} else {
|
2019-08-28 17:01:16 +00:00
|
|
|
context.with_host(|host| host.stdout("CTRL-C pressed (again to quit)"));
|
2019-06-15 18:36:17 +00:00
|
|
|
ctrlcbreak = true;
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-05-23 04:30:43 +00:00
|
|
|
LineResult::Break => {
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
2019-06-15 18:36:17 +00:00
|
|
|
ctrlcbreak = false;
|
2019-05-23 04:30:43 +00:00
|
|
|
}
|
2019-08-26 22:41:57 +00:00
|
|
|
|
|
|
|
// we are ok if we can not save history
|
2019-09-19 20:28:48 +00:00
|
|
|
let _ = rl.save_history(&History::path());
|
2019-05-23 04:30:43 +00:00
|
|
|
|
|
|
|
Ok(())
|
|
|
|
}
|
|
|
|
|
Overhaul the expansion system
The main thrust of this (very large) commit is an overhaul of the
expansion system.
The parsing pipeline is:
- Lightly parse the source file for atoms, basic delimiters and pipeline
structure into a token tree
- Expand the token tree into a HIR (high-level intermediate
representation) based upon the baseline syntax rules for expressions
and the syntactic shape of commands.
Somewhat non-traditionally, nu doesn't have an AST at all. It goes
directly from the token tree, which doesn't represent many important
distinctions (like the difference between `hello` and `5KB`) directly
into a high-level representation that doesn't have a direct
correspondence to the source code.
At a high level, nu commands work like macros, in the sense that the
syntactic shape of the invocation of a command depends on the
definition of a command.
However, commands do not have the ability to perform unrestricted
expansions of the token tree. Instead, they describe their arguments in
terms of syntactic shapes, and the expander expands the token tree into
HIR based upon that definition.
For example, the `where` command says that it takes a block as its first
required argument, and the description of the block syntactic shape
expands the syntax `cpu > 10` into HIR that represents
`{ $it.cpu > 10 }`.
This commit overhauls that system so that the syntactic shapes are
described in terms of a few new traits (`ExpandSyntax` and
`ExpandExpression` are the primary ones) that are more composable than
the previous system.
The first big win of this new system is the addition of the `ColumnPath`
shape, which looks like `cpu."max ghz"` or `package.version`.
Previously, while a variable path could look like `$it.cpu."max ghz"`,
the tail of a variable path could not be easily reused in other
contexts. Now, that tail is its own syntactic shape, and it can be used
as part of a command's signature.
This cleans up commands like `inc`, `add` and `edit` as well as
shorthand blocks, which can now look like `| where cpu."max ghz" > 10`
2019-09-17 22:26:27 +00:00
|
|
|
/// Removes at most one trailing `'\n'` from `s`, returning a subslice.
///
/// Earlier newlines (and any other trailing whitespace) are left intact;
/// only the single final newline, if present, is dropped.
fn chomp_newline(s: &str) -> &str {
    // '\n' is a single byte, so slicing off one byte is always valid UTF-8.
    match s.as_bytes().last() {
        Some(b'\n') => &s[..s.len() - 1],
        _ => s,
    }
}
|
|
|
|
|
2019-11-02 03:41:58 +00:00
|
|
|
fn set_env_from_config() {
|
2019-11-02 00:36:21 +00:00
|
|
|
let config = crate::data::config::read(Tag::unknown(), &None).unwrap();
|
2019-11-02 03:41:58 +00:00
|
|
|
|
|
|
|
if config.contains_key("env") {
|
|
|
|
// Clear the existing vars, we're about to replace them
|
|
|
|
for (key, _value) in std::env::vars() {
|
|
|
|
std::env::remove_var(key);
|
|
|
|
}
|
|
|
|
|
|
|
|
let value = config.get("env");
|
|
|
|
|
|
|
|
match value {
|
2019-11-21 14:33:14 +00:00
|
|
|
Some(Value {
|
|
|
|
value: UntaggedValue::Row(r),
|
2019-11-02 03:41:58 +00:00
|
|
|
..
|
|
|
|
}) => {
|
|
|
|
for (k, v) in &r.entries {
|
|
|
|
match v.as_string() {
|
|
|
|
Ok(value_string) => {
|
|
|
|
std::env::set_var(k, value_string);
|
|
|
|
}
|
|
|
|
_ => {}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
_ => {}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-11-02 00:36:21 +00:00
|
|
|
if config.contains_key("path") {
|
|
|
|
// Override the path with what they give us from config
|
|
|
|
let value = config.get("path");
|
|
|
|
|
|
|
|
match value {
|
|
|
|
Some(value) => match value {
|
2019-11-21 14:33:14 +00:00
|
|
|
Value {
|
|
|
|
value: UntaggedValue::Table(table),
|
2019-11-02 00:36:21 +00:00
|
|
|
..
|
|
|
|
} => {
|
|
|
|
let mut paths = vec![];
|
|
|
|
for val in table {
|
|
|
|
let path_str = val.as_string();
|
|
|
|
match path_str {
|
|
|
|
Err(_) => {}
|
|
|
|
Ok(path_str) => {
|
|
|
|
paths.push(PathBuf::from(path_str));
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
let path_os_string = std::env::join_paths(&paths);
|
|
|
|
match path_os_string {
|
|
|
|
Ok(path_os_string) => {
|
|
|
|
std::env::set_var("PATH", path_os_string);
|
|
|
|
}
|
|
|
|
Err(_) => {}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
_ => {}
|
|
|
|
},
|
|
|
|
None => {}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-05-23 04:30:43 +00:00
|
|
|
/// Outcome of reading and processing a single REPL line.
enum LineResult {
    /// The line ran (or was blank); carries the input text so the caller
    /// can add it to history.
    Success(String),
    /// Parsing or execution failed; carries the input text and the error
    /// to report against it.
    Error(String, ShellError),
    /// The user pressed Ctrl-C at the prompt (readline was interrupted).
    CtrlC,
    /// End of input (EOF) or an unrecoverable readline error; the caller
    /// exits its loop.
    Break,
}
|
|
|
|
|
2019-05-23 07:23:06 +00:00
|
|
|
/// Parses and runs a single line read from the REPL prompt.
///
/// Flow for a non-blank line: parse the raw text with `nu_parser`,
/// classify the token tree into a pipeline, append an implicit `autoview`
/// sink when the pipeline does not already end in an external command,
/// refresh the environment from config, then execute via `run_pipeline`.
///
/// Readline errors map onto `LineResult`: interrupt (Ctrl-C) -> `CtrlC`,
/// EOF -> `Break`, anything else is printed and also ends the loop.
async fn process_line(readline: Result<String, ReadlineError>, ctx: &mut Context) -> LineResult {
    match &readline {
        // A blank line is a no-op but still counts as a success.
        Ok(line) if line.trim() == "" => LineResult::Success(line.clone()),

        Ok(line) => {
            // Drop a single trailing newline before parsing.
            let line = chomp_newline(line);

            // Light parse: raw text -> token tree.
            let result = match nu_parser::parse(&line) {
                Err(err) => {
                    return LineResult::Error(line.to_string(), err);
                }

                Ok(val) => val,
            };

            debug!("=== Parsed ===");
            debug!("{:#?}", result);

            // Expand the token tree into a classified pipeline of
            // internal/external commands.
            let mut pipeline = match classify_pipeline(&result, ctx, &Text::from(line)) {
                Ok(pipeline) => pipeline,
                Err(err) => return LineResult::Error(line.to_string(), err),
            };

            // Unless the pipeline already ends in an external command,
            // push a synthetic `autoview` call so the final value gets
            // rendered to the user.
            match pipeline.commands.list.last() {
                Some(ClassifiedCommand::External(_)) => {}

                _ => pipeline
                    .commands
                    .list
                    .push(ClassifiedCommand::Internal(InternalCommand {
                        name: "autoview".to_string(),
                        name_tag: Tag::unknown(),
                        args: hir::Call::new(
                            Box::new(hir::Expression::synthetic_string("autoview")),
                            None,
                            None,
                            Span::unknown(),
                        ),
                    })),
            }

            // Check the config to see if we need to update the path
            // TODO: make sure config is cached so we don't path this load every call
            set_env_from_config();

            let input = ClassifiedInputStream::new();

            // Execute the classified pipeline; either way the input text
            // is handed back so the caller can record history.
            match run_pipeline(pipeline, ctx, input, line).await {
                Ok(_) => LineResult::Success(line.to_string()),
                Err(err) => LineResult::Error(line.to_string(), err),
            }
        }
        Err(ReadlineError::Interrupted) => LineResult::CtrlC,
        Err(ReadlineError::Eof) => LineResult::Break,
        Err(err) => {
            outln!("Error: {:?}", err);
            LineResult::Break
        }
    }
}
|
|
|
|
|
2019-05-26 06:54:41 +00:00
|
|
|
fn classify_pipeline(
|
2019-06-22 03:43:37 +00:00
|
|
|
pipeline: &TokenNode,
|
2019-05-26 06:54:41 +00:00
|
|
|
context: &Context,
|
2019-06-22 20:46:16 +00:00
|
|
|
source: &Text,
|
2019-05-26 06:54:41 +00:00
|
|
|
) -> Result<ClassifiedPipeline, ShellError> {
|
Overhaul the coloring system
This commit replaces the previous naive coloring system with a coloring
system that is more aligned with the parser.
The main benefit of this change is that it allows us to use parsing
rules to decide how to color tokens.
For example, consider the following syntax:
```
$ ps | where cpu > 10
```
Ideally, we could color `cpu` like a column name and not a string,
because `cpu > 10` is a shorthand block syntax that expands to
`{ $it.cpu > 10 }`.
The way that we know that it's a shorthand block is that the `where`
command declares that its first parameter is a `SyntaxShape::Block`,
which allows the shorthand block form.
In order to accomplish this, we need to color the tokens in a way that
corresponds to their expanded semantics, which means that high-fidelity
coloring requires expansion.
This commit adds a `ColorSyntax` trait that corresponds to the
`ExpandExpression` trait. The semantics are fairly similar, with a few
differences.
First `ExpandExpression` consumes N tokens and returns a single
`hir::Expression`. `ColorSyntax` consumes N tokens and writes M
`FlatShape` tokens to the output.
Concretely, for syntax like `[1 2 3]`
- `ExpandExpression` takes a single token node and produces a single
`hir::Expression`
- `ColorSyntax` takes the same token node and emits 7 `FlatShape`s
(open delimiter, int, whitespace, int, whitespace, int, close
delimiter)
Second, `ColorSyntax` is more willing to plow through failures than
`ExpandExpression`.
In particular, consider syntax like
```
$ ps | where cpu >
```
In this case
- `ExpandExpression` will see that the `where` command is expecting a
block, see that it's not a literal block and try to parse it as a
shorthand block. It will successfully find a member followed by an
infix operator, but not a following expression. That means that the
entire pipeline part fails to parse and is a syntax error.
- `ColorSyntax` will also try to parse it as a shorthand block and
ultimately fail, but it will fall back to "backoff coloring mode",
which parsing any unidentified tokens in an unfallible, simple way. In
this case, `cpu` will color as a string and `>` will color as an
operator.
Finally, it's very important that coloring a pipeline infallibly colors
the entire string, doesn't fail, and doesn't get stuck in an infinite
loop.
In order to accomplish this, this PR separates `ColorSyntax`, which is
infallible from `FallibleColorSyntax`, which might fail. This allows the
type system to let us know if our coloring rules bottom out at at an
infallible rule.
It's not perfect: it's still possible for the coloring process to get
stuck or consume tokens non-atomically. I intend to reduce the
opportunity for those problems in a future commit. In the meantime, the
current system catches a number of mistakes (like trying to use a
fallible coloring rule in a loop without thinking about the possibility
that it will never terminate).
2019-10-06 20:22:50 +00:00
|
|
|
let mut pipeline_list = vec![pipeline.clone()];
|
2019-11-21 14:33:14 +00:00
|
|
|
let mut iterator = TokensIterator::all(&mut pipeline_list, source.clone(), pipeline.span());
|
Overhaul the coloring system
This commit replaces the previous naive coloring system with a coloring
system that is more aligned with the parser.
The main benefit of this change is that it allows us to use parsing
rules to decide how to color tokens.
For example, consider the following syntax:
```
$ ps | where cpu > 10
```
Ideally, we could color `cpu` like a column name and not a string,
because `cpu > 10` is a shorthand block syntax that expands to
`{ $it.cpu > 10 }`.
The way that we know that it's a shorthand block is that the `where`
command declares that its first parameter is a `SyntaxShape::Block`,
which allows the shorthand block form.
In order to accomplish this, we need to color the tokens in a way that
corresponds to their expanded semantics, which means that high-fidelity
coloring requires expansion.
This commit adds a `ColorSyntax` trait that corresponds to the
`ExpandExpression` trait. The semantics are fairly similar, with a few
differences.
First `ExpandExpression` consumes N tokens and returns a single
`hir::Expression`. `ColorSyntax` consumes N tokens and writes M
`FlatShape` tokens to the output.
Concretely, for syntax like `[1 2 3]`
- `ExpandExpression` takes a single token node and produces a single
`hir::Expression`
- `ColorSyntax` takes the same token node and emits 7 `FlatShape`s
(open delimiter, int, whitespace, int, whitespace, int, close
delimiter)
Second, `ColorSyntax` is more willing to plow through failures than
`ExpandExpression`.
In particular, consider syntax like
```
$ ps | where cpu >
```
In this case
- `ExpandExpression` will see that the `where` command is expecting a
block, see that it's not a literal block and try to parse it as a
shorthand block. It will successfully find a member followed by an
infix operator, but not a following expression. That means that the
entire pipeline part fails to parse and is a syntax error.
- `ColorSyntax` will also try to parse it as a shorthand block and
ultimately fail, but it will fall back to "backoff coloring mode",
which parsing any unidentified tokens in an unfallible, simple way. In
this case, `cpu` will color as a string and `>` will color as an
operator.
Finally, it's very important that coloring a pipeline infallibly colors
the entire string, doesn't fail, and doesn't get stuck in an infinite
loop.
In order to accomplish this, this PR separates `ColorSyntax`, which is
infallible from `FallibleColorSyntax`, which might fail. This allows the
type system to let us know if our coloring rules bottom out at at an
infallible rule.
It's not perfect: it's still possible for the coloring process to get
stuck or consume tokens non-atomically. I intend to reduce the
opportunity for those problems in a future commit. In the meantime, the
current system catches a number of mistakes (like trying to use a
fallible coloring rule in a loop without thinking about the possibility
that it will never terminate).
2019-10-06 20:22:50 +00:00
|
|
|
|
2019-10-28 14:46:50 +00:00
|
|
|
let result = expand_syntax(
|
Overhaul the coloring system
This commit replaces the previous naive coloring system with a coloring
system that is more aligned with the parser.
The main benefit of this change is that it allows us to use parsing
rules to decide how to color tokens.
For example, consider the following syntax:
```
$ ps | where cpu > 10
```
Ideally, we could color `cpu` like a column name and not a string,
because `cpu > 10` is a shorthand block syntax that expands to
`{ $it.cpu > 10 }`.
The way that we know that it's a shorthand block is that the `where`
command declares that its first parameter is a `SyntaxShape::Block`,
which allows the shorthand block form.
In order to accomplish this, we need to color the tokens in a way that
corresponds to their expanded semantics, which means that high-fidelity
coloring requires expansion.
This commit adds a `ColorSyntax` trait that corresponds to the
`ExpandExpression` trait. The semantics are fairly similar, with a few
differences.
First `ExpandExpression` consumes N tokens and returns a single
`hir::Expression`. `ColorSyntax` consumes N tokens and writes M
`FlatShape` tokens to the output.
Concretely, for syntax like `[1 2 3]`
- `ExpandExpression` takes a single token node and produces a single
`hir::Expression`
- `ColorSyntax` takes the same token node and emits 7 `FlatShape`s
(open delimiter, int, whitespace, int, whitespace, int, close
delimiter)
Second, `ColorSyntax` is more willing to plow through failures than
`ExpandExpression`.
In particular, consider syntax like
```
$ ps | where cpu >
```
In this case
- `ExpandExpression` will see that the `where` command is expecting a
block, see that it's not a literal block and try to parse it as a
shorthand block. It will successfully find a member followed by an
infix operator, but not a following expression. That means that the
entire pipeline part fails to parse and is a syntax error.
- `ColorSyntax` will also try to parse it as a shorthand block and
ultimately fail, but it will fall back to "backoff coloring mode",
which parses any unidentified tokens in an infallible, simple way. In
this case, `cpu` will color as a string and `>` will color as an
operator.
Finally, it's very important that coloring a pipeline infallibly colors
the entire string, doesn't fail, and doesn't get stuck in an infinite
loop.
In order to accomplish this, this PR separates `ColorSyntax`, which is
infallible from `FallibleColorSyntax`, which might fail. This allows the
type system to let us know if our coloring rules bottom out at an
infallible rule.
It's not perfect: it's still possible for the coloring process to get
stuck or consume tokens non-atomically. I intend to reduce the
opportunity for those problems in a future commit. In the meantime, the
current system catches a number of mistakes (like trying to use a
fallible coloring rule in a loop without thinking about the possibility
that it will never terminate).
2019-10-06 20:22:50 +00:00
|
|
|
&PipelineShape,
|
|
|
|
&mut iterator,
|
2019-10-28 14:46:50 +00:00
|
|
|
&context.expand_context(source),
|
Overhaul the coloring system
This commit replaces the previous naive coloring system with a coloring
system that is more aligned with the parser.
The main benefit of this change is that it allows us to use parsing
rules to decide how to color tokens.
For example, consider the following syntax:
```
$ ps | where cpu > 10
```
Ideally, we could color `cpu` like a column name and not a string,
because `cpu > 10` is a shorthand block syntax that expands to
`{ $it.cpu > 10 }`.
The way that we know that it's a shorthand block is that the `where`
command declares that its first parameter is a `SyntaxShape::Block`,
which allows the shorthand block form.
In order to accomplish this, we need to color the tokens in a way that
corresponds to their expanded semantics, which means that high-fidelity
coloring requires expansion.
This commit adds a `ColorSyntax` trait that corresponds to the
`ExpandExpression` trait. The semantics are fairly similar, with a few
differences.
First `ExpandExpression` consumes N tokens and returns a single
`hir::Expression`. `ColorSyntax` consumes N tokens and writes M
`FlatShape` tokens to the output.
Concretely, for syntax like `[1 2 3]`
- `ExpandExpression` takes a single token node and produces a single
`hir::Expression`
- `ColorSyntax` takes the same token node and emits 7 `FlatShape`s
(open delimiter, int, whitespace, int, whitespace, int, close
delimiter)
Second, `ColorSyntax` is more willing to plow through failures than
`ExpandExpression`.
In particular, consider syntax like
```
$ ps | where cpu >
```
In this case
- `ExpandExpression` will see that the `where` command is expecting a
block, see that it's not a literal block and try to parse it as a
shorthand block. It will successfully find a member followed by an
infix operator, but not a following expression. That means that the
entire pipeline part fails to parse and is a syntax error.
- `ColorSyntax` will also try to parse it as a shorthand block and
ultimately fail, but it will fall back to "backoff coloring mode",
which parses any unidentified tokens in an infallible, simple way. In
this case, `cpu` will color as a string and `>` will color as an
operator.
Finally, it's very important that coloring a pipeline infallibly colors
the entire string, doesn't fail, and doesn't get stuck in an infinite
loop.
In order to accomplish this, this PR separates `ColorSyntax`, which is
infallible from `FallibleColorSyntax`, which might fail. This allows the
type system to let us know if our coloring rules bottom out at an
infallible rule.
It's not perfect: it's still possible for the coloring process to get
stuck or consume tokens non-atomically. I intend to reduce the
opportunity for those problems in a future commit. In the meantime, the
current system catches a number of mistakes (like trying to use a
fallible coloring rule in a loop without thinking about the possibility
that it will never terminate).
2019-10-06 20:22:50 +00:00
|
|
|
)
|
2019-10-28 14:46:50 +00:00
|
|
|
.map_err(|err| err.into());
|
|
|
|
|
|
|
|
if log_enabled!(target: "nu::expand_syntax", log::Level::Debug) {
|
2019-11-04 15:47:03 +00:00
|
|
|
outln!("");
|
2019-10-28 14:46:50 +00:00
|
|
|
ptree::print_tree(&iterator.expand_tracer().print(source.clone())).unwrap();
|
2019-11-04 15:47:03 +00:00
|
|
|
outln!("");
|
2019-05-23 04:30:43 +00:00
|
|
|
}
|
2019-10-28 14:46:50 +00:00
|
|
|
|
|
|
|
result
|
2019-05-23 04:30:43 +00:00
|
|
|
}
|
2019-08-15 22:18:18 +00:00
|
|
|
|
Overhaul the expansion system
The main thrust of this (very large) commit is an overhaul of the
expansion system.
The parsing pipeline is:
- Lightly parse the source file for atoms, basic delimiters and pipeline
structure into a token tree
- Expand the token tree into a HIR (high-level intermediate
representation) based upon the baseline syntax rules for expressions
and the syntactic shape of commands.
Somewhat non-traditionally, nu doesn't have an AST at all. It goes
directly from the token tree, which doesn't represent many important
distinctions (like the difference between `hello` and `5KB`) directly
into a high-level representation that doesn't have a direct
correspondence to the source code.
At a high level, nu commands work like macros, in the sense that the
syntactic shape of the invocation of a command depends on the
definition of a command.
However, commands do not have the ability to perform unrestricted
expansions of the token tree. Instead, they describe their arguments in
terms of syntactic shapes, and the expander expands the token tree into
HIR based upon that definition.
For example, the `where` command says that it takes a block as its first
required argument, and the description of the block syntactic shape
expands the syntax `cpu > 10` into HIR that represents
`{ $it.cpu > 10 }`.
This commit overhauls that system so that the syntactic shapes are
described in terms of a few new traits (`ExpandSyntax` and
`ExpandExpression` are the primary ones) that are more composable than
the previous system.
The first big win of this new system is the addition of the `ColumnPath`
shape, which looks like `cpu."max ghz"` or `package.version`.
Previously, while a variable path could look like `$it.cpu."max ghz"`,
the tail of a variable path could not be easily reused in other
contexts. Now, that tail is its own syntactic shape, and it can be used
as part of a command's signature.
This cleans up commands like `inc`, `add` and `edit` as well as
shorthand blocks, which can now look like `| where cpu."max ghz" > 10`
2019-09-17 22:26:27 +00:00
|
|
|
// Render a `ShellError` as a pretty, span-annotated diagnostic on the
// host's error stream (termcolor writer), using `language_reporting`.
pub fn print_err(err: ShellError, host: &dyn Host, source: &Text) {
|
|
|
|
// Convert the shell error into a language-reporting diagnostic.
let diag = err.to_diagnostic();
|
|
|
|
|
|
|
|
// Colored writer for stderr, provided by the host.
let writer = host.err_termcolor();
|
|
|
|
let mut source = source.to_string();
|
|
|
|
// Pad the source with a trailing space — presumably so diagnostic spans
// pointing at end-of-input stay in bounds; TODO(review): confirm.
source.push_str(" ");
|
Extract core stuff into own crates
This commit extracts five new crates:
- nu-source, which contains the core source-code handling logic in Nu,
including Text, Span, and also the pretty.rs-based debug logic
- nu-parser, which is the parser and expander logic
- nu-protocol, which is the bulk of the types and basic conveniences
used by plugins
- nu-errors, which contains ShellError, ParseError and error handling
conveniences
- nu-textview, which is the textview plugin extracted into a crate
One of the major consequences of this refactor is that it's no longer
possible to `impl X for Spanned<Y>` outside of the `nu-source` crate, so
a lot of types became more concrete (Value became a concrete type
instead of Spanned<Value>, for example).
This also turned a number of inherent methods in the main nu crate into
plain functions (impl Value {} became a bunch of functions in the
`value` namespace in `crate::data::value`).
2019-11-26 02:30:48 +00:00
|
|
|
// Wrap the (padded) source in the file database the reporter expects.
let files = nu_parser::Files::new(source);
|
Overhaul the expansion system
The main thrust of this (very large) commit is an overhaul of the
expansion system.
The parsing pipeline is:
- Lightly parse the source file for atoms, basic delimiters and pipeline
structure into a token tree
- Expand the token tree into a HIR (high-level intermediate
representation) based upon the baseline syntax rules for expressions
and the syntactic shape of commands.
Somewhat non-traditionally, nu doesn't have an AST at all. It goes
directly from the token tree, which doesn't represent many important
distinctions (like the difference between `hello` and `5KB`) directly
into a high-level representation that doesn't have a direct
correspondence to the source code.
At a high level, nu commands work like macros, in the sense that the
syntactic shape of the invocation of a command depends on the
definition of a command.
However, commands do not have the ability to perform unrestricted
expansions of the token tree. Instead, they describe their arguments in
terms of syntactic shapes, and the expander expands the token tree into
HIR based upon that definition.
For example, the `where` command says that it takes a block as its first
required argument, and the description of the block syntactic shape
expands the syntax `cpu > 10` into HIR that represents
`{ $it.cpu > 10 }`.
This commit overhauls that system so that the syntactic shapes are
described in terms of a few new traits (`ExpandSyntax` and
`ExpandExpression` are the primary ones) that are more composable than
the previous system.
The first big win of this new system is the addition of the `ColumnPath`
shape, which looks like `cpu."max ghz"` or `package.version`.
Previously, while a variable path could look like `$it.cpu."max ghz"`,
the tail of a variable path could not be easily reused in other
contexts. Now, that tail is its own syntactic shape, and it can be used
as part of a command's signature.
This cleans up commands like `inc`, `add` and `edit` as well as
shorthand blocks, which can now look like `| where cpu."max ghz" > 10`
2019-09-17 22:26:27 +00:00
|
|
|
// Best-effort emission: a panic inside `language_reporting::emit` is
// caught and discarded so error printing can never take down the shell.
// Both the emit result and any caught panic are deliberately ignored.
let _ = std::panic::catch_unwind(move || {
|
|
|
|
let _ = language_reporting::emit(
|
|
|
|
&mut writer.lock(),
|
|
|
|
&files,
|
|
|
|
&diag,
|
|
|
|
&language_reporting::DefaultConfig,
|
|
|
|
);
|
|
|
|
});
|
2019-08-15 22:18:18 +00:00
|
|
|
}
|