Mirror of https://github.com/fish-shell/fish-shell, synced 2024-11-10 15:14:44 +00:00

Port tokenizer.cpp to Rust

In hindsight, I should probably have split this into three different commits.

parent 7f8d247211
commit 39f3c894d7

16 changed files with 1552 additions and 1239 deletions
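The pattern repeated throughout this diff is that the C++ `tokenizer_t` value type is replaced by an opaque Rust type owned through `rust::Box`, constructed via a free `new_tokenizer()` function. The following is a minimal sketch of what such a bridge looks like with the cxx crate; the names mirror the diff, but the signatures are illustrative assumptions, not fish's actual FFI (in particular, fish's wide strings need conversion glue that is elided here).

    // Illustrative cxx bridge in the shape the C++ side of this diff expects:
    // an opaque Rust tokenizer handed to C++ as a boxed handle. Signatures are
    // assumptions for the sketch; the real bridge differs in detail.
    #[cxx::bridge]
    mod ffi {
        extern "Rust" {
            type Tokenizer;
            // C++ sees: rust::Box<Tokenizer> new_tokenizer(rust::Str, uint8_t)
            fn new_tokenizer(src: &str, flags: u8) -> Box<Tokenizer>;
            fn has_next(self: &Tokenizer) -> bool;
        }
    }

    pub struct Tokenizer {
        src: String,
        flags: u8,
        pos: usize,
    }

    fn new_tokenizer(src: &str, flags: u8) -> Box<Tokenizer> {
        Box::new(Tokenizer { src: src.to_owned(), flags, pos: 0 })
    }

    impl Tokenizer {
        fn has_next(&self) -> bool {
            self.pos < self.src.len()
        }
    }

On the C++ side, `rust::Box<tokenizer_t>` owns the Rust allocation and frees it through the generated drop glue, which is why every former stack-constructed `tokenizer_t tok(...)` in this diff becomes `auto tok = new_tokenizer(...)` with `->` access.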
@@ -126,7 +126,7 @@ set(FISH_SRCS
     src/parser.cpp src/parser_keywords.cpp src/path.cpp src/postfork.cpp
     src/proc.cpp src/re.cpp src/reader.cpp src/screen.cpp
     src/signals.cpp src/termsize.cpp src/timer.cpp src/tinyexpr.cpp
-    src/tokenizer.cpp src/trace.cpp src/utf8.cpp
+    src/trace.cpp src/utf8.cpp
     src/wait_handle.cpp src/wcstringutil.cpp src/wgetopt.cpp src/wildcard.cpp
     src/wutil.cpp src/fds.cpp src/rustffi.cpp
 )
(File diff suppressed because it is too large.)

src/ast.cpp (19 changes)
@@ -77,8 +77,7 @@ static parse_keyword_t keyword_for_token(token_type_t tok, const wcstring &token
 }
 
 /// Convert from tokenizer_t's token type to a parse_token_t type.
-static parse_token_type_t parse_token_type_from_tokenizer_token(
-    enum token_type_t tokenizer_token_type) {
+static parse_token_type_t parse_token_type_from_tokenizer_token(token_type_t tokenizer_token_type) {
     switch (tokenizer_token_type) {
         case token_type_t::string:
             return parse_token_type_t::string;
@@ -111,7 +110,7 @@ class token_stream_t {
     explicit token_stream_t(const wcstring &src, parse_tree_flags_t flags,
                             std::vector<source_range_t> &comments)
         : src_(src),
-          tok_(src_.c_str(), tokenizer_flags_from_parse_flags(flags)),
+          tok_(new_tokenizer(src_.c_str(), tokenizer_flags_from_parse_flags(flags))),
           comment_ranges(comments) {}
 
     /// \return the token at the given index, without popping it. If the token stream is exhausted,
@@ -161,8 +160,8 @@ class token_stream_t {
     /// \return a new parse token, advancing the tokenizer.
     /// This returns comments.
     parse_token_t advance_1() {
-        auto mtoken = tok_.next();
-        if (!mtoken.has_value()) {
+        auto mtoken = tok_->next();
+        if (!mtoken) {
             return parse_token_t{parse_token_type_t::terminate};
         }
         const tok_t &token = *mtoken;
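Note the change in optionality above: the old `tok_.next()` returned a `maybe_t<tok_t>`, while the boxed Rust tokenizer's `next()` returns a pointer-like handle that is simply null when the stream is exhausted, hence `if (!mtoken)`. On the Rust side this is naturally an `Option`; a sketch with illustrative types (the FFI glue that turns `None` into a null handle is elided, since cxx cannot pass an `Option` directly):

    pub struct Tok {
        pub offset: usize,
        pub length: usize,
    }

    pub struct Tokenizer {
        src: String,
        pos: usize,
    }

    impl Tokenizer {
        // Option-returning iteration; "no more tokens" surfaces to C++ as a
        // null handle. This toy version just splits on whitespace.
        pub fn next(&mut self) -> Option<Tok> {
            let rest = &self.src[self.pos..];
            let start = self.pos + rest.find(|c: char| !c.is_whitespace())?;
            let len = self.src[start..]
                .find(char::is_whitespace)
                .unwrap_or(self.src.len() - start);
            self.pos = start + len;
            Some(Tok { offset: start, length: len })
        }
    }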
@@ -171,9 +170,9 @@ class token_stream_t {
         // `builtin --names` lists builtins, but `builtin "--names"` attempts to run --names as a
         // command. Amazingly as of this writing (10/12/13) nobody seems to have noticed this.
         // Squint at it really hard and it even starts to look like a feature.
-        parse_token_t result{parse_token_type_from_tokenizer_token(token.type)};
-        const wcstring &text = tok_.copy_text_of(token, &storage_);
-        result.keyword = keyword_for_token(token.type, text);
+        parse_token_t result{parse_token_type_from_tokenizer_token(token.type_)};
+        const wcstring &text = storage_ = *tok_->text_of(token);
+        result.keyword = keyword_for_token(token.type_, text);
         result.has_dash_prefix = !text.empty() && text.at(0) == L'-';
         result.is_help_argument = (text == L"-h" || text == L"--help");
         result.is_newline = (result.type == parse_token_type_t::end && text == L"\n");
@@ -222,7 +221,7 @@ class token_stream_t {
     const wcstring &src_;
 
     // The tokenizer to generate new tokens.
-    tokenizer_t tok_;
+    rust::Box<tokenizer_t> tok_;
 
     /// Any comment nodes are collected here.
     /// These are only collected if parse_flag_include_comments is set.
@@ -749,7 +748,7 @@ struct populator_t {
 
         case parse_token_type_t::tokenizer_error:
             parse_error(tok, parse_error_from_tokenizer_error(tok.tok_error), L"%ls",
-                        tokenizer_get_error_message(tok.tok_error));
+                        tokenizer_get_error_message(tok.tok_error)->c_str());
            break;
 
         case parse_token_type_t::end:
@@ -103,12 +103,12 @@ static void write_part(const wchar_t *begin, const wchar_t *end, int cut_at_curs
     // std::fwprintf( stderr, L"Subshell: %ls, end char %lc\n", buff, *end );
     wcstring out;
     wcstring buff(begin, end - begin);
-    tokenizer_t tok(buff.c_str(), TOK_ACCEPT_UNFINISHED);
-    while (auto token = tok.next()) {
+    auto tok = new_tokenizer(buff.c_str(), TOK_ACCEPT_UNFINISHED);
+    while (auto token = tok->next()) {
         if ((cut_at_cursor) && (token->offset + token->length >= pos)) break;
 
-        if (token->type == token_type_t::string) {
-            wcstring tmp = tok.text_of(*token);
+        if (token->type_ == token_type_t::string) {
+            wcstring tmp = *tok->text_of(*token);
             unescape_string_in_place(&tmp, UNESCAPE_INCOMPLETE);
             out.append(tmp);
             out.push_back(L'\n');
@@ -107,7 +107,7 @@ maybe_t<int> builtin_fg(parser_t &parser, io_streams_t &streams, const wchar_t *
         std::fwprintf(stderr, FG_MSG, job->job_id(), job->command_wcstr());
     }
 
-    wcstring ft = tok_command(job->command());
+    wcstring ft = *tok_command(job->command());
     if (!ft.empty()) {
         // Provide value for `status current-command`
         parser.libdata().status_vars.command = ft;
@@ -425,7 +425,8 @@ static int validate_read_args(const wchar_t *cmd, read_cmd_opts_t &opts, int arg
             return STATUS_INVALID_ARGS;
         }
         if (env_var_t::flags_for(argv[i]) & env_var_t::flag_read_only) {
-            streams.err.append_format(_(L"%ls: %ls: cannot overwrite read-only variable"), cmd, argv[i]);
+            streams.err.append_format(_(L"%ls: %ls: cannot overwrite read-only variable"), cmd,
+                                      argv[i]);
             builtin_print_error_trailer(parser, streams.err, cmd);
             return STATUS_INVALID_ARGS;
         }
@@ -529,13 +530,13 @@ maybe_t<int> builtin_read(parser_t &parser, io_streams_t &streams, const wchar_t
     }
 
     if (opts.tokenize) {
-        tokenizer_t tok{buff.c_str(), TOK_ACCEPT_UNFINISHED};
+        auto tok = new_tokenizer(buff.c_str(), TOK_ACCEPT_UNFINISHED);
         wcstring out;
         if (opts.array) {
             // Array mode: assign each token as a separate element of the sole var.
             wcstring_list_t tokens;
-            while (auto t = tok.next()) {
-                auto text = tok.text_of(*t);
+            while (auto t = tok->next()) {
+                auto text = *tok->text_of(*t);
                 if (unescape_string(text, &out, UNESCAPE_DEFAULT)) {
                     tokens.push_back(out);
                 } else {
@@ -545,9 +546,9 @@ maybe_t<int> builtin_read(parser_t &parser, io_streams_t &streams, const wchar_t
 
             parser.set_var_and_fire(*var_ptr++, opts.place, std::move(tokens));
         } else {
-            maybe_t<tok_t> t;
-            while ((vars_left() - 1 > 0) && (t = tok.next())) {
-                auto text = tok.text_of(*t);
+            std::unique_ptr<tok_t> t;
+            while ((vars_left() - 1 > 0) && (t = tok->next())) {
+                auto text = *tok->text_of(*t);
                 if (unescape_string(text, &out, UNESCAPE_DEFAULT)) {
                     parser.set_var_and_fire(*var_ptr++, opts.place, out);
                 } else {
@@ -556,7 +557,7 @@ maybe_t<int> builtin_read(parser_t &parser, io_streams_t &streams, const wchar_t
             }
 
             // If we still have tokens, set the last variable to them.
-            if ((t = tok.next())) {
+            if ((t = tok->next())) {
                 wcstring rest = wcstring(buff, t->offset);
                 parser.set_var_and_fire(*var_ptr++, opts.place, std::move(rest));
             }
@@ -857,7 +857,7 @@ bool completer_t::complete_param_for_command(const wcstring &cmd_orig, const wcs
         if (wildcard_match(match, key.first)) {
             // Copy all of their options into our list. Oof, this is a lot of copying.
             // We have to copy them in reverse order to preserve legacy behavior (#9221).
-            const auto& options = kv.second.get_options();
+            const auto &options = kv.second.get_options();
             all_options.emplace_back(options.rbegin(), options.rend());
         }
     }
@@ -887,7 +887,8 @@ bool completer_t::complete_param_for_command(const wcstring &cmd_orig, const wcs
 
         if (this->conditions_test(o.conditions)) {
             if (o.type == option_type_short) {
-                // Only override a true last_option_requires_param value with a false one
+                // Only override a true last_option_requires_param value with a false
+                // one
                 if (last_option_requires_param.has_value()) {
                     last_option_requires_param =
                         *last_option_requires_param && o.result_mode.requires_param;
@@ -1402,10 +1403,10 @@ void completer_t::walk_wrap_chain(const wcstring &cmd, const wcstring &cmdline,
 
     // Separate the wrap target into any variable assignments VAR=... and the command itself.
     wcstring wrapped_command;
-    tokenizer_t tokenizer(wt.c_str(), 0);
+    auto tokenizer = new_tokenizer(wt.c_str(), 0);
     size_t wrapped_command_offset_in_wt = wcstring::npos;
-    while (auto tok = tokenizer.next()) {
-        wcstring tok_src = tok->get_source(wt);
+    while (auto tok = tokenizer->next()) {
+        wcstring tok_src = *tok->get_source(wt);
         if (variable_assignment_equals_pos(tok_src)) {
             ad->var_assignments->push_back(std::move(tok_src));
         } else {
@@ -1485,7 +1486,7 @@ void completer_t::mark_completions_duplicating_arguments(const wcstring &cmd,
     // Get all the arguments, unescaped, into an array that we're going to bsearch.
     wcstring_list_t arg_strs;
     for (const auto &arg : args) {
-        wcstring argstr = arg.get_source(cmd);
+        wcstring argstr = *arg.get_source(cmd);
         wcstring argstr_unesc;
         if (unescape_string(argstr, &argstr_unesc, UNESCAPE_DEFAULT)) {
             arg_strs.push_back(std::move(argstr_unesc));
@@ -1542,7 +1543,7 @@ void completer_t::perform_for_commandline(wcstring cmdline) {
         tokens.erase(
             std::remove_if(tokens.begin(), tokens.end(),
                            [&cmdline](const tok_t &token) {
-                               return parser_keywords_is_subcommand(token.get_source(cmdline));
+                               return parser_keywords_is_subcommand(*token.get_source(cmdline));
                            }),
             tokens.end());
     }
@@ -1552,7 +1553,7 @@ void completer_t::perform_for_commandline(wcstring cmdline) {
     wcstring_list_t var_assignments;
     for (const tok_t &tok : tokens) {
         if (tok.location_in_or_at_end_of_source_range(cursor_pos)) break;
-        wcstring tok_src = tok.get_source(cmdline);
+        wcstring tok_src = *tok.get_source(cmdline);
         if (!variable_assignment_equals_pos(tok_src)) break;
         var_assignments.push_back(std::move(tok_src));
     }
@@ -1576,26 +1577,27 @@ void completer_t::perform_for_commandline(wcstring cmdline) {
         effective_cmdline = &effective_cmdline_buf;
     }
 
-    if (tokens.back().type == token_type_t::comment) {
+    if (tokens.back().type_ == token_type_t::comment) {
         return;
     }
-    tokens.erase(std::remove_if(tokens.begin(), tokens.end(),
-                                [](const tok_t &tok) { return tok.type == token_type_t::comment; }),
-                 tokens.end());
+    tokens.erase(
+        std::remove_if(tokens.begin(), tokens.end(),
+                       [](const tok_t &tok) { return tok.type_ == token_type_t::comment; }),
+        tokens.end());
     assert(!tokens.empty());
 
     const tok_t &cmd_tok = tokens.front();
     const tok_t &cur_tok = tokens.back();
 
     // Since fish does not currently support redirect in command position, we return here.
-    if (cmd_tok.type != token_type_t::string) return;
-    if (cur_tok.type == token_type_t::error) return;
+    if (cmd_tok.type_ != token_type_t::string) return;
+    if (cur_tok.type_ == token_type_t::error) return;
     for (const auto &tok : tokens) {  // If there was an error, it was in the last token.
-        assert(tok.type == token_type_t::string || tok.type == token_type_t::redirect);
+        assert(tok.type_ == token_type_t::string || tok.type_ == token_type_t::redirect);
     }
     // If we are completing a variable name or a tilde expansion user name, we do that and
     // return. No need for any other completions.
-    const wcstring current_token = cur_tok.get_source(cmdline);
+    const wcstring current_token = *cur_tok.get_source(cmdline);
     if (cur_tok.location_in_or_at_end_of_source_range(cursor_pos)) {
         if (try_complete_variable(current_token) || try_complete_user(current_token)) {
             return;
@@ -1614,11 +1616,11 @@ void completer_t::perform_for_commandline(wcstring cmdline) {
         return;
     }
     // See whether we are in an argument, in a redirection or in the whitespace in between.
-    bool in_redirection = cur_tok.type == token_type_t::redirect;
+    bool in_redirection = cur_tok.type_ == token_type_t::redirect;
 
     bool had_ddash = false;
     wcstring current_argument, previous_argument;
-    if (cur_tok.type == token_type_t::string &&
+    if (cur_tok.type_ == token_type_t::string &&
         cur_tok.location_in_or_at_end_of_source_range(position_in_statement)) {
         // If the cursor is in whitespace, then the "current" argument is empty and the
         // previous argument is the matching one. But if the cursor was in or at the end
@@ -1632,15 +1634,15 @@ void completer_t::perform_for_commandline(wcstring cmdline) {
             current_argument = current_token;
             if (tokens.size() >= 2) {
                 tok_t prev_tok = tokens.at(tokens.size() - 2);
-                if (prev_tok.type == token_type_t::string)
-                    previous_argument = prev_tok.get_source(cmdline);
-                in_redirection = prev_tok.type == token_type_t::redirect;
+                if (prev_tok.type_ == token_type_t::string)
+                    previous_argument = *prev_tok.get_source(cmdline);
+                in_redirection = prev_tok.type_ == token_type_t::redirect;
             }
         }
 
         // Check to see if we have a preceding double-dash.
         for (size_t i = 0; i < tokens.size() - 1; i++) {
-            if (tokens.at(i).get_source(cmdline) == L"--") {
+            if (*tokens.at(i).get_source(cmdline) == L"--") {
                 had_ddash = true;
                 break;
             }
@@ -1658,7 +1660,7 @@ void completer_t::perform_for_commandline(wcstring cmdline) {
         source_offset_t bias = cmdline.size() - effective_cmdline->size();
         source_range_t command_range = {cmd_tok.offset - bias, cmd_tok.length};
 
-        wcstring exp_command = cmd_tok.get_source(cmdline);
+        wcstring exp_command = *cmd_tok.get_source(cmdline);
         bool unescaped =
             expand_command_token(ctx, exp_command) &&
             unescape_string(previous_argument, &arg_data.previous_argument, UNESCAPE_DEFAULT) &&
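The other recurring edit in this file is the leading `*` on `get_source()` and `text_of()` calls: accessors that used to hand back a C++ string by value now appear to return an owned string allocated on the Rust side, which the caller dereferences. A sketch of such an accessor, with illustrative types (fish's wide-string handling is elided):

    pub struct Tok {
        pub offset: usize,
        pub length: usize,
    }

    impl Tok {
        // Returns an owned, boxed copy of the token's source text. The C++
        // caller receives a boxed string and dereferences it, which is why
        // call sites in this diff gained a leading `*`.
        pub fn get_source(&self, src: &str) -> Box<String> {
            Box::new(src[self.offset..self.offset + self.length].to_string())
        }
    }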
@@ -420,9 +420,9 @@ struct pretty_printer_t {
         // always emit one.
         bool needs_nl = false;
 
-        tokenizer_t tokenizer(gap_text.c_str(), TOK_SHOW_COMMENTS | TOK_SHOW_BLANK_LINES);
-        while (maybe_t<tok_t> tok = tokenizer.next()) {
-            wcstring tok_text = tokenizer.text_of(*tok);
+        auto tokenizer = new_tokenizer(gap_text.c_str(), TOK_SHOW_COMMENTS | TOK_SHOW_BLANK_LINES);
+        while (auto tok = tokenizer->next()) {
+            wcstring tok_text = *tokenizer->text_of(*tok);
 
             if (needs_nl) {
                 emit_newline();
@@ -434,11 +434,11 @@ struct pretty_printer_t {
             if (tok_text == L"\n") continue;
         }
 
-        if (tok->type == token_type_t::comment) {
+        if (tok->type_ == token_type_t::comment) {
             emit_space_or_indent();
             output.append(tok_text);
             needs_nl = true;
-        } else if (tok->type == token_type_t::end) {
+        } else if (tok->type_ == token_type_t::end) {
             // This may be either a newline or semicolon.
             // Semicolons found here are not part of the ast and can simply be removed.
             // Newlines are preserved unless mask_newline is set.
@@ -449,7 +449,7 @@ struct pretty_printer_t {
                 fprintf(stderr,
                         "Gap text should only have comments and newlines - instead found token "
                         "type %d with text: %ls\n",
-                        (int)tok->type, tok_text.c_str());
+                        (int)tok->type_, tok_text.c_str());
                 DIE("Gap text should only have comments and newlines");
             }
         }
@@ -640,25 +640,25 @@ static void test_tokenizer() {
     say(L"Testing tokenizer");
     {
         const wchar_t *str = L"alpha beta";
-        tokenizer_t t(str, 0);
-        maybe_t<tok_t> token{};
+        auto t = new_tokenizer(str, 0);
+        std::unique_ptr<tok_t> token{};
 
-        token = t.next();  // alpha
-        do_test(token.has_value());
-        do_test(token->type == token_type_t::string);
+        token = t->next();  // alpha
+        do_test(token);
+        do_test(token->type_ == token_type_t::string);
         do_test(token->offset == 0);
         do_test(token->length == 5);
-        do_test(t.text_of(*token) == L"alpha");
+        do_test(*t->text_of(*token) == L"alpha");
 
-        token = t.next();  // beta
-        do_test(token.has_value());
-        do_test(token->type == token_type_t::string);
+        token = t->next();  // beta
+        do_test(token);
+        do_test(token->type_ == token_type_t::string);
         do_test(token->offset == 6);
         do_test(token->length == 4);
-        do_test(t.text_of(*token) == L"beta");
+        do_test(*t->text_of(*token) == L"beta");
 
-        token = t.next();
-        do_test(!token.has_value());
+        token = t->next();
+        do_test(!token);
     }
 
     const wchar_t *str =
@@ -678,21 +678,21 @@ static void test_tokenizer() {
     say(L"Test correct tokenization");
 
     {
-        tokenizer_t t(str, 0);
+        auto t = new_tokenizer(str, 0);
         size_t i = 0;
-        while (auto token = t.next()) {
+        while (auto token = t->next()) {
             if (i >= sizeof types / sizeof *types) {
                 err(L"Too many tokens returned from tokenizer");
-                std::fwprintf(stdout, L"Got excess token type %ld\n", (long)token->type);
+                std::fwprintf(stdout, L"Got excess token type %ld\n", (long)token->type_);
                 break;
             }
-            if (types[i] != token->type) {
+            if (types[i] != token->type_) {
                 err(L"Tokenization error:");
                 std::fwprintf(
                     stdout,
                     L"Token number %zu of string \n'%ls'\n, expected type %ld, got token type "
                     L"%ld\n",
-                    i + 1, str, (long)types[i], (long)token->type);
+                    i + 1, str, (long)types[i], (long)token->type_);
             }
             i++;
         }
@@ -703,50 +703,50 @@ static void test_tokenizer() {
 
     // Test some errors.
     {
-        tokenizer_t t(L"abc\\", 0);
-        auto token = t.next();
-        do_test(token.has_value());
-        do_test(token->type == token_type_t::error);
+        auto t = new_tokenizer(L"abc\\", 0);
+        auto token = t->next();
+        do_test(token);
+        do_test(token->type_ == token_type_t::error);
         do_test(token->error == tokenizer_error_t::unterminated_escape);
         do_test(token->error_offset_within_token == 3);
     }
 
     {
-        tokenizer_t t(L"abc )defg(hij", 0);
-        auto token = t.next();
-        do_test(token.has_value());
-        token = t.next();
-        do_test(token.has_value());
-        do_test(token->type == token_type_t::error);
+        auto t = new_tokenizer(L"abc )defg(hij", 0);
+        auto token = t->next();
+        do_test(token);
+        token = t->next();
+        do_test(token);
+        do_test(token->type_ == token_type_t::error);
         do_test(token->error == tokenizer_error_t::closing_unopened_subshell);
         do_test(token->offset == 4);
         do_test(token->error_offset_within_token == 0);
     }
 
     {
-        tokenizer_t t(L"abc defg(hij (klm)", 0);
-        auto token = t.next();
-        do_test(token.has_value());
-        token = t.next();
-        do_test(token.has_value());
-        do_test(token->type == token_type_t::error);
+        auto t = new_tokenizer(L"abc defg(hij (klm)", 0);
+        auto token = t->next();
+        do_test(token);
+        token = t->next();
+        do_test(token);
+        do_test(token->type_ == token_type_t::error);
         do_test(token->error == tokenizer_error_t::unterminated_subshell);
         do_test(token->error_offset_within_token == 4);
     }
 
     {
-        tokenizer_t t(L"abc defg[hij (klm)", 0);
-        auto token = t.next();
-        do_test(token.has_value());
-        token = t.next();
-        do_test(token.has_value());
-        do_test(token->type == token_type_t::error);
+        auto t = new_tokenizer(L"abc defg[hij (klm)", 0);
+        auto token = t->next();
+        do_test(token);
+        token = t->next();
+        do_test(token);
+        do_test(token->type_ == token_type_t::error);
         do_test(token->error == tokenizer_error_t::unterminated_slice);
         do_test(token->error_offset_within_token == 4);
     }
 
     // Test some redirection parsing.
-    auto pipe_or_redir = [](const wchar_t *s) { return pipe_or_redir_t::from_string(s); };
+    auto pipe_or_redir = [](const wchar_t *s) { return pipe_or_redir_from_string(s); };
     do_test(pipe_or_redir(L"|")->is_pipe);
     do_test(pipe_or_redir(L"0>|")->is_pipe);
     do_test(pipe_or_redir(L"0>|")->fd == 0);
@@ -770,7 +770,7 @@ static void test_tokenizer() {
     do_test(pipe_or_redir(L"&>?")->stderr_merge);
 
     auto get_redir_mode = [](const wchar_t *s) -> maybe_t<redirection_mode_t> {
-        if (auto redir = pipe_or_redir_t::from_string(s)) {
+        if (auto redir = pipe_or_redir_from_string(s)) {
             return redir->mode;
         }
         return none();
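Here `pipe_or_redir_t::from_string` becomes the free function `pipe_or_redir_from_string`: with the type now living in Rust, the bridge exposes constructors as free functions rather than static member functions. The parse-failure case, formerly an empty `maybe_t`, comes back as a null handle, so the boolean tests at the call sites keep working. A plain-Rust sketch of the new entry point (fields and parsing are illustrative stubs):

    pub struct PipeOrRedir {
        pub is_pipe: bool,
        pub fd: i32,
        pub stderr_merge: bool,
    }

    // Option::None corresponds to the null handle the C++ side tests with
    // `if (auto redir = ...)`; the FFI glue is elided.
    pub fn pipe_or_redir_from_string(s: &str) -> Option<PipeOrRedir> {
        match s {
            "|" => Some(PipeOrRedir { is_pipe: true, fd: -1, stderr_merge: false }),
            "0>|" => Some(PipeOrRedir { is_pipe: true, fd: 0, stderr_merge: false }),
            "&>?" => Some(PipeOrRedir { is_pipe: false, fd: 1, stderr_merge: true }),
            _ => None, // the real parser accepts the full redirection grammar
        }
    }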
@@ -1520,6 +1520,12 @@ static void test_indents() {
              0, "\nend"  //
     );
 
+    tests.clear();
+    add_test(&tests,  //
+             0, "echo 'continuation line' \\",  //
+             1, "\ncont",  //
+             0, "\n"  //
+    );
     int test_idx = 0;
     for (const indent_test_t &test : tests) {
         // Construct the input text and expected indents.
@@ -2740,11 +2746,11 @@ static void test_1_word_motion(word_motion_t motion, move_word_style_t style,
     }
     stops.erase(idx);
 
-    move_word_state_machine_t sm(style);
+    auto sm = new_move_word_state_machine(style);
     while (idx != end) {
         size_t char_idx = (motion == word_motion_left ? idx - 1 : idx);
         wchar_t wc = command.at(char_idx);
-        bool will_stop = !sm.consume_char(wc);
+        bool will_stop = !sm->consume_char(wc);
         // std::fwprintf(stdout, L"idx %lu, looking at %lu (%c): %d\n", idx, char_idx, (char)wc,
         // will_stop);
         bool expected_stop = (stops.count(idx) > 0);
@@ -2765,7 +2771,7 @@ static void test_1_word_motion(word_motion_t motion, move_word_style_t style,
             stops.erase(idx);
         }
         if (will_stop) {
-            sm.reset();
+            sm->reset();
         } else {
             idx += (motion == word_motion_left ? -1 : 1);
         }
@@ -2775,36 +2781,51 @@ static void test_1_word_motion(word_motion_t motion, move_word_style_t style,
 /// Test word motion (forward-word, etc.). Carets represent cursor stops.
 static void test_word_motion() {
     say(L"Testing word motion");
-    test_1_word_motion(word_motion_left, move_word_style_punctuation, L"^echo ^hello_^world.^txt^");
-    test_1_word_motion(word_motion_right, move_word_style_punctuation,
+    test_1_word_motion(word_motion_left, move_word_style_t::move_word_style_punctuation,
+                       L"^echo ^hello_^world.^txt^");
+    test_1_word_motion(word_motion_right, move_word_style_t::move_word_style_punctuation,
                        L"^echo^ hello^_world^.txt^");
 
-    test_1_word_motion(word_motion_left, move_word_style_punctuation,
+    test_1_word_motion(word_motion_left, move_word_style_t::move_word_style_punctuation,
                        L"echo ^foo_^foo_^foo/^/^/^/^/^ ^");
-    test_1_word_motion(word_motion_right, move_word_style_punctuation,
+    test_1_word_motion(word_motion_right, move_word_style_t::move_word_style_punctuation,
                        L"^echo^ foo^_foo^_foo^/^/^/^/^/ ^");
 
-    test_1_word_motion(word_motion_left, move_word_style_path_components, L"^/^foo/^bar/^baz/^");
-    test_1_word_motion(word_motion_left, move_word_style_path_components, L"^echo ^--foo ^--bar^");
-    test_1_word_motion(word_motion_left, move_word_style_path_components,
+    test_1_word_motion(word_motion_left, move_word_style_t::move_word_style_path_components,
+                       L"^/^foo/^bar/^baz/^");
+    test_1_word_motion(word_motion_left, move_word_style_t::move_word_style_path_components,
+                       L"^echo ^--foo ^--bar^");
+    test_1_word_motion(word_motion_left, move_word_style_t::move_word_style_path_components,
                        L"^echo ^hi ^> ^/^dev/^null^");
 
-    test_1_word_motion(word_motion_left, move_word_style_path_components,
+    test_1_word_motion(word_motion_left, move_word_style_t::move_word_style_path_components,
                        L"^echo ^/^foo/^bar{^aaa,^bbb,^ccc}^bak/^");
-    test_1_word_motion(word_motion_left, move_word_style_path_components, L"^echo ^bak ^///^");
-    test_1_word_motion(word_motion_left, move_word_style_path_components, L"^aaa ^@ ^@^aaa^");
-    test_1_word_motion(word_motion_left, move_word_style_path_components, L"^aaa ^a ^@^aaa^");
-    test_1_word_motion(word_motion_left, move_word_style_path_components, L"^aaa ^@@@ ^@@^aa^");
-    test_1_word_motion(word_motion_left, move_word_style_path_components, L"^aa^@@ ^aa@@^a^");
+    test_1_word_motion(word_motion_left, move_word_style_t::move_word_style_path_components,
+                       L"^echo ^bak ^///^");
+    test_1_word_motion(word_motion_left, move_word_style_t::move_word_style_path_components,
+                       L"^aaa ^@ ^@^aaa^");
+    test_1_word_motion(word_motion_left, move_word_style_t::move_word_style_path_components,
+                       L"^aaa ^a ^@^aaa^");
+    test_1_word_motion(word_motion_left, move_word_style_t::move_word_style_path_components,
+                       L"^aaa ^@@@ ^@@^aa^");
+    test_1_word_motion(word_motion_left, move_word_style_t::move_word_style_path_components,
+                       L"^aa^@@ ^aa@@^a^");
 
-    test_1_word_motion(word_motion_right, move_word_style_punctuation, L"^a^ bcd^");
-    test_1_word_motion(word_motion_right, move_word_style_punctuation, L"a^b^ cde^");
-    test_1_word_motion(word_motion_right, move_word_style_punctuation, L"^ab^ cde^");
-    test_1_word_motion(word_motion_right, move_word_style_punctuation, L"^ab^&cd^ ^& ^e^ f^&");
+    test_1_word_motion(word_motion_right, move_word_style_t::move_word_style_punctuation,
+                       L"^a^ bcd^");
+    test_1_word_motion(word_motion_right, move_word_style_t::move_word_style_punctuation,
+                       L"a^b^ cde^");
+    test_1_word_motion(word_motion_right, move_word_style_t::move_word_style_punctuation,
+                       L"^ab^ cde^");
+    test_1_word_motion(word_motion_right, move_word_style_t::move_word_style_punctuation,
+                       L"^ab^&cd^ ^& ^e^ f^&");
 
-    test_1_word_motion(word_motion_right, move_word_style_whitespace, L"^^a-b-c^ d-e-f");
-    test_1_word_motion(word_motion_right, move_word_style_whitespace, L"^a-b-c^\n d-e-f^ ");
-    test_1_word_motion(word_motion_right, move_word_style_whitespace, L"^a-b-c^\n\nd-e-f^ ");
+    test_1_word_motion(word_motion_right, move_word_style_t::move_word_style_whitespace,
+                       L"^^a-b-c^ d-e-f");
+    test_1_word_motion(word_motion_right, move_word_style_t::move_word_style_whitespace,
+                       L"^a-b-c^\n d-e-f^ ");
+    test_1_word_motion(word_motion_right, move_word_style_t::move_word_style_whitespace,
+                       L"^a-b-c^\n\nd-e-f^ ");
 }
 
 /// Test is_potential_path.
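All of these call sites gained a `move_word_style_t::` prefix because the enum now comes through the bridge as a scoped type: a cxx shared enum generates an enum-class-like C++ type, so the formerly unscoped enumerators must be qualified. A sketch of the shared enum plus the boxed state machine the diff constructs with `new_move_word_state_machine` (signatures are assumptions; `u32` stands in for whatever wchar_t glue the real bridge uses):

    #[cxx::bridge]
    mod ffi {
        enum MoveWordStyle {
            move_word_style_punctuation,
            move_word_style_path_components,
            move_word_style_whitespace,
        }
        extern "Rust" {
            type MoveWordStateMachine;
            fn new_move_word_state_machine(style: MoveWordStyle) -> Box<MoveWordStateMachine>;
            fn consume_char(self: &mut MoveWordStateMachine, c: u32) -> bool;
            fn reset(self: &mut MoveWordStateMachine);
        }
    }

    pub struct MoveWordStateMachine {
        style: ffi::MoveWordStyle,
        consumed: u32,
    }

    fn new_move_word_state_machine(style: ffi::MoveWordStyle) -> Box<MoveWordStateMachine> {
        Box::new(MoveWordStateMachine { style, consumed: 0 })
    }

    impl MoveWordStateMachine {
        fn consume_char(&mut self, _c: u32) -> bool {
            self.consumed += 1; // real word-boundary logic elided
            true
        }
        fn reset(&mut self) {
            self.consumed = 0;
        }
    }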
@@ -5694,6 +5715,14 @@ static void test_highlighting() {
         {L"\\U110000", highlight_role_t::error},
     });
 #endif
 
+    highlight_tests.clear();
+    highlight_tests.push_back({
+        {L"echo", highlight_role_t::command},
+        {L"stuff", highlight_role_t::param},
+        {L"# comment", highlight_role_t::comment},
+    });
+
     bool saved_flag = feature_test(feature_flag_t::ampersand_nobg_in_token);
     mutable_fish_features()->set(feature_flag_t::ampersand_nobg_in_token, true);
     for (const highlight_component_list_t &components : highlight_tests) {
@@ -1158,12 +1158,10 @@ static bool contains_pending_variable(const std::vector<wcstring> &pending_varia
 }
 
 void highlighter_t::visit(const ast::redirection_t &redir) {
-    maybe_t<pipe_or_redir_t> oper =
-        pipe_or_redir_t::from_string(redir.oper.source(this->buff));  // like 2>
-    wcstring target = redir.target.source(this->buff);               // like &1 or file path
+    auto oper = pipe_or_redir_from_string(redir.oper.source(this->buff).c_str());  // like 2>
+    wcstring target = redir.target.source(this->buff);  // like &1 or file path
 
-    assert(oper.has_value() &&
-           "Should have successfully parsed a pipe_or_redir_t since it was in our ast");
+    assert(oper && "Should have successfully parsed a pipe_or_redir_t since it was in our ast");
 
     // Color the > part.
     // It may have parsed successfully yet still be invalid (e.g. 9999999999999>&1)
@@ -1005,7 +1005,7 @@ end_execution_reason_t parse_execution_context_t::determine_redirections(
         if (!arg_or_redir.is_redirection()) continue;
         const ast::redirection_t &redir_node = arg_or_redir.redirection();
 
-        maybe_t<pipe_or_redir_t> oper = pipe_or_redir_t::from_string(get_source(redir_node.oper));
+        auto oper = pipe_or_redir_from_string(get_source(redir_node.oper).c_str());
         if (!oper || !oper->is_valid()) {
             // TODO: figure out if this can ever happen. If so, improve this error message.
             return report_error(STATUS_INVALID_ARGS, redir_node, _(L"Invalid redirection: %ls"),
@@ -1202,8 +1202,8 @@ end_execution_reason_t parse_execution_context_t::populate_job_from_job_node(
             break;
         }
         // Handle the pipe, whose fd may not be the obvious stdout.
-        auto parsed_pipe = pipe_or_redir_t::from_string(get_source(jc.pipe));
-        assert(parsed_pipe.has_value() && parsed_pipe->is_pipe && "Failed to parse valid pipe");
+        auto parsed_pipe = pipe_or_redir_from_string(get_source(jc.pipe).c_str());
+        assert(parsed_pipe && parsed_pipe->is_pipe && "Failed to parse valid pipe");
         if (!parsed_pipe->is_valid()) {
             result = report_error(STATUS_INVALID_ARGS, jc.pipe, ILLEGAL_FD_ERR_MSG,
                                   get_source(jc.pipe).c_str());
@@ -178,7 +178,7 @@ static int parse_util_locate_cmdsub(const wchar_t *in, const wchar_t **begin, co
                 }
             }
         }
-        is_token_begin = is_token_delimiter(pos[0], pos[1]);
+        is_token_begin = is_token_delimiter(pos[0], std::make_shared<wchar_t>(pos[1]));
     } else {
         escaped = false;
         is_token_begin = false;
@@ -367,12 +367,12 @@ static void job_or_process_extent(bool process, const wchar_t *buff, size_t curs
     if (b) *b = end;
 
     const wcstring buffcpy(begin, end);
-    tokenizer_t tok(buffcpy.c_str(), TOK_ACCEPT_UNFINISHED | TOK_SHOW_COMMENTS);
-    maybe_t<tok_t> token{};
-    while ((token = tok.next()) && !finished) {
+    auto tok = new_tokenizer(buffcpy.c_str(), TOK_ACCEPT_UNFINISHED | TOK_SHOW_COMMENTS);
+    std::unique_ptr<tok_t> token{};
+    while ((token = tok->next()) && !finished) {
         size_t tok_begin = token->offset;
 
-        switch (token->type) {
+        switch (token->type_) {
             case token_type_t::pipe: {
                 if (!process) {
                     break;
@@ -440,13 +440,13 @@ void parse_util_token_extent(const wchar_t *buff, size_t cursor_pos, const wchar
 
     const wcstring buffcpy = wcstring(cmdsubst_begin, cmdsubst_end - cmdsubst_begin);
 
-    tokenizer_t tok(buffcpy.c_str(), TOK_ACCEPT_UNFINISHED);
-    while (maybe_t<tok_t> token = tok.next()) {
+    auto tok = new_tokenizer(buffcpy.c_str(), TOK_ACCEPT_UNFINISHED);
+    while (std::unique_ptr<tok_t> token = tok->next()) {
         size_t tok_begin = token->offset;
         size_t tok_end = tok_begin;
 
         // Calculate end of token.
-        if (token->type == token_type_t::string) {
+        if (token->type_ == token_type_t::string) {
             tok_end += token->length;
         }
 
@@ -459,14 +459,14 @@ void parse_util_token_extent(const wchar_t *buff, size_t cursor_pos, const wchar
 
         // If cursor is inside the token, this is the token we are looking for. If so, set a and b
         // and break.
-        if (token->type == token_type_t::string && tok_end >= offset_within_cmdsubst) {
+        if (token->type_ == token_type_t::string && tok_end >= offset_within_cmdsubst) {
             a = cmdsubst_begin + token->offset;
             b = a + token->length;
             break;
         }
 
         // Remember previous string token.
-        if (token->type == token_type_t::string) {
+        if (token->type_ == token_type_t::string) {
             pa = cmdsubst_begin + token->offset;
             pb = pa + token->length;
         }
@@ -541,11 +541,11 @@ static wchar_t get_quote(const wcstring &cmd_str, size_t len) {
 }
 
 wchar_t parse_util_get_quote_type(const wcstring &cmd, size_t pos) {
-    tokenizer_t tok(cmd.c_str(), TOK_ACCEPT_UNFINISHED);
-    while (auto token = tok.next()) {
-        if (token->type == token_type_t::string &&
+    auto tok = new_tokenizer(cmd.c_str(), TOK_ACCEPT_UNFINISHED);
+    while (auto token = tok->next()) {
+        if (token->type_ == token_type_t::string &&
             token->location_in_or_at_end_of_source_range(pos)) {
-            return get_quote(tok.text_of(*token), pos - token->offset);
+            return get_quote(*tok->text_of(*token), pos - token->offset);
         }
     }
     return L'\0';
@@ -14,7 +14,8 @@ namespace ast {
 struct argument_t;
 class ast_t;
 }  // namespace ast
-struct tok_t;
+struct Tok;
+using tok_t = Tok;
 
 /// Handles slices: the square brackets in an expression like $foo[5..4]
 /// \return the length of the slice starting at \p in, or 0 if there is no slice, or -1 on error.
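Two renames visible here run through the whole diff. The C++ `tok_t` becomes an alias for the generated `Tok` type, so existing call sites keep compiling unchanged; and the member `type` becomes `type_`, which is consistent with `type` being a reserved keyword in Rust. A sketch of the Rust-side struct (only the fields the diff actually reads are grounded; the variant names are idiomatic guesses):

    pub enum TokenType {
        String,
        Pipe,
        Redirect,
        Background,
        End,
        Comment,
        Error,
    }

    // The field is named type_ because `type` is a Rust keyword; the C++
    // accesses in this diff (offset, length, type_, error,
    // error_offset_within_token) follow from that.
    pub struct Tok {
        pub offset: usize,
        pub length: usize,
        pub type_: TokenType,
        pub error_offset_within_token: usize,
    }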
@@ -432,12 +432,12 @@ class reader_history_search_t {
             assert(offset != wcstring::npos && "Should have found a match in the search result");
             add_if_new({std::move(text), offset});
         } else if (mode_ == token) {
-            tokenizer_t tok(text.c_str(), TOK_ACCEPT_UNFINISHED);
+            auto tok = new_tokenizer(text.c_str(), TOK_ACCEPT_UNFINISHED);
 
             std::vector<match_t> local_tokens;
-            while (auto token = tok.next()) {
-                if (token->type != token_type_t::string) continue;
-                wcstring text = tok.text_of(*token);
+            while (auto token = tok->next()) {
+                if (token->type_ != token_type_t::string) continue;
+                wcstring text = *tok->text_of(*token);
                 size_t offset = find(text, needle);
                 if (offset != wcstring::npos) {
                     local_tokens.push_back({std::move(text), offset});
@@ -865,7 +865,7 @@ class reader_data_t : public std::enable_shared_from_this<reader_data_t> {
     /// try expanding it as a wildcard, populating \p result with the expanded string.
     expand_result_t::result_t try_expand_wildcard(wcstring wc, size_t pos, wcstring *result);
 
-    void move_word(editable_line_t *el, bool move_right, bool erase, enum move_word_style_t style,
+    void move_word(editable_line_t *el, bool move_right, bool erase, move_word_style_t style,
                    bool newv);
 
     void run_input_command_scripts(const wcstring_list_t &cmds);
@@ -898,8 +898,9 @@ class reader_data_t : public std::enable_shared_from_this<reader_data_t> {
     bool can_autosuggest() const;
     void autosuggest_completed(autosuggestion_t result);
     void update_autosuggestion();
-    void accept_autosuggestion(bool full, bool single = false,
-                               move_word_style_t style = move_word_style_punctuation);
+    void accept_autosuggestion(
+        bool full, bool single = false,
+        move_word_style_t style = move_word_style_t::move_word_style_punctuation);
     void super_highlight_me_plenty();
 
     /// Finish up any outstanding syntax highlighting, before execution.
@@ -2115,11 +2116,11 @@ void reader_data_t::accept_autosuggestion(bool full, bool single, move_word_styl
                           autosuggestion.text.substr(command_line.size(), 1));
     } else {
         // Accept characters according to the specified style.
-        move_word_state_machine_t state(style);
+        auto state = new_move_word_state_machine(style);
         size_t want;
         for (want = command_line.size(); want < autosuggestion.text.size(); want++) {
             wchar_t wc = autosuggestion.text.at(want);
-            if (!state.consume_char(wc)) break;
+            if (!state->consume_char(wc)) break;
         }
         size_t have = command_line.size();
         replace_substring(&command_line, command_line.size(), 0,
@@ -2648,13 +2649,13 @@ enum move_word_dir_t { MOVE_DIR_LEFT, MOVE_DIR_RIGHT };
 /// \param erase Whether to erase the characters along the way or only move past them.
 /// \param newv if the new kill item should be appended to the previous kill item or not.
 void reader_data_t::move_word(editable_line_t *el, bool move_right, bool erase,
-                              enum move_word_style_t style, bool newv) {
+                              move_word_style_t style, bool newv) {
     // Return if we are already at the edge.
     const size_t boundary = move_right ? el->size() : 0;
     if (el->position() == boundary) return;
 
     // When moving left, a value of 1 means the character at index 0.
-    move_word_state_machine_t state(style);
+    auto state = new_move_word_state_machine(style);
     const wchar_t *const command_line = el->text().c_str();
     const size_t start_buff_pos = el->position();
 
@@ -2662,7 +2663,7 @@ void reader_data_t::move_word(editable_line_t *el, bool move_right, bool erase,
     while (buff_pos != boundary) {
         size_t idx = (move_right ? buff_pos : buff_pos - 1);
         wchar_t c = command_line[idx];
-        if (!state.consume_char(c)) break;
+        if (!state->consume_char(c)) break;
         buff_pos = (move_right ? buff_pos + 1 : buff_pos - 1);
     }
 
@@ -2710,7 +2711,7 @@ void reader_data_t::set_buffer_maintaining_pager(const wcstring &b, size_t pos,
 /// Run the specified command with the correct terminal modes, and while taking care to perform job
 /// notification, set the title, etc.
 static eval_res_t reader_run_command(parser_t &parser, const wcstring &cmd) {
-    wcstring ft = tok_command(cmd);
+    wcstring ft = *tok_command(cmd);
 
     // Provide values for `status current-command` and `status current-commandline`
     if (!ft.empty()) {
@@ -3303,10 +3304,10 @@ static wchar_t unescaped_quote(const wcstring &str, size_t pos) {
 
 /// Returns true if the last token is a comment.
 static bool text_ends_in_comment(const wcstring &text) {
-    tokenizer_t tok(text.c_str(), TOK_ACCEPT_UNFINISHED | TOK_SHOW_COMMENTS);
+    auto tok = new_tokenizer(text.c_str(), TOK_ACCEPT_UNFINISHED | TOK_SHOW_COMMENTS);
     bool is_comment = false;
-    while (auto token = tok.next()) {
-        is_comment = token->type == token_type_t::comment;
+    while (auto token = tok->next()) {
+        is_comment = token->type_ == token_type_t::comment;
     }
     return is_comment;
 }
@@ -3799,9 +3800,10 @@ void reader_data_t::handle_readline_command(readline_cmd_t c, readline_loop_stat
         case rl::backward_kill_path_component:
         case rl::backward_kill_bigword: {
             move_word_style_t style =
-                (c == rl::backward_kill_bigword ? move_word_style_whitespace
-                 : c == rl::backward_kill_path_component ? move_word_style_path_components
-                                                         : move_word_style_punctuation);
+                (c == rl::backward_kill_bigword ? move_word_style_t::move_word_style_whitespace
+                 : c == rl::backward_kill_path_component
+                     ? move_word_style_t::move_word_style_path_components
+                     : move_word_style_t::move_word_style_punctuation);
             // Is this the same killring item as the last kill?
             bool newv = (rls.last_cmd != rl::backward_kill_word &&
                          rls.last_cmd != rl::backward_kill_path_component &&
@@ -3813,8 +3815,8 @@ void reader_data_t::handle_readline_command(readline_cmd_t c, readline_loop_stat
         case rl::kill_bigword: {
             // The "bigword" functions differ only in that they move to the next whitespace, not
             // punctuation.
-            auto move_style =
-                (c == rl::kill_word) ? move_word_style_punctuation : move_word_style_whitespace;
+            auto move_style = (c == rl::kill_word) ? move_word_style_t::move_word_style_punctuation
+                                                   : move_word_style_t::move_word_style_whitespace;
             move_word(active_edit_line(), MOVE_DIR_RIGHT, true /* erase */, move_style,
                       rls.last_cmd != c /* same kill item if same movement */);
             break;
@@ -3831,8 +3833,9 @@ void reader_data_t::handle_readline_command(readline_cmd_t c, readline_loop_stat
             break;
         }
 
-        auto move_style = (c != rl::backward_bigword) ? move_word_style_punctuation
-                                                      : move_word_style_whitespace;
+        auto move_style = (c != rl::backward_bigword)
+                              ? move_word_style_t::move_word_style_punctuation
+                              : move_word_style_t::move_word_style_whitespace;
         move_word(active_edit_line(), MOVE_DIR_LEFT, false /* do not erase */, move_style,
                   false);
         break;
@@ -3849,8 +3852,9 @@ void reader_data_t::handle_readline_command(readline_cmd_t c, readline_loop_stat
             break;
         }
 
-        auto move_style = (c != rl::forward_bigword) ? move_word_style_punctuation
-                                                     : move_word_style_whitespace;
+        auto move_style = (c != rl::forward_bigword)
+                              ? move_word_style_t::move_word_style_punctuation
+                              : move_word_style_t::move_word_style_whitespace;
         editable_line_t *el = active_edit_line();
         if (el->position() < el->size()) {
             move_word(el, MOVE_DIR_RIGHT, false /* do not erase */, move_style, false);
@@ -4072,7 +4076,8 @@ void reader_data_t::handle_readline_command(readline_cmd_t c, readline_loop_stat
            // We apply the operation from the current location to the end of the word.
            size_t pos = el->position();
            size_t init_pos = pos;
            move_word(el, MOVE_DIR_RIGHT, false, move_word_style_punctuation, false);
            move_word(el, MOVE_DIR_RIGHT, false, move_word_style_t::move_word_style_punctuation,
                      false);
            wcstring replacement;
            for (; pos < el->position(); pos++) {
                wchar_t chr = el->text().at(pos);
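The reader.cpp hunks above are all mechanical: every bare enumerator such as `move_word_style_punctuation` becomes `move_word_style_t::move_word_style_punctuation`, because the enum now comes from the generated Rust bindings as a scoped `enum class`. A tiny sketch of why the qualification becomes mandatory (illustrative names only):

#include <cstdint>

// With an unscoped enum, enumerators leak into the enclosing scope:
enum old_style_t { old_punctuation, old_whitespace };
old_style_t a = old_punctuation;  // no qualification needed

// A generated binding is a scoped enum, so qualification is required:
enum class new_style_t : std::uint8_t { punctuation, whitespace };
new_style_t b = new_style_t::punctuation;

int main() { return static_cast<int>(a) + static_cast<int>(b); }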
src/tokenizer.cpp
@@ -1,887 +0,0 @@
// A specialized tokenizer for tokenizing the fish language. In the future, the tokenizer should be
// extended to support marks, tokenizing multiple strings and disposing of unused string segments.
#include "config.h"  // IWYU pragma: keep

#include "tokenizer.h"

#include <fcntl.h>
#include <limits.h>
#include <unistd.h>
#include <wctype.h>

#include <cwchar>
#include <utility>
#include <vector>

#include "common.h"
#include "fallback.h"  // IWYU pragma: keep
#include "future_feature_flags.h"
#include "wutil.h"  // IWYU pragma: keep

// _(s) is already wgettext(s).c_str(), so let's not convert back to wcstring
const wchar_t *tokenizer_get_error_message(tokenizer_error_t err) {
    switch (err) {
        case tokenizer_error_t::none:
            return L"";
        case tokenizer_error_t::unterminated_quote:
            return _(L"Unexpected end of string, quotes are not balanced");
        case tokenizer_error_t::unterminated_subshell:
            return _(L"Unexpected end of string, expecting ')'");
        case tokenizer_error_t::unterminated_slice:
            return _(L"Unexpected end of string, square brackets do not match");
        case tokenizer_error_t::unterminated_escape:
            return _(L"Unexpected end of string, incomplete escape sequence");
        case tokenizer_error_t::invalid_redirect:
            return _(L"Invalid input/output redirection");
        case tokenizer_error_t::invalid_pipe:
            return _(L"Cannot use stdin (fd 0) as pipe output");
        case tokenizer_error_t::invalid_pipe_ampersand:
            return _(L"|& is not valid. In fish, use &| to pipe both stdout and stderr.");
        case tokenizer_error_t::closing_unopened_subshell:
            return _(L"Unexpected ')' for unopened parenthesis");
        case tokenizer_error_t::illegal_slice:
            return _(L"Unexpected '[' at this location");
        case tokenizer_error_t::closing_unopened_brace:
            return _(L"Unexpected '}' for unopened brace expansion");
        case tokenizer_error_t::unterminated_brace:
            return _(L"Unexpected end of string, incomplete parameter expansion");
        case tokenizer_error_t::expected_pclose_found_bclose:
            return _(L"Unexpected '}' found, expecting ')'");
        case tokenizer_error_t::expected_bclose_found_pclose:
            return _(L"Unexpected ')' found, expecting '}'");
    }
    assert(0 && "Unexpected tokenizer error");
    return nullptr;
}

/// Return an error token and mark that we no longer have a next token.
tok_t tokenizer_t::call_error(tokenizer_error_t error_type, const wchar_t *token_start,
                              const wchar_t *error_loc, maybe_t<size_t> token_length,
                              size_t error_len) {
    assert(error_type != tokenizer_error_t::none && "tokenizer_error_t::none passed to call_error");
    assert(error_loc >= token_start && "Invalid error location");
    assert(this->token_cursor >= token_start && "Invalid buff location");

    // If continue_after_error is set and we have a real token length, then skip past it.
    // Otherwise give up.
    if (token_length.has_value() && continue_after_error) {
        assert(this->token_cursor < error_loc + *token_length && "Unable to continue past error");
        this->token_cursor = error_loc + *token_length;
    } else {
        this->has_next = false;
    }

    tok_t result{token_type_t::error};
    result.error = error_type;
    result.offset = token_start - this->start;
    // If we are passed a token_length, then use it; otherwise infer it from the buffer.
    result.length = token_length.has_value() ? *token_length : this->token_cursor - token_start;
    result.error_offset_within_token = error_loc - token_start;
    result.error_length = error_len;
    return result;
}

tokenizer_t::tokenizer_t(const wchar_t *start, tok_flags_t flags)
    : token_cursor(start), start(start) {
    assert(start != nullptr && "Invalid start");

    this->accept_unfinished = static_cast<bool>(flags & TOK_ACCEPT_UNFINISHED);
    this->show_comments = static_cast<bool>(flags & TOK_SHOW_COMMENTS);
    this->show_blank_lines = static_cast<bool>(flags & TOK_SHOW_BLANK_LINES);
    this->continue_after_error = static_cast<bool>(flags & TOK_CONTINUE_AFTER_ERROR);
}

tok_t::tok_t(token_type_t type) : type(type) {}

/// Tests if this character can be a part of a string. Hash (#) starts a comment if it's the first
/// character in a token; otherwise it is considered a string character. See issue #953.
static bool tok_is_string_character(wchar_t c, maybe_t<wchar_t> next) {
    switch (c) {
        case L'\0':
        case L' ':
        case L'\n':
        case L'|':
        case L'\t':
        case L';':
        case L'\r':
        case L'<':
        case L'>': {
            // Unconditional separators.
            return false;
        }
        case L'&': {
            if (!feature_test(feature_flag_t::ampersand_nobg_in_token)) return false;
            bool next_is_string = next.has_value() && tok_is_string_character(*next, none());
            // Unlike in other shells, '&' is not special if followed by a string character.
            return next_is_string;
        }
        default: {
            return true;
        }
    }
}
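The interesting case above is `&`: with the `ampersand_nobg_in_token` feature enabled, `&` stays part of a word when the following character is itself a word character (so `foo&bar` is one token), but still terminates the token at end-of-word (so `sleep 1 &` backgrounds). A standalone sketch of that lookahead rule, simplified to drop the feature flag and `maybe_t`:

#include <cassert>

// Simplified lookahead from tok_is_string_character: L'\0' here stands in
// for "no next character" instead of fish's maybe_t<wchar_t>.
static bool is_string_char(wchar_t c, wchar_t next) {
    switch (c) {
        case L'\0': case L' ': case L'\n': case L'|': case L'\t':
        case L';': case L'\r': case L'<': case L'>':
            return false;  // unconditional separators
        case L'&':
            // '&' continues the word only when followed by another word character.
            return next != L'\0' && is_string_char(next, L'\0');
        default:
            return true;
    }
}

int main() {
    assert(is_string_char(L'&', L'b'));    // "foo&bar": '&' is part of the token
    assert(!is_string_char(L'&', L' '));   // "sleep 1 & ": '&' ends the token
    assert(!is_string_char(L'&', L'\0'));  // trailing '&' ends the token
}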

/// Quick test to catch the most common 'non-magical' characters, makes read_string slightly faster
/// by adding a fast path for the most common characters. This is obviously not a suitable
/// replacement for iswalpha.
static inline int myal(wchar_t c) { return (c >= L'a' && c <= L'z') || (c >= L'A' && c <= L'Z'); }

namespace tok_modes {
enum {
    regular_text = 0,         // regular text
    subshell = 1 << 0,        // inside of subshell parentheses
    array_brackets = 1 << 1,  // inside of array brackets
    curly_braces = 1 << 2,
    char_escape = 1 << 3,
};
}  // namespace tok_modes
using tok_mode_t = uint32_t;

/// Read the next token as a string.
tok_t tokenizer_t::read_string() {
    tok_mode_t mode{tok_modes::regular_text};
    std::vector<int> paran_offsets;
    std::vector<int> brace_offsets;
    std::vector<char> expecting;
    std::vector<size_t> quoted_cmdsubs;
    int slice_offset = 0;
    const wchar_t *const buff_start = this->token_cursor;
    bool is_token_begin = true;

    auto process_opening_quote = [&](wchar_t quote) -> const wchar_t * {
        const wchar_t *end = quote_end(this->token_cursor, quote);
        if (end) {
            if (*end == L'$') quoted_cmdsubs.push_back(paran_offsets.size());
            this->token_cursor = end;
            return nullptr;
        } else {
            const wchar_t *error_loc = this->token_cursor;
            this->token_cursor += std::wcslen(this->token_cursor);
            return error_loc;
        }
    };

    while (true) {
        wchar_t c = *this->token_cursor;
#if false
        wcstring msg = L"Handling 0x%x (%lc)";
        tok_mode mode_begin = mode;
#endif

        if (c == L'\0') {
            break;
        }

        // Make sure this character isn't being escaped before anything else
        if ((mode & tok_modes::char_escape) == tok_modes::char_escape) {
            mode &= ~(tok_modes::char_escape);
            // and do nothing more
        } else if (myal(c)) {
            // Early exit optimization in case the character is just a letter,
            // which has no special meaning to the tokenizer, i.e. the same mode continues.
        }

        // Now proceed with the evaluation of the token, first checking to see if the token
        // has been explicitly ignored (escaped).
        else if (c == L'\\') {
            mode |= tok_modes::char_escape;
        } else if (c == L'#' && is_token_begin) {
            this->token_cursor = comment_end(this->token_cursor) - 1;
        } else if (c == L'(') {
            paran_offsets.push_back(this->token_cursor - this->start);
            expecting.push_back(L')');
            mode |= tok_modes::subshell;
        } else if (c == L'{') {
            brace_offsets.push_back(this->token_cursor - this->start);
            expecting.push_back(L'}');
            mode |= tok_modes::curly_braces;
        } else if (c == L')') {
            if (!expecting.empty() && expecting.back() == L'}') {
                return this->call_error(tokenizer_error_t::expected_bclose_found_pclose,
                                        this->token_cursor, this->token_cursor, 1, 1);
            }
            if (paran_offsets.empty()) {
                return this->call_error(tokenizer_error_t::closing_unopened_subshell,
                                        this->token_cursor, this->token_cursor, 1, 1);
            }
            paran_offsets.pop_back();
            if (paran_offsets.empty()) {
                mode &= ~(tok_modes::subshell);
            }
            expecting.pop_back();
            // Check if the ) completed a quoted command substitution.
            if (!quoted_cmdsubs.empty() && quoted_cmdsubs.back() == paran_offsets.size()) {
                quoted_cmdsubs.pop_back();
                // The "$(" part of a quoted command substitution closes double quotes. To keep
                // quotes balanced, act as if there was an invisible double quote after the ")".
                if (const wchar_t *error_loc = process_opening_quote(L'"')) {
                    if (!this->accept_unfinished) {
                        return this->call_error(tokenizer_error_t::unterminated_quote, buff_start,
                                                error_loc);
                    }
                    break;
                }
            }
        } else if (c == L'}') {
            if (!expecting.empty() && expecting.back() == L')') {
                return this->call_error(tokenizer_error_t::expected_pclose_found_bclose,
                                        this->token_cursor, this->token_cursor, 1, 1);
            }
            if (brace_offsets.empty()) {
                return this->call_error(tokenizer_error_t::closing_unopened_brace,
                                        this->token_cursor,
                                        this->token_cursor + wcslen(this->token_cursor));
            }
            brace_offsets.pop_back();
            if (brace_offsets.empty()) {
                mode &= ~(tok_modes::curly_braces);
            }
            expecting.pop_back();
        } else if (c == L'[') {
            if (this->token_cursor != buff_start) {
                mode |= tok_modes::array_brackets;
                slice_offset = this->token_cursor - this->start;
            } else {
                // This is actually allowed so the test operator `[` can be used as the head of a
                // command
            }
        }
        // Only exit bracket mode if we are in bracket mode.
        // Reason: `]` can be a parameter, e.g. last parameter to `[` test alias.
        // e.g. echo $argv[([ $x -eq $y ])] # must not end bracket mode on first bracket
        else if (c == L']' && ((mode & tok_modes::array_brackets) == tok_modes::array_brackets)) {
            mode &= ~(tok_modes::array_brackets);
        } else if (c == L'\'' || c == L'"') {
            if (const wchar_t *error_loc = process_opening_quote(c)) {
                if (!this->accept_unfinished) {
                    return this->call_error(tokenizer_error_t::unterminated_quote, buff_start,
                                            error_loc, none(), 1);
                }
                break;
            }
        } else if (mode == tok_modes::regular_text &&
                   !tok_is_string_character(c, this->token_cursor[1])) {
            break;
        }

#if false
        if (mode != mode_begin) {
            msg.append(L": mode 0x%x -> 0x%x\n");
        } else {
            msg.push_back(L'\n');
        }
        FLOGF(error, msg.c_str(), c, c, int(mode_begin), int(mode));
#endif

        is_token_begin = is_token_delimiter(this->token_cursor[0], this->token_cursor[1]);
        this->token_cursor++;
    }

    if (!this->accept_unfinished && (mode != tok_modes::regular_text)) {
        // These are all "unterminated", so the only char we can mark as an error
        // is the opener (the closing char could be anywhere!)
        //
        // (except for char_escape, which is one long by definition)
        if (mode & tok_modes::char_escape) {
            return this->call_error(tokenizer_error_t::unterminated_escape, buff_start,
                                    this->token_cursor - 1, none(), 1);
        } else if (mode & tok_modes::array_brackets) {
            return this->call_error(tokenizer_error_t::unterminated_slice, buff_start,
                                    this->start + slice_offset, none(), 1);
        } else if (mode & tok_modes::subshell) {
            assert(!paran_offsets.empty());
            size_t offset_of_open_paran = paran_offsets.back();

            return this->call_error(tokenizer_error_t::unterminated_subshell, buff_start,
                                    this->start + offset_of_open_paran, none(), 1);
        } else if (mode & tok_modes::curly_braces) {
            assert(!brace_offsets.empty());
            size_t offset_of_open_brace = brace_offsets.back();

            return this->call_error(tokenizer_error_t::unterminated_brace, buff_start,
                                    this->start + offset_of_open_brace, none(), 1);
        } else {
            DIE("Unknown non-regular-text mode");
        }
    }

    tok_t result(token_type_t::string);
    result.offset = buff_start - this->start;
    result.length = this->token_cursor - buff_start;
    return result;
}
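read_string's error recovery hinges on two parallel structures: per-kind offset stacks (paran_offsets, brace_offsets) so an unterminated opener can be reported at its own position, and a single `expecting` stack recording which closer must come next, so `(...}` is caught as expected_pclose_found_bclose. A minimal standalone sketch of that bookkeeping, with quoting and escapes omitted:

#include <cstdio>
#include <string>
#include <vector>

// Returns an empty string on balanced input, else a short diagnostic.
// Mirrors read_string's use of an "expecting" stack of required closers.
static std::string check(const std::string &s) {
    std::vector<char> expecting;       // which closer must come next
    std::vector<size_t> open_offsets;  // where each opener was, for error reporting
    for (size_t i = 0; i < s.size(); i++) {
        char c = s[i];
        if (c == '(' || c == '{') {
            expecting.push_back(c == '(' ? ')' : '}');
            open_offsets.push_back(i);
        } else if (c == ')' || c == '}') {
            if (expecting.empty()) return "closing unopened " + std::string(1, c);
            if (expecting.back() != c) return "expected " + std::string(1, expecting.back());
            expecting.pop_back();
            open_offsets.pop_back();
        }
    }
    if (!expecting.empty())
        return "unterminated opener at offset " + std::to_string(open_offsets.back());
    return "";
}

int main() {
    std::printf("%s\n", check("echo (ls {a,b})").c_str());  // balanced: prints empty line
    std::printf("%s\n", check("echo (ls }").c_str());       // expected )
    std::printf("%s\n", check("echo {a,(b").c_str());       // unterminated opener at offset 8
}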

// Parse an fd from the non-empty string [start, end), all of which are digits.
// Return the fd, or -1 on overflow.
static int parse_fd(const wchar_t *start, const wchar_t *end) {
    assert(start < end && "String cannot be empty");
    long long big_fd = 0;
    for (const wchar_t *cursor = start; cursor < end; ++cursor) {
        assert(L'0' <= *cursor && *cursor <= L'9' && "Not a digit");
        big_fd = big_fd * 10 + (*cursor - L'0');
        if (big_fd > INT_MAX) return -1;
    }
    assert(big_fd <= INT_MAX && "big_fd should be in range");
    return static_cast<int>(big_fd);
}
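parse_fd accumulates into a `long long` and bails out the moment the running value exceeds INT_MAX, so a descriptor like 99999999999 parses to -1 instead of silently wrapping; the -1 then flows into pipe_or_redir_t::fd and is reported via is_valid(). A standalone check of that behavior (same logic, spelled out over a NUL-terminated string):

#include <cassert>
#include <climits>
#include <cwchar>

// Same accumulation as parse_fd above: widen, add a digit, test the bound.
static int parse_fd_demo(const wchar_t *s) {
    long long big_fd = 0;
    for (; *s; ++s) {
        big_fd = big_fd * 10 + (*s - L'0');
        if (big_fd > INT_MAX) return -1;  // overflow: report, don't wrap
    }
    return static_cast<int>(big_fd);
}

int main() {
    assert(parse_fd_demo(L"2") == 2);
    assert(parse_fd_demo(L"2147483647") == INT_MAX);
    assert(parse_fd_demo(L"99999999999") == -1);  // like "99999999999>" in the header docs
}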

pipe_or_redir_t::pipe_or_redir_t() = default;

maybe_t<pipe_or_redir_t> pipe_or_redir_t::from_string(const wchar_t *buff) {
    pipe_or_redir_t result{};

    /* Examples of supported syntaxes.
       Note we are only responsible for parsing the redirection part, not 'cmd' or 'file'.

        cmd | cmd        normal pipe
        cmd &| cmd       normal pipe plus stderr-merge
        cmd >| cmd       pipe with explicit fd
        cmd 2>| cmd      pipe with explicit fd
        cmd < file       stdin redirection
        cmd > file       redirection
        cmd >> file      appending redirection
        cmd >? file      noclobber redirection
        cmd >>? file     appending noclobber redirection
        cmd 2> file      file redirection with explicit fd
        cmd >&2          fd redirection with no explicit src fd (stdout is used)
        cmd 1>&2         fd redirection with an explicit src fd
        cmd <&2          fd redirection with no explicit src fd (stdin is used)
        cmd 3<&0         fd redirection with an explicit src fd
        cmd &> file      redirection with stderr merge
        cmd ^ file       caret (stderr) redirection, perhaps disabled via feature flags
        cmd ^^ file      caret (stderr) redirection, perhaps disabled via feature flags
    */

    const wchar_t *cursor = buff;

    // Extract a range of leading fd.
    const wchar_t *fd_start = cursor;
    while (iswdigit(*cursor)) cursor++;
    const wchar_t *fd_end = cursor;
    bool has_fd = (fd_end > fd_start);

    // Try consuming a given character.
    // Return true if consumed. On success, advances cursor.
    auto try_consume = [&cursor](wchar_t c) -> bool {
        if (*cursor != c) return false;
        cursor++;
        return true;
    };

    // Like try_consume, but asserts on failure.
    auto consume = [&](wchar_t c) {
        assert(*cursor == c && "Failed to consume char");
        cursor++;
    };

    switch (*cursor) {
        case L'|': {
            if (has_fd) {
                // Like 123|
                return none();
            }
            consume(L'|');
            assert(*cursor != L'|' &&
                   "|| passed as redirection, this should have been handled as 'or' by the caller");
            result.fd = STDOUT_FILENO;
            result.is_pipe = true;
            break;
        }
        case L'>': {
            consume(L'>');
            if (try_consume(L'>')) result.mode = redirection_mode_t::append;
            if (try_consume(L'|')) {
                // Note we differ from bash here.
                // Consider `echo foo 2>| bar`
                // In fish, this is a *pipe*. Run bar as a command and attach foo's stderr to bar's
                // stdin, while leaving stdout as tty.
                // In bash, this is a *redirection* to bar as a file. It is like > but ignores
                // noclobber.
                result.is_pipe = true;
                result.fd = has_fd ? parse_fd(fd_start, fd_end)  // like 2>|
                                   : STDOUT_FILENO;              // like >|
            } else if (try_consume(L'&')) {
                // This is a redirection to an fd.
                // Note that we allow ">>&", but it's still just writing to the fd - "appending" to
                // it doesn't make sense.
                result.mode = redirection_mode_t::fd;
                result.fd = has_fd ? parse_fd(fd_start, fd_end)  // like 1>&2
                                   : STDOUT_FILENO;              // like >&2
            } else {
                // This is a redirection to a file.
                result.fd = has_fd ? parse_fd(fd_start, fd_end)  // like 1> file.txt
                                   : STDOUT_FILENO;              // like > file.txt
                if (result.mode != redirection_mode_t::append)
                    result.mode = redirection_mode_t::overwrite;
                // Note 'echo abc >>? file' is valid: it means append and noclobber.
                // But here "noclobber" means the file must not exist, so appending
                // can be ignored.
                if (try_consume(L'?')) result.mode = redirection_mode_t::noclob;
            }
            break;
        }
        case L'<': {
            consume(L'<');
            if (try_consume('&')) {
                result.mode = redirection_mode_t::fd;
            } else {
                result.mode = redirection_mode_t::input;
            }
            result.fd = has_fd ? parse_fd(fd_start, fd_end)  // like 1<&3 or 1< /tmp/file.txt
                               : STDIN_FILENO;               // like <&3 or < /tmp/file.txt
            break;
        }
        case L'&': {
            consume(L'&');
            if (try_consume(L'|')) {
                // &| is pipe with stderr merge.
                result.fd = STDOUT_FILENO;
                result.is_pipe = true;
                result.stderr_merge = true;
            } else if (try_consume(L'>')) {
                result.fd = STDOUT_FILENO;
                result.stderr_merge = true;
                result.mode = redirection_mode_t::overwrite;
                if (try_consume(L'>')) result.mode = redirection_mode_t::append;  // like &>>
                if (try_consume(L'?'))
                    result.mode = redirection_mode_t::noclob;  // like &>? or &>>?
            } else {
                return none();
            }
            break;
        }
        default: {
            // Not a redirection.
            return none();
        }
    }

    result.consumed = (cursor - buff);
    assert(result.consumed > 0 && "Should have consumed at least one character on success");
    return result;
}
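A few worked inputs help when reading the switch above. This is how from_string decomposes the common prefixes; an illustrative sketch assuming fish's own headers, not a standalone program:

// "2>&1" -> fd prefix, '>' branch, then '&': fd = 2, mode = redirection_mode_t::fd,
//           is_pipe = false, consumed = 3 (the target "1" is left for the caller).
// ">|"   -> '>' branch, then '|': fd = STDOUT_FILENO, is_pipe = true, consumed = 2.
// "&>>?" -> '&' branch, then '>', '>', '?': stderr_merge = true,
//           mode = redirection_mode_t::noclob, consumed = 4.
// "123|" -> fd prefix before a bare pipe: rejected, from_string returns none().
auto r = pipe_or_redir_t::from_string(L"2>&1");
assert(r && r->fd == 2 && r->mode == redirection_mode_t::fd && !r->is_pipe);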

int pipe_or_redir_t::oflags() const {
    switch (mode) {
        case redirection_mode_t::append: {
            return O_CREAT | O_APPEND | O_WRONLY;
        }
        case redirection_mode_t::overwrite: {
            return O_CREAT | O_WRONLY | O_TRUNC;
        }
        case redirection_mode_t::noclob: {
            return O_CREAT | O_EXCL | O_WRONLY;
        }
        case redirection_mode_t::input: {
            return O_RDONLY;
        }
        case redirection_mode_t::fd:
        default: {
            return -1;
        }
    }
}
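The noclob mapping is the whole implementation of fish's `>?` noclobber redirection: O_CREAT|O_EXCL makes open(2) fail with EEXIST when the target already exists, so "must not exist" is enforced atomically by the kernel rather than by a racy stat-then-open. A standalone POSIX demonstration (the path is illustrative):

#include <cerrno>
#include <cstdio>
#include <fcntl.h>
#include <unistd.h>

int main() {
    const char *path = "/tmp/noclob_demo";
    // Same flags oflags() returns for redirection_mode_t::noclob, as in 'echo hi >? file'.
    int fd = open(path, O_CREAT | O_EXCL | O_WRONLY, 0644);
    if (fd >= 0) {
        std::puts("created: noclobber open succeeded");
        close(fd);
    } else if (errno == EEXIST) {
        std::puts("refused: file exists, as >? requires");
    }
    return 0;
}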

/// Test if a character is whitespace. Differs from iswspace in that it does not consider a
/// newline to be whitespace.
static bool iswspace_not_nl(wchar_t c) {
    switch (c) {
        case L' ':
        case L'\t':
        case L'\r':
            return true;
        case L'\n':
            return false;
        default:
            return iswspace(c);
    }
}

maybe_t<tok_t> tokenizer_t::next() {
    if (!this->has_next) {
        return none();
    }

    // Consume non-newline whitespace. If we get an escaped newline, mark it and continue past
    // it.
    for (;;) {
        if (this->token_cursor[0] == L'\\' && this->token_cursor[1] == L'\n') {
            this->token_cursor += 2;
            this->continue_line_after_comment = true;
        } else if (iswspace_not_nl(this->token_cursor[0])) {
            this->token_cursor++;
        } else {
            break;
        }
    }

    while (*this->token_cursor == L'#') {
        // We have a comment, walk over the comment.
        const wchar_t *comment_start = this->token_cursor;
        this->token_cursor = comment_end(this->token_cursor);
        size_t comment_len = this->token_cursor - comment_start;

        // If we are going to continue after the comment, skip any trailing newline.
        if (this->token_cursor[0] == L'\n' && this->continue_line_after_comment)
            this->token_cursor++;

        // Maybe return the comment.
        if (this->show_comments) {
            tok_t result(token_type_t::comment);
            result.offset = comment_start - this->start;
            result.length = comment_len;
            return result;
        }
        while (iswspace_not_nl(this->token_cursor[0])) this->token_cursor++;
    }

    // We made it past the comments and ate any trailing newlines we wanted to ignore.
    this->continue_line_after_comment = false;
    const size_t start_pos = this->token_cursor - this->start;

    maybe_t<tok_t> result{};
    switch (*this->token_cursor) {
        case L'\0': {
            this->has_next = false;
            return none();
        }
        case L'\r':  // carriage-return
        case L'\n':  // newline
        case L';': {
            result.emplace(token_type_t::end);
            result->offset = start_pos;
            result->length = 1;
            this->token_cursor++;
            // Hack: when we get a newline, swallow as many as we can. This compresses multiple
            // subsequent newlines into a single one.
            if (!this->show_blank_lines) {
                while (*this->token_cursor == L'\n' || *this->token_cursor == 13 /* CR */ ||
                       *this->token_cursor == ' ' || *this->token_cursor == '\t') {
                    this->token_cursor++;
                }
            }
            break;
        }
        case L'&': {
            if (this->token_cursor[1] == L'&') {
                // && is and.
                result.emplace(token_type_t::andand);
                result->offset = start_pos;
                result->length = 2;
                this->token_cursor += 2;
            } else if (this->token_cursor[1] == L'>' || this->token_cursor[1] == L'|') {
                // &> and &| redirect both stdout and stderr.
                auto redir = pipe_or_redir_t::from_string(this->token_cursor);
                assert(redir.has_value() &&
                       "Should always succeed to parse a &> or &| redirection");
                result.emplace(redir->token_type());
                result->offset = start_pos;
                result->length = redir->consumed;
                this->token_cursor += redir->consumed;
            } else {
                result.emplace(token_type_t::background);
                result->offset = start_pos;
                result->length = 1;
                this->token_cursor++;
            }
            break;
        }
        case L'|': {
            if (this->token_cursor[1] == L'|') {
                // || is or.
                result.emplace(token_type_t::oror);
                result->offset = start_pos;
                result->length = 2;
                this->token_cursor += 2;
            } else if (this->token_cursor[1] == L'&') {
                // |& is a bashism; in fish it's &|.
                return this->call_error(tokenizer_error_t::invalid_pipe_ampersand,
                                        this->token_cursor, this->token_cursor, 2, 2);
            } else {
                auto pipe = pipe_or_redir_t::from_string(this->token_cursor);
                assert(pipe.has_value() && pipe->is_pipe &&
                       "Should always succeed to parse a | pipe");
                result.emplace(pipe->token_type());
                result->offset = start_pos;
                result->length = pipe->consumed;
                this->token_cursor += pipe->consumed;
            }
            break;
        }
        case L'>':
        case L'<': {
            // There's some duplication with the code in the default case below. The key
            // difference here is that we must never parse these as a string; a failed
            // redirection is an error!
            auto redir_or_pipe = pipe_or_redir_t::from_string(this->token_cursor);
            if (!redir_or_pipe || redir_or_pipe->fd < 0) {
                return this->call_error(tokenizer_error_t::invalid_redirect, this->token_cursor,
                                        this->token_cursor,
                                        redir_or_pipe ? redir_or_pipe->consumed : 0,
                                        redir_or_pipe ? redir_or_pipe->consumed : 0);
            }
            result.emplace(redir_or_pipe->token_type());
            result->offset = start_pos;
            result->length = redir_or_pipe->consumed;
            this->token_cursor += redir_or_pipe->consumed;
            break;
        }
        default: {
            // Maybe a redirection like '2>&1', maybe a pipe like 2>|, maybe just a string.
            const wchar_t *error_location = this->token_cursor;
            maybe_t<pipe_or_redir_t> redir_or_pipe{};
            if (iswdigit(*this->token_cursor)) {
                redir_or_pipe = pipe_or_redir_t::from_string(this->token_cursor);
            }

            if (redir_or_pipe) {
                // It looks like a redirection or a pipe. But we don't support piping fd 0. Note
                // that fd 0 may be -1, indicating overflow; but we don't treat that as a
                // tokenizer error.
                if (redir_or_pipe->is_pipe && redir_or_pipe->fd == 0) {
                    return this->call_error(tokenizer_error_t::invalid_pipe, error_location,
                                            error_location, redir_or_pipe->consumed,
                                            redir_or_pipe->consumed);
                }
                result.emplace(redir_or_pipe->token_type());
                result->offset = start_pos;
                result->length = redir_or_pipe->consumed;
                this->token_cursor += redir_or_pipe->consumed;
            } else {
                // Not a redirection or pipe, so just a string.
                result = this->read_string();
            }
            break;
        }
    }
    assert(result.has_value() && "Should have a token");
    return result;
}
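Putting next() together with the flags defined in tokenizer.h, a caller drives the tokenizer like this. This is an illustrative sketch against the C++ API shown in this diff (dump_tokens is a hypothetical helper, not part of the commit); under INCLUDE_RUST_HEADERS the same loop is spelled `auto tok = new_tokenizer(...)` and `tok->next()`, as in the reader.cpp hunk above:

// Tokenize a command line, keeping comments, and print each token's text.
void dump_tokens(const wcstring &line) {
    tokenizer_t tok(line.c_str(), TOK_ACCEPT_UNFINISHED | TOK_SHOW_COMMENTS);
    while (auto token = tok.next()) {
        if (token->type == token_type_t::error) {
            std::fwprintf(stderr, L"error: %ls\n", tokenizer_get_error_message(token->error));
            break;
        }
        std::fwprintf(stdout, L"%d %ls\n", static_cast<int>(token->type),
                      tok.text_of(*token).c_str());
    }
}
// For L"echo hi > /tmp/out # comment" this prints two string tokens, a redirect
// token for ">", the target string "/tmp/out", and finally the comment token.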

bool is_token_delimiter(wchar_t c, maybe_t<wchar_t> next) {
    return c == L'(' || !tok_is_string_character(c, std::move(next));
}

wcstring tok_command(const wcstring &str) {
    tokenizer_t t(str.c_str(), 0);
    while (auto token = t.next()) {
        if (token->type != token_type_t::string) {
            return {};
        }
        wcstring text = t.text_of(*token);
        if (variable_assignment_equals_pos(text)) {
            continue;
        }
        return text;
    }
    return {};
}
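tok_command is what lets highlighting and abbreviation expansion treat `VAR=x make` the way fish does: leading string tokens containing an unquoted `=` are variable assignments, and the first token that is not one is the command. For example (illustrative snippet, assuming fish's headers):

// Skips over the assignment token "CFLAGS=-O2" and returns the command name.
wcstring cmd = tok_command(L"CFLAGS=-O2 make all");
assert(cmd == L"make");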

bool move_word_state_machine_t::consume_char_punctuation(wchar_t c) {
    enum { s_always_one = 0, s_rest, s_whitespace_rest, s_whitespace, s_alphanumeric, s_end };

    bool consumed = false;
    while (state != s_end && !consumed) {
        switch (state) {
            case s_always_one: {
                // Always consume the first character.
                consumed = true;
                if (iswspace(c)) {
                    state = s_whitespace;
                } else if (iswalnum(c)) {
                    state = s_alphanumeric;
                } else {
                    // Don't allow switching type (ws->nonws) after non-whitespace and
                    // non-alphanumeric.
                    state = s_rest;
                }
                break;
            }
            case s_rest: {
                if (iswspace(c)) {
                    // Consume only trailing whitespace.
                    state = s_whitespace_rest;
                } else if (iswalnum(c)) {
                    // Consume only alnums.
                    state = s_alphanumeric;
                } else {
                    consumed = false;
                    state = s_end;
                }
                break;
            }
            case s_whitespace_rest:
            case s_whitespace: {
                // "whitespace" consumes whitespace and switches to alnums,
                // "whitespace_rest" only consumes whitespace.
                if (iswspace(c)) {
                    // Consumed whitespace.
                    consumed = true;
                } else {
                    state = state == s_whitespace ? s_alphanumeric : s_end;
                }
                break;
            }
            case s_alphanumeric: {
                if (iswalnum(c)) {
                    consumed = true;  // consumed alphanumeric
                } else {
                    state = s_end;
                }
                break;
            }
            case s_end:
            default: {
                break;
            }
        }
    }
    return consumed;
}

bool move_word_state_machine_t::is_path_component_character(wchar_t c) {
    return tok_is_string_character(c, none()) && !std::wcschr(L"/={,}'\":@", c);
}

bool move_word_state_machine_t::consume_char_path_components(wchar_t c) {
    enum {
        s_initial_punctuation,
        s_whitespace,
        s_separator,
        s_slash,
        s_path_component_characters,
        s_initial_separator,
        s_end
    };

    bool consumed = false;
    while (state != s_end && !consumed) {
        switch (state) {
            case s_initial_punctuation: {
                if (!is_path_component_character(c) && !iswspace(c)) {
                    state = s_initial_separator;
                } else {
                    if (!is_path_component_character(c)) {
                        consumed = true;
                    }
                    state = s_whitespace;
                }
                break;
            }
            case s_whitespace: {
                if (iswspace(c)) {
                    consumed = true;  // consumed whitespace
                } else if (c == L'/' || is_path_component_character(c)) {
                    state = s_slash;  // path component
                } else {
                    state = s_separator;  // path separator
                }
                break;
            }
            case s_separator: {
                if (!iswspace(c) && !is_path_component_character(c)) {
                    consumed = true;  // consumed separator
                } else {
                    state = s_end;
                }
                break;
            }
            case s_slash: {
                if (c == L'/') {
                    consumed = true;  // consumed slash
                } else {
                    state = s_path_component_characters;
                }
                break;
            }
            case s_path_component_characters: {
                if (is_path_component_character(c)) {
                    consumed = true;  // consumed string character except slash
                } else {
                    state = s_end;
                }
                break;
            }
            case s_initial_separator: {
                if (is_path_component_character(c)) {
                    consumed = true;
                    state = s_path_component_characters;
                } else if (iswspace(c)) {
                    state = s_end;
                } else {
                    consumed = true;
                }
                break;
            }
            case s_end:
            default: {
                break;
            }
        }
    }
    return consumed;
}

bool move_word_state_machine_t::consume_char_whitespace(wchar_t c) {
    // Consume a "word" of printable characters plus any leading whitespace.
    enum { s_always_one = 0, s_blank, s_graph, s_end };

    bool consumed = false;
    while (state != s_end && !consumed) {
        switch (state) {
            case s_always_one: {
                consumed = true;  // always consume the first character
                // If it's not whitespace, only consume those from here.
                if (!iswspace(c)) {
                    state = s_graph;
                } else {
                    // If it's whitespace, keep consuming whitespace until the graphs.
                    state = s_blank;
                }
                break;
            }
            case s_blank: {
                if (iswspace(c)) {
                    consumed = true;  // consumed whitespace
                } else {
                    state = s_graph;
                }
                break;
            }
            case s_graph: {
                if (!iswspace(c)) {
                    consumed = true;  // consumed printable non-space
                } else {
                    state = s_end;
                }
                break;
            }
            case s_end:
            default: {
                break;
            }
        }
    }
    return consumed;
}

bool move_word_state_machine_t::consume_char(wchar_t c) {
    switch (style) {
        case move_word_style_punctuation: {
            return consume_char_punctuation(c);
        }
        case move_word_style_path_components: {
            return consume_char_path_components(c);
        }
        case move_word_style_whitespace: {
            return consume_char_whitespace(c);
        }
    }

    DIE("should not reach this statement");  // silence some compiler errors about not returning
}

move_word_state_machine_t::move_word_state_machine_t(move_word_style_t syl)
    : state(0), style(syl) {}

void move_word_state_machine_t::reset() { state = 0; }
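Each consume_char_* routine is a pure transition function: feed characters one at a time, and the first rejected character marks the word boundary. The reader walks the buffer (leftwards for backward-kill) and stops when consume_char returns false. An illustrative driver against the class declared in tokenizer.h below (word_span_left is a hypothetical helper):

// Count how many characters from 'pos' leftward form one "word" under a style.
size_t word_span_left(const wcstring &text, size_t pos, move_word_style_t style) {
    move_word_state_machine_t sm(style);
    size_t n = 0;
    while (n < pos && sm.consume_char(text.at(pos - n - 1))) n++;
    return n;  // erase or skip text[pos - n, pos)
}
// With move_word_style_whitespace, word_span_left(L"echo /tmp/file", 14, ...) == 9,
// spanning "/tmp/file"; with move_word_style_path_components it stops at 4 ("file").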
202
src/tokenizer.h
@@ -1,5 +1,3 @@
// A specialized tokenizer for tokenizing the fish language. In the future, the tokenizer should be
// extended to support marks, tokenizing multiple strings and disposing of unused string segments.
#ifndef FISH_TOKENIZER_H
#define FISH_TOKENIZER_H

@@ -10,39 +8,28 @@
#include "maybe.h"
#include "parse_constants.h"
#include "redirection.h"
#if INCLUDE_RUST_HEADERS
#include "tokenizer.rs.h"
#endif

/// Token types. XXX Why this isn't parse_token_type_t, I'm not really sure.
enum class token_type_t : uint8_t {
    error,       /// Error reading token
    string,      /// String token
    pipe,        /// Pipe token
    andand,      /// && token
    oror,        /// || token
    end,         /// End token (semicolon or newline, not literal end)
    redirect,    /// redirection token
    background,  /// send job to bg token
    comment,     /// comment token
};

/// Flag telling the tokenizer to accept incomplete parameters, i.e. parameters with mismatching
/// parenthesis, etc. This is useful for tab-completion.
#define TOK_ACCEPT_UNFINISHED 1

/// Flag telling the tokenizer not to remove comments. Useful for syntax highlighting.
#define TOK_SHOW_COMMENTS 2

/// Ordinarily, the tokenizer ignores newlines following a newline, or a semicolon. This flag tells
/// the tokenizer to return each of them as a separate END.
#define TOK_SHOW_BLANK_LINES 4

/// Make an effort to continue after an error.
#define TOK_CONTINUE_AFTER_ERROR 8

using tok_flags_t = unsigned int;

#define TOK_ACCEPT_UNFINISHED 1
#define TOK_SHOW_COMMENTS 2
#define TOK_SHOW_BLANK_LINES 4
#define TOK_CONTINUE_AFTER_ERROR 8

#if INCLUDE_RUST_HEADERS

#include "tokenizer.rs.h"
using token_type_t = TokenType;
using tokenizer_error_t = TokenizerError;
using tok_t = Tok;
using tokenizer_t = Tokenizer;
using pipe_or_redir_t = PipeOrRedir;
using move_word_state_machine_t = MoveWordStateMachine;
using move_word_style_t = MoveWordStyle;

#else

// Hacks to allow us to compile without Rust headers.
enum class tokenizer_error_t : uint8_t {
    none,
    unterminated_quote,
@@ -60,155 +47,6 @@ enum class tokenizer_error_t : uint8_t {
    expected_bclose_found_pclose,
};

/// Get the error message for an error \p err.
#endif
const wchar_t *tokenizer_get_error_message(tokenizer_error_t err);

struct tok_t {
    // Offset of the token.
    source_offset_t offset{0};
    // Length of the token.
    source_offset_t length{0};

    // If an error, this is the offset of the error within the token. A value of 0 means it occurred
    // at 'offset'.
    source_offset_t error_offset_within_token{SOURCE_OFFSET_INVALID};
    source_offset_t error_length{0};

    // If an error, this is the error code.
    tokenizer_error_t error{tokenizer_error_t::none};

    // The type of the token.
    token_type_t type;

    // Construct from a token type.
    explicit tok_t(token_type_t type);

    /// Returns whether the given location is within the source range or at its end.
    bool location_in_or_at_end_of_source_range(size_t loc) const {
        return offset <= loc && loc - offset <= length;
    }
    /// Gets source for the token, or the empty string if it has no source.
    wcstring get_source(const wcstring &str) const { return wcstring(str, offset, length); }
};
static_assert(sizeof(tok_t) <= 32, "tok_t expected to be 32 bytes or less");

/// The tokenizer struct.
class tokenizer_t : noncopyable_t {
    /// A pointer into the original string, showing where the next token begins.
    const wchar_t *token_cursor;
    /// The start of the original string.
    const wchar_t *const start;
    /// Whether we have additional tokens.
    bool has_next{true};
    /// Whether incomplete tokens are accepted.
    bool accept_unfinished{false};
    /// Whether comments should be returned.
    bool show_comments{false};
    /// Whether all blank lines are returned.
    bool show_blank_lines{false};
    /// Whether to attempt to continue after an error.
    bool continue_after_error{false};
    /// Whether to continue the previous line after the comment.
    bool continue_line_after_comment{false};

    tok_t call_error(tokenizer_error_t error_type, const wchar_t *token_start,
                     const wchar_t *error_loc, maybe_t<size_t> token_length = {},
                     size_t error_len = 0);
    tok_t read_string();

   public:
    /// Constructor for a tokenizer. b is the string that is to be tokenized. It is not copied, and
    /// should not be freed by the caller until after the tokenizer is destroyed.
    ///
    /// \param b The string to tokenize
    /// \param flags Flags to the tokenizer. Setting TOK_ACCEPT_UNFINISHED will cause the tokenizer
    /// to accept incomplete tokens, such as a subshell without a closing parenthesis, as a valid
    /// token. Setting TOK_SHOW_COMMENTS will return comments as tokens
    tokenizer_t(const wchar_t *start, tok_flags_t flags);

    /// Returns the next token, or none() if we are at the end.
    maybe_t<tok_t> next();

    /// Returns the text of a token, as a string.
    wcstring text_of(const tok_t &tok) const { return wcstring(start + tok.offset, tok.length); }

    /// Copies a token's text into a string. This is useful for reusing storage.
    /// Returns a reference to the string.
    const wcstring &copy_text_of(const tok_t &tok, wcstring *result) {
        return result->assign(start + tok.offset, tok.length);
    }
};

/// Tests if this character can delimit tokens.
bool is_token_delimiter(wchar_t c, maybe_t<wchar_t> next);

/// \return the first token from the string, skipping variable assignments like A=B.
wcstring tok_command(const wcstring &str);

/// Struct wrapping up a parsed pipe or redirection.
struct pipe_or_redir_t {
    // The redirected fd, or -1 on overflow.
    // In the common case of a pipe, this is 1 (STDOUT_FILENO).
    // For example, in the case of "3>&1" this will be 3.
    int fd{-1};

    // Whether we are a pipe (true) or redirection (false).
    bool is_pipe{false};

    // The redirection mode if the type is redirect.
    // Ignored for pipes.
    redirection_mode_t mode{redirection_mode_t::overwrite};

    // Whether, in addition to this redirection, stderr should also be dup'd to stdout
    // For example &| or &>
    bool stderr_merge{false};

    // Number of characters consumed when parsing the string.
    size_t consumed{0};

    // Construct from a string.
    static maybe_t<pipe_or_redir_t> from_string(const wchar_t *buff);
    static maybe_t<pipe_or_redir_t> from_string(const wcstring &buff) {
        return from_string(buff.c_str());
    }

    // \return the oflags (as in open(2)) for this redirection.
    int oflags() const;

    // \return if we are "valid". Here "valid" means only that the source fd did not overflow.
    // For example 99999999999> is invalid.
    bool is_valid() const { return fd >= 0; }

    // \return the token type for this redirection.
    token_type_t token_type() const {
        return is_pipe ? token_type_t::pipe : token_type_t::redirect;
    }

   private:
    pipe_or_redir_t();
};

enum move_word_style_t {
    move_word_style_punctuation,      // stop at punctuation
    move_word_style_path_components,  // stops at path components
    move_word_style_whitespace        // stops at whitespace
};

/// Our state machine that implements "one word" movement or erasure.
class move_word_state_machine_t {
   private:
    bool consume_char_punctuation(wchar_t c);
    bool consume_char_path_components(wchar_t c);
    bool is_path_component_character(wchar_t c);
    bool consume_char_whitespace(wchar_t c);

    int state;
    move_word_style_t style;

   public:
    explicit move_word_state_machine_t(move_word_style_t syl);
    bool consume_char(wchar_t c);
    void reset();
};

#endif
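The alias block is the whole porting trick in this header: when INCLUDE_RUST_HEADERS is set, the cxx-generated declarations (TokenType, Tokenizer, and so on) are pulled in and the old C++ names become type aliases for them, so almost every call site in the tree compiles unchanged; the #else branch keeps a minimal stub so tooling can still parse the header without the generated file. The same shim pattern in miniature (illustrative, not the generated code):

#include <cstdint>

// Stand-in for a generated binding header (like tokenizer.rs.h): the
// generated side owns the type definition.
enum class TokenType : std::uint8_t { error, string, pipe };

// The shim header keeps the historical name alive as an alias.
using token_type_t = TokenType;

// Existing call sites compile unchanged against the new definition.
static_assert(static_cast<int>(token_type_t::string) == 1, "layout shared across the FFI");

int main() { return 0; }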