2016-05-02 23:09:46 +00:00
|
|
|
// Programmatic representation of fish code.
|
2016-05-18 22:30:21 +00:00
|
|
|
#include "config.h" // IWYU pragma: keep
|
|
|
|
|
2019-10-13 22:50:48 +00:00
|
|
|
#include "parse_tree.h"
|
|
|
|
|
2015-07-25 15:14:25 +00:00
|
|
|
#include <stdarg.h>
|
|
|
|
#include <stddef.h>
|
|
|
|
#include <stdio.h>
|
2017-02-11 02:47:02 +00:00
|
|
|
|
2016-04-21 06:00:54 +00:00
|
|
|
#include <algorithm>
|
2019-10-13 22:50:48 +00:00
|
|
|
#include <cwchar>
|
2016-05-02 23:09:46 +00:00
|
|
|
#include <string>
|
2017-02-11 02:47:02 +00:00
|
|
|
#include <type_traits>
|
2016-05-02 23:09:46 +00:00
|
|
|
#include <vector>
|
2016-04-21 06:00:54 +00:00
|
|
|
|
2020-07-02 21:51:45 +00:00
|
|
|
#include "ast.h"
|
2015-07-25 15:14:25 +00:00
|
|
|
#include "common.h"
|
2016-06-24 05:44:58 +00:00
|
|
|
#include "fallback.h"
|
2019-05-27 22:56:53 +00:00
|
|
|
#include "flog.h"
|
2015-07-25 15:14:25 +00:00
|
|
|
#include "parse_constants.h"
|
2019-10-13 23:06:16 +00:00
|
|
|
#include "parse_tree.h"
|
2016-05-02 23:09:46 +00:00
|
|
|
#include "proc.h"
|
2013-06-09 02:20:26 +00:00
|
|
|
#include "tokenizer.h"
|
2016-06-24 05:44:58 +00:00
|
|
|
#include "wutil.h" // IWYU pragma: keep
|
2013-06-02 05:14:47 +00:00
|
|
|
|
2020-06-20 22:27:10 +00:00
|
|
|
/// Translate an error reported by the tokenizer into the corresponding parse error code.
/// Tokenizer errors without a dedicated parse error map to parse_error_tokenizer_other.
parse_error_code_t parse_error_from_tokenizer_error(tokenizer_error_t err) {
    if (err == tokenizer_error_t::none) return parse_error_none;
    if (err == tokenizer_error_t::unterminated_quote) {
        return parse_error_tokenizer_unterminated_quote;
    }
    if (err == tokenizer_error_t::unterminated_subshell) {
        return parse_error_tokenizer_unterminated_subshell;
    }
    if (err == tokenizer_error_t::unterminated_slice) {
        return parse_error_tokenizer_unterminated_slice;
    }
    if (err == tokenizer_error_t::unterminated_escape) {
        return parse_error_tokenizer_unterminated_escape;
    }
    // Everything else is reported generically.
    return parse_error_tokenizer_other;
}
|
|
|
|
|
2016-05-02 23:09:46 +00:00
|
|
|
/// Returns a string description of this parse error.
///
/// \param src the source text that this error's source_start/source_length range indexes into.
/// \param prefix text prepended to the message (e.g. a command name).
/// \param is_interactive if true, omit the source line and caret when the error starts at
///        offset 0, because the user is already looking at that line.
/// \param skip_caret if true, never append the offending source line and '^' caret marker.
wcstring parse_error_t::describe_with_prefix(const wcstring &src, const wcstring &prefix,
                                             bool is_interactive, bool skip_caret) const {
    // With no message text and no permission to point at the source, there is nothing to say.
    if (skip_caret && this->text.empty()) return L"";

    wcstring result = prefix;
    // Some error codes ignore the stored message text and instead format their message
    // directly from the offending source range.
    switch (code) {
        default:
            break;
        case parse_error_andor_in_pipeline:
            append_format(result, EXEC_ERR_MSG,
                          src.substr(this->source_start, this->source_length).c_str());
            return result;
        case parse_error_bare_variable_assignment: {
            // The source range is a VAR=VALUE token; split it at '=' for the message.
            wcstring assignment_src = src.substr(this->source_start, this->source_length);
            maybe_t<size_t> equals_pos = variable_assignment_equals_pos(assignment_src);
            assert(equals_pos);
            wcstring variable = assignment_src.substr(0, *equals_pos);
            wcstring value = assignment_src.substr(*equals_pos + 1);
            append_format(result, ERROR_BAD_COMMAND_ASSIGN_ERR_MSG, variable.c_str(),
                          value.c_str());
            return result;
        }
    }
    result.append(this->text);
    // If the caret is suppressed, or the source range does not lie within src, stop here.
    if (skip_caret || source_start >= src.size() || source_start + source_length > src.size()) {
        return result;
    }

    // Locate the beginning of this line of source.
    size_t line_start = 0;

    // Look for a newline prior to source_start. If we don't find one, start at the beginning of
    // the string; otherwise start one past the newline. Note that source_start may itself point
    // at a newline; we want to find the newline before it.
    if (source_start > 0) {
        size_t newline = src.find_last_of(L'\n', source_start - 1);
        if (newline != wcstring::npos) {
            line_start = newline + 1;
        }
    }

    // Look for the newline after the source range. If the source range itself includes a
    // newline, that's the one we want, so start just before the end of the range.
    size_t last_char_in_range =
        (source_length == 0 ? source_start : source_start + source_length - 1);
    size_t line_end = src.find(L'\n', last_char_in_range);
    if (line_end == wcstring::npos) {
        line_end = src.size();
    }

    assert(line_end >= line_start);
    assert(source_start >= line_start);

    // Don't include the caret and line if we're interactive and this is the first line, because
    // then it's obvious.
    bool interactive_skip_caret = is_interactive && source_start == 0;
    if (interactive_skip_caret) {
        return result;
    }

    // Append the line of text.
    if (!result.empty()) result.push_back(L'\n');
    result.append(src, line_start, line_end - line_start);

    // Append the caret line. The input source may include tabs; for that reason we
    // construct a "caret line" that has tabs in corresponding positions.
    wcstring caret_space_line;
    caret_space_line.reserve(source_start - line_start);
    for (size_t i = line_start; i < source_start; i++) {
        wchar_t wc = src.at(i);
        if (wc == L'\t') {
            caret_space_line.push_back(L'\t');
        } else if (wc == L'\n') {
            // It's possible that the source_start points at a newline itself. In that case,
            // pretend it's a space. We only expect this to be at the end of the string.
            caret_space_line.push_back(L' ');
        } else {
            // Pad by the character's display width so the caret lines up in the terminal.
            int width = fish_wcwidth(wc);
            if (width > 0) {
                caret_space_line.append(static_cast<size_t>(width), L' ');
            }
        }
    }
    result.push_back(L'\n');
    result.append(caret_space_line);
    result.push_back(L'^');
    return result;
}
|
2013-06-02 05:14:47 +00:00
|
|
|
|
2019-05-27 21:52:48 +00:00
|
|
|
/// Convenience overload: describe this error with no prefix and the caret enabled.
wcstring parse_error_t::describe(const wcstring &src, bool is_interactive) const {
    return describe_with_prefix(src, L"", is_interactive, false /* skip_caret */);
}
|
|
|
|
|
2016-05-02 23:09:46 +00:00
|
|
|
/// Shift the source_start of every error in \p errors forward by \p amt, e.g. after the
/// source the errors were produced against has been prepended with \p amt characters.
/// Errors whose location is unknown are left untouched.
void parse_error_offset_source_start(parse_error_list_t *errors, size_t amt) {
    assert(errors != nullptr);
    if (amt == 0) return;
    for (parse_error_t &error : *errors) {
        // Preserve the special meaning of -1 as 'unknown'.
        if (error.source_start != SOURCE_LOCATION_UNKNOWN) {
            error.source_start += amt;
        }
    }
}
|
|
|
|
|
2016-05-02 23:09:46 +00:00
|
|
|
/// Returns a string description for the given token type.
|
|
|
|
const wchar_t *token_type_description(parse_token_type_t type) {
|
2016-11-10 05:37:49 +00:00
|
|
|
const wchar_t *description = enum_to_str(type, token_enum_map);
|
|
|
|
if (description) return description;
|
2017-01-27 01:47:24 +00:00
|
|
|
return L"unknown_token_type";
|
2013-06-09 02:20:26 +00:00
|
|
|
}
|
|
|
|
|
2016-05-02 23:09:46 +00:00
|
|
|
/// Returns a string description for the given keyword, or a fixed fallback string
/// when the keyword is not present in the enum map.
const wchar_t *keyword_description(parse_keyword_t type) {
    const wchar_t *keyword = enum_to_str(type, keyword_enum_map);
    return keyword ? keyword : L"unknown_keyword";
}
|
|
|
|
|
2020-06-20 22:27:10 +00:00
|
|
|
/// Return a string describing a token, suitable for presentation to the user.
/// A keyword, if present, takes precedence over the raw token type.
wcstring token_type_user_presentable_description(parse_token_type_t type, parse_keyword_t keyword) {
    if (keyword != parse_keyword_t::none) {
        return format_string(L"keyword '%ls'", keyword_description(keyword));
    }
    switch (type) {
        // clang-format off
        case parse_token_type_string:      return L"a string";
        case parse_token_type_pipe:        return L"a pipe";
        case parse_token_type_redirection: return L"a redirection";
        case parse_token_type_background:  return L"a '&'";
        case parse_token_type_andand:      return L"'&&'";
        case parse_token_type_oror:        return L"'||'";
        case parse_token_type_end:         return L"end of the statement";
        case parse_token_type_terminate:   return L"end of the input";
        // clang-format on
        default:
            // Fall back to the internal (non-user-facing) description.
            return format_string(L"a %ls", token_type_description(type));
    }
}
|
|
|
|
|
2016-05-02 23:09:46 +00:00
|
|
|
/// Returns a string description of the given parse token.
|
|
|
|
wcstring parse_token_t::describe() const {
|
2014-01-12 23:10:59 +00:00
|
|
|
wcstring result = token_type_description(type);
|
2020-06-09 22:13:02 +00:00
|
|
|
if (keyword != parse_keyword_t::none) {
|
2016-04-11 02:08:07 +00:00
|
|
|
append_format(result, L" <%ls>", keyword_description(keyword));
|
2014-01-01 08:04:02 +00:00
|
|
|
}
|
2014-01-12 23:10:59 +00:00
|
|
|
return result;
|
|
|
|
}
|
|
|
|
|
2016-05-02 23:09:46 +00:00
|
|
|
/// A string description appropriate for presentation to the user.
|
|
|
|
wcstring parse_token_t::user_presentable_description() const {
|
2014-01-12 23:10:59 +00:00
|
|
|
return token_type_user_presentable_description(type, keyword);
|
2014-01-01 08:04:02 +00:00
|
|
|
}
|
2013-06-23 09:09:46 +00:00
|
|
|
|
2016-05-02 23:09:46 +00:00
|
|
|
/// Convert from tokenizer_t's token type to a parse_token_t type.
static inline parse_token_type_t parse_token_type_from_tokenizer_token(
    enum token_type_t tokenizer_token_type) {
    switch (tokenizer_token_type) {
        case token_type_t::string:
            return parse_token_type_string;
        case token_type_t::pipe:
            return parse_token_type_pipe;
        case token_type_t::andand:
            return parse_token_type_andand;
        case token_type_t::oror:
            return parse_token_type_oror;
        case token_type_t::end:
            return parse_token_type_end;
        case token_type_t::background:
            return parse_token_type_background;
        case token_type_t::redirect:
            return parse_token_type_redirection;
        case token_type_t::error:
            return parse_special_type_tokenizer_error;
        case token_type_t::comment:
            return parse_special_type_comment;
    }
    // The switch above handles every enumerator; reaching here means a corrupt or
    // out-of-range value was passed in.
    FLOGF(error, L"Bad token type %d passed to %s", static_cast<int>(tokenizer_token_type),
          __FUNCTION__);
    DIE("bad token type");
    // NOTE(review): presumably unreachable — DIE appears to terminate the process; the
    // return exists to satisfy compilers that require a value on all paths. Confirm.
    return token_type_invalid;
}
|
|
|
|
|
2016-05-02 23:09:46 +00:00
|
|
|
// Given an expanded string, returns any keyword it matches.
|
2016-11-10 05:37:49 +00:00
|
|
|
static inline parse_keyword_t keyword_with_name(const wchar_t *name) {
|
|
|
|
return str_to_enum(name, keyword_enum_map, keyword_enum_map_len);
|
2014-10-15 19:49:02 +00:00
|
|
|
}
|
|
|
|
|
2016-05-02 23:09:46 +00:00
|
|
|
/// Return whether \p c may appear in (the unexpanded text of) a keyword. Besides
/// alphanumerics, quotes, backslash and newline are allowed so that quoted/escaped
/// spellings of keywords are still scanned (the caller expands them before matching);
/// '!' is allowed as a keyword character in its own right.
static bool is_keyword_char(wchar_t c) {
    // Fixed: use the wide literal L'\n' like every other literal in this expression;
    // the old narrow '\n' only worked via integral promotion.
    return (c >= L'a' && c <= L'z') || (c >= L'A' && c <= L'Z') || (c >= L'0' && c <= L'9') ||
           c == L'\'' || c == L'"' || c == L'\\' || c == L'\n' || c == L'!';
}
|
|
|
|
|
2020-06-09 22:13:02 +00:00
|
|
|
/// Given a token, returns the keyword it matches, or parse_keyword_t::none.
|
2019-10-13 23:06:16 +00:00
|
|
|
static parse_keyword_t keyword_for_token(token_type_t tok, const wcstring &token) {
|
2014-10-15 19:49:02 +00:00
|
|
|
/* Only strings can be keywords */
|
2019-10-13 23:06:16 +00:00
|
|
|
if (tok != token_type_t::string) {
|
2020-06-09 22:13:02 +00:00
|
|
|
return parse_keyword_t::none;
|
2014-10-15 19:49:02 +00:00
|
|
|
}
|
2016-05-02 23:09:46 +00:00
|
|
|
|
|
|
|
// If tok_txt is clean (which most are), we can compare it directly. Otherwise we have to expand
|
|
|
|
// it. We only expand quotes, and we don't want to do expensive expansions like tilde
|
|
|
|
// expansions. So we do our own "cleanliness" check; if we find a character not in our allowed
|
|
|
|
// set we know it's not a keyword, and if we never find a quote we don't have to expand! Note
|
|
|
|
// that this lowercase set could be shrunk to be just the characters that are in keywords.
|
2020-06-09 22:13:02 +00:00
|
|
|
parse_keyword_t result = parse_keyword_t::none;
|
2014-10-15 19:49:02 +00:00
|
|
|
bool needs_expand = false, all_chars_valid = true;
|
2015-07-26 07:12:36 +00:00
|
|
|
const wchar_t *tok_txt = token.c_str();
|
2016-05-02 23:09:46 +00:00
|
|
|
for (size_t i = 0; tok_txt[i] != L'\0'; i++) {
|
2014-10-15 19:49:02 +00:00
|
|
|
wchar_t c = tok_txt[i];
|
2016-05-02 23:09:46 +00:00
|
|
|
if (!is_keyword_char(c)) {
|
2014-10-15 19:49:02 +00:00
|
|
|
all_chars_valid = false;
|
|
|
|
break;
|
|
|
|
}
|
2016-05-02 23:09:46 +00:00
|
|
|
// If we encounter a quote, we need expansion.
|
2016-04-08 10:20:21 +00:00
|
|
|
needs_expand = needs_expand || c == L'"' || c == L'\'' || c == L'\\';
|
2014-10-15 19:49:02 +00:00
|
|
|
}
|
2016-05-02 23:09:46 +00:00
|
|
|
|
|
|
|
if (all_chars_valid) {
|
|
|
|
// Expand if necessary.
|
|
|
|
if (!needs_expand) {
|
2014-10-15 19:49:02 +00:00
|
|
|
result = keyword_with_name(tok_txt);
|
2016-05-02 23:09:46 +00:00
|
|
|
} else {
|
2014-12-23 18:08:41 +00:00
|
|
|
wcstring storage;
|
2016-05-02 23:09:46 +00:00
|
|
|
if (unescape_string(tok_txt, &storage, 0)) {
|
2014-10-15 19:49:02 +00:00
|
|
|
result = keyword_with_name(storage.c_str());
|
2013-06-09 02:20:26 +00:00
|
|
|
}
|
|
|
|
}
|
2013-06-07 04:49:40 +00:00
|
|
|
}
|
2013-06-09 02:20:26 +00:00
|
|
|
return result;
|
2013-06-07 04:49:40 +00:00
|
|
|
}
|
|
|
|
|
2016-05-02 23:09:46 +00:00
|
|
|
/// Terminal token, returned by next_parse_token once the tokenizer is exhausted.
static constexpr parse_token_t kTerminalToken = {parse_token_type_terminate};
|
2013-12-26 22:52:15 +00:00
|
|
|
|
2017-04-05 04:28:57 +00:00
|
|
|
static inline bool is_help_argument(const wcstring &txt) {
|
|
|
|
return txt == L"-h" || txt == L"--help";
|
|
|
|
}
|
2014-01-14 10:29:53 +00:00
|
|
|
|
2016-05-02 23:09:46 +00:00
|
|
|
/// Return a new parse token, advancing the tokenizer.
///
/// \param tok the tokenizer to advance.
/// \param out_token receives the raw tokenizer token, or none at end of input.
/// \param storage scratch buffer used by the tokenizer to materialize the token's text.
/// \return the next parse token, or kTerminalToken when the input is exhausted.
parse_token_t next_parse_token(tokenizer_t *tok, maybe_t<tok_t> *out_token, wcstring *storage) {
    *out_token = tok->next();
    if (!out_token->has_value()) {
        return kTerminalToken;
    }
    const tok_t &token = **out_token;

    // Set the type, keyword, and whether there's a dash prefix. Note that this is quite sketchy,
    // because it ignores quotes. This is the historical behavior. For example, `builtin --names`
    // lists builtins, but `builtin "--names"` attempts to run --names as a command. Amazingly as of
    // this writing (10/12/13) nobody seems to have noticed this. Squint at it really hard and it
    // even starts to look like a feature.
    parse_token_t result{parse_token_type_from_tokenizer_token(token.type)};
    const wcstring &text = tok->copy_text_of(token, storage);
    result.keyword = keyword_for_token(token.type, text);
    result.has_dash_prefix = !text.empty() && text.at(0) == L'-';
    result.is_help_argument = result.has_dash_prefix && is_help_argument(text);
    result.is_newline = (result.type == parse_token_type_end && text == L"\n");
    result.preceding_escaped_nl = token.preceding_escaped_nl;
    result.may_be_variable_assignment = variable_assignment_equals_pos(text).has_value();
    result.tok_error = token.error;

    // These assertions are totally bogus. Basically our tokenizer works in size_t but we work in
    // uint32_t to save some space. If we have a source file larger than 4 GB, we'll probably just
    // crash.
    assert(token.offset < SOURCE_OFFSET_INVALID);
    result.source_start = static_cast<source_offset_t>(token.offset);

    assert(token.length <= SOURCE_OFFSET_INVALID);
    result.source_length = static_cast<source_offset_t>(token.length);

    return result;
}
|
|
|
|
|
2020-07-03 18:16:51 +00:00
|
|
|
/// Construct from the original source text and its parsed AST; the AST is moved to the heap.
parsed_source_t::parsed_source_t(wcstring s, ast::ast_t &&ast)
    : src(std::move(s)), ast(make_unique<ast::ast_t>(std::move(ast))) {}

// NOTE(review): the destructor is defaulted here out-of-line, presumably so the
// unique_ptr<ast::ast_t> member destructs where ast_t is a complete type — confirm
// against the header.
parsed_source_t::~parsed_source_t() = default;
|
|
|
|
|
2020-06-28 23:53:58 +00:00
|
|
|
/// Parse \p src according to \p flags, appending any parse errors to \p errors.
/// \return a reference to the parsed source, or nullptr if parsing failed hard
/// (errored and continuation after errors was not requested).
parsed_source_ref_t parse_source(wcstring src, parse_tree_flags_t flags,
                                 parse_error_list_t *errors) {
    ast::ast_t ast = ast::ast_t::parse(src, flags, errors);
    if (ast.errored() && !(flags & parse_flag_continue_after_error)) {
        return nullptr;
    }
    return std::make_shared<parsed_source_t>(std::move(src), std::move(ast));
}
|