Merge branch 'cmdsub_inherit_pgroup_3.1.1' into Integration_3.1.1

This merges three fixes around propagating pgroups and preventing pipe
deadlock. It is equivalent to the merge at c034c2c99b, but for 3.1.1.
ridiculousfish 2020-04-26 15:41:30 -07:00
commit ed8e7b934f
10 changed files with 106 additions and 39 deletions


@@ -17,6 +17,9 @@
/// Implementation of eval builtin.
int builtin_eval(parser_t &parser, io_streams_t &streams, wchar_t **argv) {
int argc = builtin_count_args(argv);
if (argc <= 1) {
return STATUS_CMD_OK;
}
wcstring new_cmd;
for (int i = 1; i < argc; ++i) {
@@ -24,19 +27,42 @@ int builtin_eval(parser_t &parser, io_streams_t &streams, wchar_t **argv) {
new_cmd += argv[i];
}
// The output and errput of eval must go to our streams, not to the io_chain in our streams,
// because they may contain a pipe which is intended to be consumed by a process which is not
// yet launched (#6806). Make bufferfills to capture it.
// TODO: we are sloppy and do not honor other redirections; eval'd code won't see for example a
// redirection of fd 3.
shared_ptr<io_bufferfill_t> stdout_fill =
io_bufferfill_t::create(fd_set_t{}, parser.libdata().read_limit, STDOUT_FILENO);
shared_ptr<io_bufferfill_t> stderr_fill =
io_bufferfill_t::create(fd_set_t{}, parser.libdata().read_limit, STDERR_FILENO);
if (!stdout_fill || !stderr_fill) {
// This can happen if we are unable to create a pipe.
return STATUS_CMD_ERROR;
}
const auto cached_exec_count = parser.libdata().exec_count;
int status = STATUS_CMD_OK;
if (argc > 1) {
if (parser.eval(new_cmd, *streams.io_chain) != eval_result_t::ok) {
status = STATUS_CMD_ERROR;
} else if (cached_exec_count == parser.libdata().exec_count) {
// Issue #5692, in particular, to catch `eval ""`, `eval "begin; end;"`, etc.
// where we have an argument but nothing is executed.
status = STATUS_CMD_OK;
} else {
status = parser.get_last_status();
}
if (parser.eval(new_cmd, io_chain_t{stdout_fill, stderr_fill}, block_type_t::top,
streams.parent_pgid) != eval_result_t::ok) {
status = STATUS_CMD_ERROR;
} else if (cached_exec_count == parser.libdata().exec_count) {
// Issue #5692, in particular, to catch `eval ""`, `eval "begin; end;"`, etc.
// where we have an argument but nothing is executed.
status = STATUS_CMD_OK;
} else {
status = parser.get_last_status();
}
// Finish the bufferfills - exhaust and close our pipes.
// Note it is important that we hold no other references to the bufferfills here - they need to
// deallocate to close.
std::shared_ptr<io_buffer_t> output = io_bufferfill_t::finish(std::move(stdout_fill));
std::shared_ptr<io_buffer_t> errput = io_bufferfill_t::finish(std::move(stderr_fill));
// Copy the output from the bufferfill back to the streams.
streams.out.append_narrow_buffer(output->buffer());
streams.err.append_narrow_buffer(errput->buffer());
return status;
}
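
The comment in builtin_eval above describes the deadlock being prevented: eval'd code may
write into a pipe whose consumer has not been launched yet, and once the kernel's pipe buffer
fills, the writer blocks forever. The bufferfill pattern avoids this by draining the pipe into
memory from a background thread. Below is a minimal standalone sketch of that idea in plain
POSIX/C++ terms; it is not fish's io_bufferfill_t API (which also handles read limits, fd
conflicts, and element separation), just the shape of the technique.

    // Sketch of the "bufferfill" pattern: give the writer a pipe whose read end is
    // continuously drained into memory, so the writer can never block on a full pipe.
    #include <unistd.h>

    #include <cstdio>
    #include <string>
    #include <thread>

    int main() {
        int fds[2];
        if (pipe(fds) < 0) return 1;

        // Background "fillthread": read until EOF, accumulating everything into a buffer.
        std::string buffer;
        std::thread filler([&buffer, rd = fds[0]]() {
            char chunk[4096];
            ssize_t amt;
            while ((amt = read(rd, chunk, sizeof chunk)) > 0) {
                buffer.append(chunk, static_cast<size_t>(amt));
            }
            close(rd);
        });

        // The writer (the eval'd code, in the real fix) can emit far more than the pipe
        // buffer holds without ever blocking, because the fillthread keeps it drained.
        for (int i = 0; i < 100000; ++i) {
            const char line[] = "some output line\n";
            if (write(fds[1], line, sizeof line - 1) < 0) break;
        }

        // Closing the write end is what lets the reader hit EOF - this mirrors the note
        // above about holding no other references to the bufferfills.
        close(fds[1]);
        filler.join();

        std::printf("captured %zu bytes\n", buffer.size());
        return 0;
    }

In the diff, io_bufferfill_t::finish() plays the role of closing the write end and waiting for
the fillthread, and append_narrow_buffer() copies the collected bytes back onto eval's output
and error streams.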


@@ -911,6 +911,7 @@ static bool exec_process_in_job(parser_t &parser, process_t *p, const std::share
case process_type_t::builtin: {
io_streams_t builtin_io_streams{stdout_read_limit};
if (j->pgid != INVALID_PID) builtin_io_streams.parent_pgid = j->pgid;
if (!exec_internal_builtin_proc(parser, j, p, pipe_read.get(), process_net_io_chain,
builtin_io_streams)) {
return false;
@@ -1121,8 +1122,8 @@ bool exec_job(parser_t &parser, const shared_ptr<job_t> &j, const job_lineage_t
return true;
}
static int exec_subshell_internal(const wcstring &cmd, parser_t &parser, wcstring_list_t *lst,
bool apply_exit_status, bool is_subcmd) {
static int exec_subshell_internal(const wcstring &cmd, parser_t &parser, maybe_t<pid_t> parent_pgid,
wcstring_list_t *lst, bool apply_exit_status, bool is_subcmd) {
ASSERT_IS_MAIN_THREAD();
auto &ld = parser.libdata();
bool prev_subshell = ld.is_subshell;
@@ -1144,7 +1145,8 @@ static int exec_subshell_internal(const wcstring &cmd, parser_t &parser, wcstrin
// be null.
std::shared_ptr<io_buffer_t> buffer;
if (auto bufferfill = io_bufferfill_t::create(fd_set_t{}, ld.read_limit)) {
if (parser.eval(cmd, io_chain_t{bufferfill}, block_type_t::subst) == eval_result_t::ok) {
if (parser.eval(cmd, io_chain_t{bufferfill}, block_type_t::subst, parent_pgid) ==
eval_result_t::ok) {
subcommand_statuses = parser.get_last_statuses();
}
buffer = io_bufferfill_t::finish(std::move(bufferfill));
@@ -1215,12 +1217,12 @@ static int exec_subshell_internal(const wcstring &cmd, parser_t &parser, wcstrin
}
int exec_subshell(const wcstring &cmd, parser_t &parser, wcstring_list_t &outputs,
bool apply_exit_status, bool is_subcmd) {
bool apply_exit_status, bool is_subcmd, maybe_t<pid_t> parent_pgid) {
ASSERT_IS_MAIN_THREAD();
return exec_subshell_internal(cmd, parser, &outputs, apply_exit_status, is_subcmd);
return exec_subshell_internal(cmd, parser, parent_pgid, &outputs, apply_exit_status, is_subcmd);
}
int exec_subshell(const wcstring &cmd, parser_t &parser, bool apply_exit_status, bool is_subcmd) {
ASSERT_IS_MAIN_THREAD();
return exec_subshell_internal(cmd, parser, nullptr, apply_exit_status, is_subcmd);
return exec_subshell_internal(cmd, parser, none(), nullptr, apply_exit_status, is_subcmd);
}


@@ -22,10 +22,12 @@ bool exec_job(parser_t &parser, const std::shared_ptr<job_t> &j, const job_linea
///
/// \param cmd the command to execute
/// \param outputs The list to insert output into.
/// \param parent_pgid if set, the pgid for any spawned jobs
///
/// \return the status of the last job to exit, or -1 if an error was encountered.
int exec_subshell(const wcstring &cmd, parser_t &parser, wcstring_list_t &outputs,
bool apply_exit_status, bool is_subcmd = false);
bool apply_exit_status, bool is_subcmd = false,
maybe_t<pid_t> parent_pgid = none());
int exec_subshell(const wcstring &cmd, parser_t &parser, bool apply_exit_status,
bool is_subcmd = false);
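
The parent_pgid parameter documented above ("if set, the pgid for any spawned jobs") is about
POSIX process groups: jobs started by eval or by a command substitution should join the
caller's existing group rather than lead a brand-new one, so the whole pipeline stays in a
single pgroup (see the tests at the end of this diff). A minimal sketch of that difference at
the system-call level follows; report_child_pgroup() is a hypothetical helper, not fish code,
and the real job machinery in exec.cpp does considerably more.

    // Sketch: a spawned job either leads a new process group (setpgid(0, 0)) or joins an
    // existing one (setpgid(0, pgid)). Propagating a parent pgid selects the second path.
    #include <sys/wait.h>
    #include <unistd.h>

    #include <cstdio>

    static void report_child_pgroup(pid_t pgid) {
        std::fflush(stdout);  // avoid duplicated buffered output after fork
        pid_t pid = fork();
        if (pid == 0) {
            // First argument 0 means "this process"; a pgid of 0 means "start a new group
            // led by this pid", any other value means "join that existing group".
            setpgid(0, pgid);
            std::printf("child %d is in pgroup %d\n", (int)getpid(), (int)getpgrp());
            std::fflush(stdout);
            _exit(0);
        }
        int status;
        waitpid(pid, &status, 0);
    }

    int main() {
        std::printf("caller is in pgroup %d\n", (int)getpgrp());
        report_child_pgroup(0);          // job leads its own, new group
        report_child_pgroup(getpgrp());  // parent pgid supplied: stays in the caller's group
        return 0;
    }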


@@ -577,8 +577,9 @@ static expand_result_t expand_braces(const wcstring &instr, expand_flags_t flags
}
/// Perform cmdsubst expansion.
static bool expand_cmdsubst(wcstring input, parser_t &parser, completion_list_t *out_list,
parse_error_list_t *errors) {
static bool expand_cmdsubst(wcstring input, const operation_context_t &ctx,
completion_list_t *out_list, parse_error_list_t *errors) {
assert(ctx.parser && "Cannot expand without a parser");
wchar_t *paren_begin = nullptr, *paren_end = nullptr;
wchar_t *tail_begin = nullptr;
size_t i, j;
@@ -605,14 +606,14 @@ static bool expand_cmdsubst(wcstring input, parser_t &parser, completion_list_t
wcstring_list_t sub_res;
const wcstring subcmd(paren_begin + 1, paren_end - paren_begin - 1);
if (exec_subshell(subcmd, parser, sub_res, true /* apply_exit_status */,
true /* is_subcmd */) == -1) {
if (exec_subshell(subcmd, *ctx.parser, sub_res, true /* apply_exit_status */,
true /* is_subcmd */, ctx.parent_pgid) == -1) {
append_cmdsub_error(errors, SOURCE_LOCATION_UNKNOWN,
L"Unknown error while evaluating command substitution");
return false;
}
if (parser.get_last_status() == STATUS_READ_TOO_MUCH) {
if (ctx.parser->get_last_status() == STATUS_READ_TOO_MUCH) {
append_cmdsub_error(
errors, in - paren_begin,
_(L"Too much data emitted by command substitution so it was discarded\n"));
@@ -652,7 +653,7 @@ static bool expand_cmdsubst(wcstring input, parser_t &parser, completion_list_t
// Recursively call ourselves to expand any remaining command substitutions. The result of this
// recursive call using the tail of the string is inserted into the tail_expand array list
completion_list_t tail_expand;
expand_cmdsubst(tail_begin, parser, &tail_expand, errors); // TODO: offset error locations
expand_cmdsubst(tail_begin, ctx, &tail_expand, errors); // TODO: offset error locations
// Combine the result of the current command substitution with the result of the recursive tail
// expansion.
@@ -686,7 +687,7 @@ static bool expand_cmdsubst(wcstring input, parser_t &parser, completion_list_t
}
}
return parser.get_last_status() != STATUS_READ_TOO_MUCH;
return ctx.parser->get_last_status() != STATUS_READ_TOO_MUCH;
}
// Given that input[0] is HOME_DIRECTORY or tilde (ugh), return the user's name. Return the empty
@@ -897,7 +898,7 @@ expand_result_t expander_t::stage_cmdsubst(wcstring input, completion_list_t *ou
}
} else {
assert(ctx.parser && "Must have a parser to expand command substitutions");
bool cmdsubst_ok = expand_cmdsubst(std::move(input), *ctx.parser, out, errors);
bool cmdsubst_ok = expand_cmdsubst(std::move(input), ctx, out, errors);
if (!cmdsubst_ok) return expand_result_t::error;
}


@@ -174,8 +174,10 @@ void io_buffer_t::complete_background_fillthread() {
fillthread_waiter_ = {};
}
shared_ptr<io_bufferfill_t> io_bufferfill_t::create(const fd_set_t &conflicts,
size_t buffer_limit) {
shared_ptr<io_bufferfill_t> io_bufferfill_t::create(const fd_set_t &conflicts, size_t buffer_limit,
int target) {
assert(target >= 0 && "Invalid target fd");
// Construct our pipes.
auto pipes = make_autoclose_pipes(conflicts);
if (!pipes) {
@@ -192,7 +194,7 @@ shared_ptr<io_bufferfill_t> io_bufferfill_t::create(const fd_set_t &conflicts,
// Our fillthread gets the read end of the pipe; out_pipe gets the write end.
auto buffer = std::make_shared<io_buffer_t>(buffer_limit);
buffer->begin_background_fillthread(std::move(pipes->read));
return std::make_shared<io_bufferfill_t>(std::move(pipes->write), buffer);
return std::make_shared<io_bufferfill_t>(target, std::move(pipes->write), buffer);
}
std::shared_ptr<io_buffer_t> io_bufferfill_t::finish(std::shared_ptr<io_bufferfill_t> &&filler) {
@@ -346,3 +348,9 @@ shared_ptr<const io_data_t> io_chain_t::io_for_fd(int fd) const {
}
return nullptr;
}
void output_stream_t::append_narrow_buffer(const separated_buffer_t<std::string> &buffer) {
for (const auto &rhs_elem : buffer.elements()) {
buffer_.append(str2wcstring(rhs_elem.contents), rhs_elem.separation);
}
}


@@ -252,7 +252,6 @@ class io_buffer_t;
class io_chain_t;
/// Represents filling an io_buffer_t. Very similar to io_pipe_t.
/// Bufferfills always target stdout.
class io_bufferfill_t : public io_data_t {
/// Write end. The other end is connected to an io_buffer_t.
const autoclose_fd_t write_fd_;
@@ -265,8 +264,8 @@ class io_bufferfill_t : public io_data_t {
// The ctor is public to support make_shared() in the static create function below.
// Do not invoke this directly.
io_bufferfill_t(autoclose_fd_t write_fd, std::shared_ptr<io_buffer_t> buffer)
: io_data_t(io_mode_t::bufferfill, STDOUT_FILENO, write_fd.fd()),
io_bufferfill_t(int target, autoclose_fd_t write_fd, std::shared_ptr<io_buffer_t> buffer)
: io_data_t(io_mode_t::bufferfill, target, write_fd.fd()),
write_fd_(std::move(write_fd)),
buffer_(std::move(buffer)) {
assert(write_fd_.valid() && "fd is not valid");
@@ -279,9 +278,11 @@ class io_bufferfill_t : public io_data_t {
/// Create an io_bufferfill_t which, when written from, fills a buffer with the contents.
/// \returns nullptr on failure, e.g. too many open fds.
///
/// \param target the fd which this will be dup2'd to - typically stdout.
/// \param conflicts A set of fds. The function ensures that any pipe it makes does
/// not conflict with an fd redirection in this list.
static shared_ptr<io_bufferfill_t> create(const fd_set_t &conflicts, size_t buffer_limit = 0);
static shared_ptr<io_bufferfill_t> create(const fd_set_t &conflicts, size_t buffer_limit = 0,
int target = STDOUT_FILENO);
/// Reset the receiver (possibly closing the write end of the pipe), and complete the fillthread
/// of the buffer. \return the buffer.
@@ -421,6 +422,9 @@ class output_stream_t {
void append(const wchar_t *s, size_t amt) { buffer_.append(s, s + amt); }
// Append data from a narrow buffer, widening it.
void append_narrow_buffer(const separated_buffer_t<std::string> &buffer);
void push_back(wchar_t c) { append(c); }
void append_format(const wchar_t *format, ...) {
@@ -454,6 +458,11 @@ struct io_streams_t {
// Actual IO redirections. This is only used by the source builtin. Unowned.
const io_chain_t *io_chain{nullptr};
// The pgid of the job, if any. This enables builtins which run more code like eval() to share
// pgid.
// TODO: this is awkwardly placed, consider just embedding a lineage here.
maybe_t<pid_t> parent_pgid{};
// io_streams_t cannot be copied.
io_streams_t(const io_streams_t &) = delete;
void operator=(const io_streams_t &) = delete;


@@ -29,6 +29,11 @@ class operation_context_t {
// context itself.
const environment_t &vars;
/// The pgid of the parental job.
/// This is used only when expanding command substitutions. If this is set, any jobs created by
/// the command substitutions should use this pgid.
maybe_t<pid_t> parent_pgid{};
// A function which may be used to poll for cancellation.
cancel_checker_t cancel_checker;


@@ -634,11 +634,11 @@ profile_item_t *parser_t::create_profile_item() {
}
eval_result_t parser_t::eval(const wcstring &cmd, const io_chain_t &io,
enum block_type_t block_type) {
enum block_type_t block_type, maybe_t<pid_t> parent_pgid) {
// Parse the source into a tree, if we can.
parse_error_list_t error_list;
if (parsed_source_ref_t ps = parse_source(cmd, parse_flag_none, &error_list)) {
return this->eval(ps, io, block_type);
return this->eval(ps, io, block_type, parent_pgid);
} else {
// Get a backtrace. This includes the message.
wcstring backtrace_and_desc;
@@ -651,11 +651,12 @@ eval_result_t parser_t::eval(const wcstring &cmd, const io_chain_t &io,
}
eval_result_t parser_t::eval(const parsed_source_ref_t &ps, const io_chain_t &io,
enum block_type_t block_type) {
enum block_type_t block_type, maybe_t<pid_t> parent_pgid) {
assert(block_type == block_type_t::top || block_type == block_type_t::subst);
if (!ps->tree.empty()) {
job_lineage_t lineage;
lineage.block_io = io;
lineage.parent_pgid = parent_pgid;
// Execute the first node.
tnode_t<grammar::job_list> start{&ps->tree, &ps->tree.front()};
return this->eval_node(ps, start, std::move(lineage), block_type);
@@ -689,6 +690,9 @@ eval_result_t parser_t::eval_node(const parsed_source_ref_t &ps, tnode_t<T> node
operation_context_t op_ctx = this->context();
block_t *scope_block = this->push_block(block_t::scope_block(block_type));
// Propagate any parent pgid.
op_ctx.parent_pgid = lineage.parent_pgid;
// Create and set a new execution context.
using exc_ctx_ref_t = std::unique_ptr<parse_execution_context_t>;
scoped_push<exc_ctx_ref_t> exc(&execution_context, make_unique<parse_execution_context_t>(


@@ -266,15 +266,18 @@ class parser_t : public std::enable_shared_from_this<parser_t> {
/// \param io io redirections to perform on all started jobs
/// \param block_type The type of block to push on the block stack, which must be either 'top'
/// or 'subst'.
/// \param parent_pgid if set, the pgid to give to spawned jobs
///
/// \return the eval result.
eval_result_t eval(const wcstring &cmd, const io_chain_t &io,
block_type_t block_type = block_type_t::top);
block_type_t block_type = block_type_t::top,
maybe_t<pid_t> parent_pgid = {});
/// Evaluate the parsed source ps.
/// Because the source has been parsed, a syntax error is impossible.
eval_result_t eval(const parsed_source_ref_t &ps, const io_chain_t &io,
block_type_t block_type = block_type_t::top);
block_type_t block_type = block_type_t::top,
maybe_t<pid_t> parent_pgid = {});
/// Evaluates a node.
/// The node type must be grammar::statement or grammar::job_list.


@@ -9,11 +9,13 @@ end
# Here everything should live in the pgroup of the first fish_test_helper.
$fth print_pgrp | read -g global_group | save_pgroup g1 | begin
save_pgroup g2
end | begin
echo (save_pgroup g3) >/dev/null
end
[ "$global_group" -eq "$g1" ] && [ "$g1" -eq "$g2" ]
[ "$global_group" -eq "$g1" ] && [ "$g1" -eq "$g2" ] && [ "$g2" -eq "$g3" ]
and echo "All pgroups agreed"
or echo "Pgroups disagreed. Should be in $global_group but found $g1 and $g2"
or echo "Pgroups disagreed. Should be in $global_group but found $g1, $g2, $g3"
# CHECK: All pgroups agreed
# Here everything should live in fish's pgroup.
@@ -35,3 +37,8 @@ end
and echo "All pgroups agreed"
or echo "Pgroups disagreed. Found $a0 $a1 $a2, and $b0 $b1 $b2"
# CHECK: All pgroups agreed
# Ensure that eval retains pgroups - #6806.
# Our regex will capture the first pgroup and use a positive lookahead on the second.
$fth print_pgrp | tr \n ' ' 1>&2 | eval '$fth print_pgrp' 1>&2
# CHECKERR: {{(\d+) (?=\1)\d+}}
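
The two numbers compared by the CHECKERR regex are process group ids printed by the two
fish_test_helper print_pgrp invocations ($fth is fish's test helper binary); tr replaces the
first id's trailing newline with a space so both ids land on one stderr line for the single
regex. Judging by how the test consumes that output, each stage simply reports its own
pgroup, which as a standalone C++ stand-in is just:

    #include <unistd.h>

    #include <cstdio>

    int main() {
        // Print this process's process group id - the value the regex captures and compares.
        std::printf("%d\n", (int)getpgrp());
        return 0;
    }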