Tighten up pipeline-aborting errors

Prior to this change, the functions in exec.cpp would return true or false
and it was not clear what significance that value had.

Switch to an enum to make this more explicit. In particular we have the
idea of a "pipeline-breaking" error which allows us to skip processes
which have not yet launched; if no process launches at all, then we can
bail out to a different path which avoids reaping processes.
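
As a rough, self-contained sketch of the resulting control flow (the toy
types and helpers below are illustrative stand-ins only; the real machinery
is process_t/job_t plus abort_pipeline_from and mark_aborted_before_launch
in the diff that follows):

    #include <cstddef>
    #include <cstdio>
    #include <vector>

    // Stand-ins for the real process_t/job_t machinery, for illustration only.
    enum class launch_result_t { ok, failed };

    struct toy_process {
        bool fails_to_launch = false;  // e.g. failed redirection, fork() failure
        bool completed = false;        // set when aborted before launch
    };

    static launch_result_t launch(toy_process &p) {
        return p.fails_to_launch ? launch_result_t::failed : launch_result_t::ok;
    }

    // Returns false if the pipeline aborted before anything launched,
    // in which case the caller has nothing to reap.
    static bool run_pipeline(std::vector<toy_process> &procs) {
        bool aborted_pipeline = false;
        std::size_t procs_launched = 0;
        for (std::size_t i = 0; i < procs.size(); ++i) {
            if (launch(procs[i]) == launch_result_t::failed) {
                // Pipeline-breaking error: mark this process and every later
                // one as aborted before launch, then stop launching.
                for (std::size_t j = i; j < procs.size(); ++j) procs[j].completed = true;
                aborted_pipeline = true;
                break;
            }
            procs_launched += 1;
        }
        // Nothing launched at all: bail out early and skip the reaping path.
        return !(aborted_pipeline && procs_launched == 0);
    }

    int main() {
        std::vector<toy_process> pipeline(3);
        pipeline[0].fails_to_launch = true;  // first process cannot launch
        std::puts(run_pipeline(pipeline) ? "launched something" : "aborted before any launch");
        return 0;
    }

The key point is that a launch failure marks the remainder of the pipeline
as aborted-before-launch, and a pipeline that launched nothing can return
early without ever entering the reaping path.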
ridiculousfish 2020-12-13 15:39:20 -08:00
parent 364c6001dc
commit 2caeec24f7
3 changed files with 136 additions and 113 deletions


@@ -54,6 +54,14 @@
/// Number of calls to fork() or posix_spawn().
static relaxed_atomic_t<int> s_fork_count{0};
/// A launch_result_t indicates when a process failed to launch, and therefore the rest of the
/// pipeline should be aborted. This includes failed redirections, fd exhaustion, fork() failures,
/// etc.
enum class launch_result_t {
ok,
failed,
} __warn_unused_type;
/// This function is executed by the child process created by a call to fork(). It should be called
/// after \c child_setup_process. It calls execve to replace the fish process image with the command
/// specified in \c p. It never returns. Called in a forked child! Do not allocate memory, etc.
@@ -318,10 +326,10 @@ bool blocked_signals_for_job(const job_t &job, sigset_t *sigmask) {
}
/// Call fork() as part of executing a process \p p in a job \j. Execute \p child_action in the
/// context of the child. Returns true if fork succeeded, false if fork failed.
static bool fork_child_for_process(const std::shared_ptr<job_t> &job, process_t *p,
const dup2_list_t &dup2s, const char *fork_type,
const std::function<void()> &child_action) {
/// context of the child.
static launch_result_t fork_child_for_process(const std::shared_ptr<job_t> &job, process_t *p,
const dup2_list_t &dup2s, const char *fork_type,
const std::function<void()> &child_action) {
assert(!job->group->is_internal() && "Internal groups should never need to fork");
// Decide if we want the job to control the tty.
// If so we need to get our pgroup; if not we don't need the pgroup.
@@ -345,9 +353,7 @@ static bool fork_child_for_process(const std::shared_ptr<job_t> &job, process_t
}
if (pid < 0) {
FLOGF(warning, L"Failed to fork %s!\n", fork_type);
job_mark_process_as_failed(job, p);
return false;
return launch_result_t::failed;
}
// This is the parent process. Store away information on the child, and
@@ -365,14 +371,17 @@ static bool fork_child_for_process(const std::shared_ptr<job_t> &job, process_t
if (err != EACCES) report_setpgid_error(err, true /* is_parent */, pgid, job.get(), p);
}
terminal_maybe_give_to_job_group(job->group.get(), false);
return true;
return launch_result_t::ok;
}
/// Execute an internal builtin. Given a parser and a builtin process, execute the builtin with the
/// given streams. If pipe_read is set, assign stdin to it; otherwise infer stdin from the IO chain.
/// \return true on success, false if there is an exec error.
static bool exec_internal_builtin_proc(parser_t &parser, process_t *p, const io_pipe_t *pipe_read,
const io_chain_t &proc_io_chain, io_streams_t &streams) {
/// An error return here indicates that the process failed to launch, and the rest of
/// the pipeline should be cancelled.
static launch_result_t exec_internal_builtin_proc(parser_t &parser, process_t *p,
const io_pipe_t *pipe_read,
const io_chain_t &proc_io_chain,
io_streams_t &streams) {
assert(p->type == process_type_t::builtin && "Process must be a builtin");
int local_builtin_stdin = STDIN_FILENO;
autoclose_fd_t locally_opened_stdin{};
@@ -394,7 +403,7 @@ static bool exec_internal_builtin_proc(parser_t &parser, process_t *p, const io_
}
}
if (local_builtin_stdin == -1) return false;
if (local_builtin_stdin == -1) return launch_result_t::failed;
// Determine if we have a "direct" redirection for stdin.
bool stdin_is_directly_redirected = false;
@@ -427,7 +436,7 @@ static bool exec_internal_builtin_proc(parser_t &parser, process_t *p, const io_
// Note this call may block for a long time, while the builtin performs I/O.
p->status = builtin_run(parser, p->get_argv(), streams);
return true; // "success"
return launch_result_t::ok;
}
/// \return a newly allocated output stream for the given fd, which is typically stdout or stderr.
@@ -460,7 +469,7 @@ static std::unique_ptr<output_stream_t> create_output_stream_for_builtin(const p
/// Handle output from a builtin, by printing the contents of builtin_io_streams to the redirections
/// given in io_chain.
static bool handle_builtin_output(parser_t &parser, const std::shared_ptr<job_t> &j, process_t *p,
static void handle_builtin_output(parser_t &parser, const std::shared_ptr<job_t> &j, process_t *p,
io_chain_t *io_chain, const io_streams_t &builtin_io_streams) {
assert(p->type == process_type_t::builtin && "Process is not a builtin");
@@ -538,13 +547,13 @@ static bool handle_builtin_output(parser_t &parser, const std::shared_ptr<job_t>
// Construct and run our background process.
run_internal_process_or_short_circuit(parser, j, p, std::move(outbuff), std::move(errbuff),
*io_chain);
return true;
}
/// Executes an external command.
/// \return true on success, false if there is an exec error.
static bool exec_external_command(parser_t &parser, const std::shared_ptr<job_t> &j, process_t *p,
const io_chain_t &proc_io_chain) {
/// An error return here indicates that the process failed to launch, and the rest of
/// the pipeline should be cancelled.
static launch_result_t exec_external_command(parser_t &parser, const std::shared_ptr<job_t> &j,
process_t *p, const io_chain_t &proc_io_chain) {
assert(p->type == process_type_t::external && "Process is not external");
// Get argv and envv before we fork.
null_terminated_array_t<char> argv_array;
@@ -579,8 +588,7 @@ static bool exec_external_command(parser_t &parser, const std::shared_ptr<job_t>
const_cast<char *const *>(envv));
if (int err = spawner.get_error()) {
safe_report_exec_error(err, actual_cmd, argv, envv);
job_mark_process_as_failed(j, p);
return false;
return launch_result_t::failed;
}
assert(pid.has_value() && *pid > 0 && "Should have either a valid pid, or an error");
@@ -601,16 +609,13 @@ static bool exec_external_command(parser_t &parser, const std::shared_ptr<job_t>
// Ensure it gets set. See #4715, also https://github.com/Microsoft/WSL/issues/2997.
execute_setpgid(p->pid, pgid, true /* is parent */);
terminal_maybe_give_to_job_group(j->group.get(), false);
return launch_result_t::ok;
} else
#endif
{
if (!fork_child_for_process(j, p, dup2s, "external command",
[&] { safe_launch_process(p, actual_cmd, argv, envv); })) {
return false;
}
return fork_child_for_process(j, p, dup2s, "external command",
[&] { safe_launch_process(p, actual_cmd, argv, envv); });
}
return true;
}
// Given that we are about to execute a function, push a function block and set up the
@@ -710,18 +715,16 @@ static proc_performer_t get_performer_for_process(process_t *p, job_t *job,
/// Execute a block node or function "process".
/// \p conflicts contains the list of fds which pipes should avoid.
/// \p allow_buffering if true, permit buffering the output.
/// \return true on success, false on error.
static bool exec_block_or_func_process(parser_t &parser, const std::shared_ptr<job_t> &j,
process_t *p, const fd_set_t &conflicts, io_chain_t io_chain,
bool allow_buffering) {
static launch_result_t exec_block_or_func_process(parser_t &parser, const std::shared_ptr<job_t> &j,
process_t *p, const fd_set_t &conflicts,
io_chain_t io_chain, bool allow_buffering) {
// Create an output buffer if we're piping to another process.
shared_ptr<io_bufferfill_t> block_output_bufferfill{};
if (!p->is_last_in_job && allow_buffering) {
// Be careful to handle failure, e.g. too many open fds.
block_output_bufferfill = io_bufferfill_t::create(conflicts);
if (!block_output_bufferfill) {
job_mark_process_as_failed(j, p);
return false;
return launch_result_t::failed;
}
// Teach the job about its bufferfill, and add it to our io chain.
io_chain.push_back(block_output_bufferfill);
@@ -733,7 +736,7 @@ static bool exec_block_or_func_process(parser_t &parser, const std::shared_ptr<j
if (proc_performer_t performer = get_performer_for_process(p, j.get(), io_chain)) {
p->status = performer(parser);
} else {
return false;
return launch_result_t::failed;
}
// If we have a block output buffer, populate it now.
@@ -748,18 +751,22 @@ static bool exec_block_or_func_process(parser_t &parser, const std::shared_ptr<j
run_internal_process_or_short_circuit(parser, j, p, std::move(buffer_contents),
{} /* errdata */, io_chain);
return true;
return launch_result_t::ok;
}
/// Executes a process \p p in \p job, using the pipes \p pipes (which may have invalid fds if this
/// is the first or last process).
/// \p deferred_pipes represents the pipes from our deferred process; if set ensure they get closed
/// in any child. If \p is_deferred_run is true, then this is a deferred run; this affects how
/// certain buffering works. \returns true on success, false on exec error.
static bool exec_process_in_job(parser_t &parser, process_t *p, const std::shared_ptr<job_t> &j,
const io_chain_t &block_io, autoclose_pipes_t pipes,
const fd_set_t &conflicts, const autoclose_pipes_t &deferred_pipes,
bool is_deferred_run = false) {
/// certain buffering works.
/// An error return here indicates that the process failed to launch, and the rest of
/// the pipeline should be cancelled.
static launch_result_t exec_process_in_job(parser_t &parser, process_t *p,
const std::shared_ptr<job_t> &j,
const io_chain_t &block_io, autoclose_pipes_t pipes,
const fd_set_t &conflicts,
const autoclose_pipes_t &deferred_pipes,
bool is_deferred_run = false) {
// The write pipe (destined for stdout) needs to occur before redirections. For example,
// with a redirection like this:
//
@@ -803,21 +810,10 @@ static bool exec_process_in_job(parser_t &parser, process_t *p, const std::share
}
// Append IOs from the process's redirection specs.
// This may fail.
// This may fail, e.g. a failed redirection.
if (!process_net_io_chain.append_from_specs(p->redirection_specs(),
parser.vars().get_pwd_slash())) {
// If the redirection to a file failed, append_from_specs closes the corresponding handle
// allowing us to recover from failure mid-way through execution of a piped job to e.g.
// recover from loss of terminal control, etc.
// It is unsafe to abort execution in the middle of a multi-command job as we might end up
// trying to resume execution without control of the terminal.
if (p->is_first_in_job) {
// It's OK to abort here
return false;
}
// Otherwise, just rely on the closed fd to take care of things. It's also probably more
// semantically correct!
return launch_result_t::failed;
}
// Read pipe goes last.
@@ -859,9 +855,9 @@ static bool exec_process_in_job(parser_t &parser, process_t *p, const std::share
// Allow buffering unless this is a deferred run. If deferred, then processes after us
// were already launched, so they are ready to receive (or reject) our output.
bool allow_buffering = !is_deferred_run;
if (!exec_block_or_func_process(parser, j, p, conflicts, process_net_io_chain,
allow_buffering)) {
return false;
if (exec_block_or_func_process(parser, j, p, conflicts, process_net_io_chain,
allow_buffering) == launch_result_t::failed) {
return launch_result_t::failed;
}
break;
}
@@ -873,19 +869,19 @@ static bool exec_process_in_job(parser_t &parser, process_t *p, const std::share
create_output_stream_for_builtin(parser, process_net_io_chain, STDERR_FILENO);
io_streams_t builtin_io_streams{*output_stream, *errput_stream};
builtin_io_streams.job_group = j->group;
if (!exec_internal_builtin_proc(parser, p, pipe_read.get(), process_net_io_chain,
builtin_io_streams)) {
return false;
}
if (!handle_builtin_output(parser, j, p, &process_net_io_chain, builtin_io_streams)) {
return false;
if (exec_internal_builtin_proc(parser, p, pipe_read.get(), process_net_io_chain,
builtin_io_streams) == launch_result_t::failed) {
return launch_result_t::failed;
}
handle_builtin_output(parser, j, p, &process_net_io_chain, builtin_io_streams);
break;
}
case process_type_t::external: {
if (!exec_external_command(parser, j, p, process_net_io_chain)) {
return false;
if (exec_external_command(parser, j, p, process_net_io_chain) ==
launch_result_t::failed) {
return launch_result_t::failed;
}
break;
}
@@ -896,7 +892,7 @@ static bool exec_process_in_job(parser_t &parser, process_t *p, const std::share
"Aborting.");
}
}
return true;
return launch_result_t::ok;
}
// Do we have a fish internal process that pipes into a real process? If so, we are going to
@@ -907,22 +903,33 @@ static bool exec_process_in_job(parser_t &parser, process_t *p, const std::share
// This should show the output as it comes, not buffer until the end.
// Any such process (only one per job) will be called the "deferred" process.
static process_t *get_deferred_process(const shared_ptr<job_t> &j) {
// By enumerating in reverse order, we can avoid walking the entire list
// Common case is no deferred proc.
if (j->processes.size() <= 1) return nullptr;
// Skip execs, which can only appear at the front.
if (j->processes.front()->type == process_type_t::exec) return nullptr;
// Find the last non-external process, and return it if it pipes into an external process.
for (auto i = j->processes.rbegin(); i != j->processes.rend(); ++i) {
const auto &p = *i;
if (p->type == process_type_t::exec) {
// No tail reordering for execs.
return nullptr;
} else if (p->type != process_type_t::external) {
if (p->is_last_in_job) {
return nullptr;
}
return p.get();
process_t *p = i->get();
if (p->type != process_type_t::external) {
return p->is_last_in_job ? nullptr : p;
}
}
return nullptr;
}
/// Given that we failed to execute process \p failed_proc in job \p job, mark that process and
/// every subsequent process in the pipeline as aborted before launch.
static void abort_pipeline_from(const shared_ptr<job_t> &job, const process_t *failed_proc) {
bool found = false;
for (process_ptr_t &p : job->processes) {
found = found || (p.get() == failed_proc);
if (found) p->mark_aborted_before_launch();
}
assert(found && "Process not present in job");
}
// Given that we are about to execute an exec() call, check if the parser is interactive and there
// are extant background jobs. If so, warn the user and do not exec().
// \return true if we should allow exec, false to disallow it.
@@ -950,10 +957,6 @@ static bool allow_exec_with_background_jobs(parser_t &parser) {
bool exec_job(parser_t &parser, const shared_ptr<job_t> &j, const io_chain_t &block_io) {
assert(j && "null job_t passed to exec_job!");
// Set to true if something goes wrong while executing the job, in which case the cleanup
// code will kick in.
bool exec_error = false;
// If fish was invoked with -n or --no-execute, then no_exec will be set and we do nothing.
if (no_exec()) {
return true;
@@ -971,6 +974,9 @@ bool exec_job(parser_t &parser, const shared_ptr<job_t> &j, const io_chain_t &bl
if (j->processes.front()->type == process_type_t::exec) {
// If we are interactive, perhaps disallow exec if there are background jobs.
if (!allow_exec_with_background_jobs(parser)) {
for (const auto &p : j->processes) {
p->mark_aborted_before_launch();
}
return false;
}
@@ -981,6 +987,9 @@ bool exec_job(parser_t &parser, const shared_ptr<job_t> &j, const io_chain_t &bl
parser.set_last_statuses(statuses_t::just(status));
// A false return tells the caller to remove the job from the list.
for (const auto &p : j->processes) {
p->mark_aborted_before_launch();
}
return false;
}
cleanup_t timer = push_timer(j->wants_timing() && !no_exec());
@@ -1000,8 +1009,14 @@ bool exec_job(parser_t &parser, const shared_ptr<job_t> &j, const io_chain_t &bl
// 2. The pipe that the current process should write to
// 3. The pipe that the next process should read from (courtesy of us)
//
// Lastly, a process may experience a pipeline-aborting error, which prevents launching
// further processes in the pipeline.
autoclose_fd_t pipe_next_read;
for (const auto &p : j->processes) {
bool aborted_pipeline = false;
size_t procs_launched = 0;
for (const auto &procptr : j->processes) {
process_t *p = procptr.get();
// proc_pipes is the pipes applied to this process. That is, it is the read end
// containing the output of the previous process (if any), plus the write end that will
// output to the next process (if any).
@@ -1012,32 +1027,50 @@ bool exec_job(parser_t &parser, const shared_ptr<job_t> &j, const io_chain_t &bl
if (!pipes) {
FLOGF(warning, PIPE_ERROR);
wperror(L"pipe");
job_mark_process_as_failed(j, p.get());
exec_error = true;
aborted_pipeline = true;
abort_pipeline_from(j, p);
break;
}
pipe_next_read = std::move(pipes->read);
proc_pipes.write = std::move(pipes->write);
}
if (p.get() == deferred_process) {
// Save any deferred process for last.
if (p == deferred_process) {
deferred_pipes = std::move(proc_pipes);
} else {
if (!exec_process_in_job(parser, p.get(), j, block_io, std::move(proc_pipes), conflicts,
deferred_pipes)) {
exec_error = true;
break;
}
continue;
}
// Regular process.
if (exec_process_in_job(parser, p, j, block_io, std::move(proc_pipes), conflicts,
deferred_pipes) == launch_result_t::failed) {
aborted_pipeline = true;
abort_pipeline_from(j, p);
break;
}
procs_launched += 1;
}
pipe_next_read.close();
// Now execute any deferred process.
if (!exec_error && deferred_process) {
assert(deferred_pipes.write.valid() && "Deferred process should always have a write pipe");
if (!exec_process_in_job(parser, deferred_process, j, block_io, std::move(deferred_pipes),
conflicts, {}, true)) {
exec_error = true;
// If our pipeline was aborted before any process was successfully launched, then there is
// nothing to reap, and we can perform an early return.
// Note we must never return false if we have launched even one process, since it will not be
// properly reaped; see #7038.
if (aborted_pipeline && procs_launched == 0) {
return false;
}
// Ok, at least one thing got launched.
// Handle any deferred process.
if (deferred_process) {
if (aborted_pipeline) {
// Some other process already aborted our pipeline.
deferred_process->mark_aborted_before_launch();
} else if (exec_process_in_job(parser, deferred_process, j, block_io,
std::move(deferred_pipes), conflicts, {},
true) == launch_result_t::failed) {
// The deferred proc itself failed to launch.
deferred_process->mark_aborted_before_launch();
}
}
@@ -1053,10 +1086,6 @@ bool exec_job(parser_t &parser, const shared_ptr<job_t> &j, const io_chain_t &bl
parser.vars().set_one(L"last_pid", ENV_GLOBAL, to_string(*pgid));
}
if (exec_error) {
return false;
}
j->continue_job(parser, !j->is_initially_background());
return true;
}


@@ -232,18 +232,6 @@ void print_exit_warning_for_jobs(const job_list_t &jobs) {
reader_schedule_prompt_repaint();
}
void job_mark_process_as_failed(const std::shared_ptr<job_t> &job, const process_t *failed_proc) {
// The given process failed to even lift off (e.g. posix_spawn failed) and so doesn't have a
// valid pid. Mark it and everything after it as dead.
bool found = false;
for (process_ptr_t &p : job->processes) {
found = found || (p.get() == failed_proc);
if (found) {
p->completed = true;
}
}
}
/// Set the status of \p proc to \p status.
static void handle_child_status(const shared_ptr<job_t> &job, process_t *proc,
proc_status_t status) {
@@ -282,6 +270,11 @@ void process_t::check_generations_before_launch() {
gens_ = topic_monitor_t::principal().current_generations();
}
void process_t::mark_aborted_before_launch() {
completed = true;
status = proc_status_t::from_exit_code(EXIT_FAILURE);
}
bool process_t::is_internal() const {
switch (type) {
case process_type_t::builtin:


@@ -259,6 +259,10 @@ class process_t {
/// launch. This helps us avoid spurious waitpid calls.
void check_generations_before_launch();
/// Mark that this process was part of a pipeline which was aborted.
/// The process was never successfully launched; give it a status of EXIT_FAILURE.
void mark_aborted_before_launch();
/// \return whether this process type is internal (block, function, or builtin).
bool is_internal() const;
@@ -517,9 +521,6 @@ job_list_t jobs_requiring_warning_on_exit(const parser_t &parser);
/// jobs_requiring_warning_on_exit().
void print_exit_warning_for_jobs(const job_list_t &jobs);
/// Mark a process as failed to execute (and therefore completed).
void job_mark_process_as_failed(const std::shared_ptr<job_t> &job, const process_t *failed_proc);
/// Use the procfs filesystem to look up how many jiffies of cpu time was used by this process. This
/// function is only available on systems with the procfs file entry 'stat', i.e. Linux.
unsigned long proc_get_jiffies(process_t *p);