// Utilities for keeping track of jobs, processes and subshells, as well as signal handling
// functions for tracking children. These functions do not themselves launch new processes, the exec
// library will call proc to create representations of the running jobs as needed.
//
// Some of the code in this file is based on code from the Glibc manual.
// IWYU pragma: no_include <__bit_reference>
#include "config.h"

#include <errno.h>
#include <fcntl.h>
#include <signal.h>
#include <stdio.h>
#include <unistd.h>
#include <wctype.h>

#include <atomic>
#include <cwchar>

#if HAVE_TERM_H
#include <curses.h>
#include <term.h>
#elif HAVE_NCURSES_TERM_H
#include <ncurses/term.h>
#endif
#include <termios.h>
#ifdef HAVE_SIGINFO_H
#include <siginfo.h>
#endif
#ifdef HAVE_SYS_SELECT_H
#include <sys/select.h>
#endif
#include <sys/time.h>  // IWYU pragma: keep
#include <sys/types.h>
#include <sys/wait.h>

#include <algorithm>  // IWYU pragma: keep
#include <memory>
#include <utility>
#include <vector>

#include "common.h"
#include "event.h"
#include "fallback.h"  // IWYU pragma: keep
#include "flog.h"
#include "global_safety.h"
#include "io.h"
#include "job_group.h"
#include "output.h"
#include "parse_tree.h"
#include "parser.h"
#include "proc.h"
#include "reader.h"
#include "sanity.h"
#include "signal.h"
#include "wcstringutil.h"
#include "wutil.h"  // IWYU pragma: keep

/// The signals that signify crashes to us.
static const int crashsignals[] = {SIGABRT, SIGBUS, SIGFPE, SIGILL, SIGSEGV, SIGSYS};

static relaxed_atomic_bool_t s_is_interactive_session{false};
bool is_interactive_session() { return s_is_interactive_session; }
void set_interactive_session(bool flag) { s_is_interactive_session = flag; }

static relaxed_atomic_bool_t s_is_login{false};
bool get_login() { return s_is_login; }
void mark_login() { s_is_login = true; }

static relaxed_atomic_bool_t s_no_exec{false};
bool no_exec() { return s_no_exec; }
void mark_no_exec() { s_no_exec = true; }

bool have_proc_stat() {
    // Check for /proc/self/stat to see if we are running with Linux-style procfs.
    static const bool s_result = (access("/proc/self/stat", R_OK) == 0);
    return s_result;
}

static relaxed_atomic_t<job_control_t> job_control_mode{job_control_t::interactive};

job_control_t get_job_control_mode() { return job_control_mode; }

void set_job_control_mode(job_control_t mode) {
    job_control_mode = mode;

    // HACK: when fish (or any shell) launches a job with job control, it will put the job into its
    // own pgroup and call tcsetpgrp() to allow that pgroup to own the terminal (making fish a
    // background process). When the job finishes, fish will try to reclaim the terminal via
    // tcsetpgrp(), but as fish is now a background process it will receive SIGTTOU and stop! Ensure
    // that doesn't happen by ignoring SIGTTOU.
    // Note that if we become interactive, we also ignore SIGTTOU.
    if (mode == job_control_t::all) {
        signal(SIGTTOU, SIG_IGN);
    }
}

void proc_init() { signal_set_handlers_once(false); }

/// Return true if all processes in the job are stopped or completed, and there is at least one
/// stopped process.
bool job_t::is_stopped() const {
    bool has_stopped = false;
    for (const process_ptr_t &p : processes) {
        if (!p->completed && !p->stopped) {
            return false;
        }
        has_stopped |= p->stopped;
    }
    return has_stopped;
}

/// Return true if all processes in the job have completed.
bool job_t::is_completed() const {
    assert(!processes.empty());
    for (const process_ptr_t &p : processes) {
        if (!p->completed) {
            return false;
        }
    }
    return true;
}

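/// \return whether this job should post job_exit events when it completes: it must have acquired a
/// pgid (i.e. actually launched something external), and be the root of its job group.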
bool job_t::posts_job_exit_events() const {
    // If we never got a pgid then we never launched the external process, so don't report it.
    if (!this->get_pgid()) return false;

    // Only report root job exits.
    // For example in `ls | begin ; cat ; end` we don't need to report the cat sub-job.
    if (!flags().is_group_root) return false;

    // Only jobs with external processes post job_exit events.
    return this->has_external_proc();
}

bool job_t::job_chain_is_fully_constructed() const { return group->is_root_constructed(); }

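/// Deliver \p signal to this job: via killpg() if the job has its own pgroup, otherwise to each of
/// its unfinished processes individually. \return true on success, false on failure.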
bool job_t::signal(int signal) {
    // Presumably we are distinguishing between the two cases below because we do
    // not want to send ourselves the signal in question in case the job shares
    // a pgid with the shell.
    auto pgid = get_pgid();
    if (pgid.has_value() && *pgid != getpgrp()) {
        if (killpg(*pgid, signal) == -1) {
            char buffer[512];
            snprintf(buffer, sizeof buffer, "killpg(%d, %s)", *pgid, strsignal(signal));
            wperror(str2wcstring(buffer).c_str());
            return false;
        }
    } else {
        for (const auto &p : processes) {
            if (!p->completed && p->pid && kill(p->pid, signal) == -1) {
                return false;
            }
        }
    }

    return true;
}

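/// \return the statuses for this job's pipeline, or none if no process in the job ever received a
/// status.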
maybe_t<statuses_t> job_t::get_statuses() const {
    statuses_t st{};
    bool has_status = false;
    int laststatus = 0;
    st.pipestatus.reserve(processes.size());
    for (const auto &p : processes) {
        auto status = p->status;
        if (status.is_empty()) {
            // Corner case for if a variable assignment is part of a pipeline.
            // e.g. `false | set foo bar | true` will push 1 in the second spot,
            // for a complete pipestatus of `1 1 0`.
            st.pipestatus.push_back(laststatus);
            continue;
        }
        if (status.signal_exited()) {
            st.kill_signal = status.signal_code();
        }
        laststatus = status.status_value();
        has_status = true;
        st.pipestatus.push_back(status.status_value());
    }
    if (!has_status) {
        return none();
    }
    st.status = flags().negate ? !laststatus : laststatus;
    return st;
}

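/// Mark this internal process as exited with \p status, and wake any waiters by posting to the
/// internal_exit topic.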
void internal_proc_t::mark_exited(proc_status_t status) {
    assert(!exited() && "Process is already exited");
    status_.store(status, std::memory_order_relaxed);
    exited_.store(true, std::memory_order_release);
    topic_monitor_t::principal().post(topic_t::internal_exit);
    FLOG(proc_internal_proc, L"Internal proc", internal_proc_id_, L"exited with status",
         status.status_value());
}

static int64_t next_proc_id() {
    static std::atomic<uint64_t> s_next{};
    return ++s_next;
}

internal_proc_t::internal_proc_t() : internal_proc_id_(next_proc_id()) {}

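/// \return the list of still-running background jobs which the user should be warned about before
/// exiting.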
job_list_t jobs_requiring_warning_on_exit(const parser_t &parser) {
    job_list_t result;
    for (const auto &job : parser.jobs()) {
        if (!job->is_foreground() && job->is_constructed() && !job->is_completed()) {
            result.push_back(job);
        }
    }
    return result;
}

void print_exit_warning_for_jobs(const job_list_t &jobs) {
    fputws(_(L"There are still jobs active:\n"), stdout);
    fputws(_(L"\n   PID  Command\n"), stdout);
    for (const auto &j : jobs) {
        fwprintf(stdout, L"%6d  %ls\n", j->processes[0]->pid, j->command_wcstr());
    }
    fputws(L"\n", stdout);
    fputws(_(L"A second attempt to exit will terminate them.\n"), stdout);
    fputws(_(L"Use 'disown PID' to remove jobs from the list without terminating them.\n"), stdout);
    reader_schedule_prompt_repaint();
}

/// Set the status of \p proc to \p status.
static void handle_child_status(const shared_ptr<job_t> &job, process_t *proc,
                                proc_status_t status) {
    proc->status = status;
    if (status.stopped()) {
        proc->stopped = true;
    } else if (status.continued()) {
        proc->stopped = false;
    } else {
        proc->completed = true;
    }

    // If the child was killed by SIGINT or SIGQUIT, then treat it as if we received that signal.
    if (status.signal_exited()) {
        int sig = status.signal_code();
        if (sig == SIGINT || sig == SIGQUIT) {
            if (is_interactive_session()) {
                // Mark the job group as cancelled.
                job->group->cancel_with_signal(sig);
            } else {
                // Deliver the SIGINT or SIGQUIT signal to ourself since we're not interactive.
                struct sigaction act;
                sigemptyset(&act.sa_mask);
                act.sa_flags = 0;
                act.sa_handler = SIG_DFL;
                sigaction(sig, &act, nullptr);
                kill(getpid(), sig);
            }
        }
    }
}

process_t::process_t() = default;

void process_t::check_generations_before_launch() {
    gens_ = topic_monitor_t::principal().current_generations();
}

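/// Mark this process as completed without ever launching it, stamping a failure status unless a
/// failure status (e.g. STATUS_NOT_EXECUTABLE) was already set.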
void process_t::mark_aborted_before_launch() {
    this->completed = true;
    // The status may have already been set to e.g. STATUS_NOT_EXECUTABLE.
    // Only stomp a successful status.
    if (this->status.is_success()) {
        this->status = proc_status_t::from_exit_code(EXIT_FAILURE);
    }
}

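/// \return whether this process runs inside fish itself (a builtin, function, or block) rather
/// than as an external command.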
bool process_t::is_internal() const {
    switch (type) {
        case process_type_t::builtin:
        case process_type_t::function:
        case process_type_t::block_node:
            return true;
        case process_type_t::external:
        case process_type_t::exec:
            return false;
        default:
            assert(false &&
                   "The fish developers forgot to include a process_t. Please report a bug");
            return true;
    }
    assert(false &&
           "process_t::is_internal: Total logic failure, universe is broken. Please replace "
           "universe and retry.");
    return true;
}

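/// \return a wait handle for this process, creating it on first use. Only external processes with
/// a valid pid are waitable; for others this returns nullptr.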
wait_handle_ref_t process_t::make_wait_handle(internal_job_id_t jid) {
    if (type != process_type_t::external || pid <= 0) {
        // Not waitable.
        return nullptr;
    }
    if (!wait_handle_) {
        wait_handle_ = std::make_shared<wait_handle_t>(this->pid, jid, wbasename(this->actual_cmd));
    }
    return wait_handle_;
}

static uint64_t next_internal_job_id() {
    static std::atomic<uint64_t> s_next{};
    return ++s_next;
}

job_t::job_t(const properties_t &props, wcstring command_str)
    : properties(props),
      command_str(std::move(command_str)),
      internal_job_id(next_internal_job_id()) {}

job_t::~job_t() = default;

void job_t::mark_constructed() {
    assert(!is_constructed() && "Job was already constructed");
    mut_flags().constructed = true;
    if (flags().is_group_root) {
        group->mark_root_constructed();
    }
}

bool job_t::has_internal_proc() const {
    for (const auto &p : processes) {
        if (p->is_internal()) return true;
    }
    return false;
}

bool job_t::has_external_proc() const {
    for (const auto &p : processes) {
        if (!p->is_internal()) return true;
    }
    return false;
}

/// A list of pids/pgids that have been disowned. They are kept around until either they exit or
/// we exit. Poll these from time-to-time to prevent zombie processes from happening (#5342).
static owning_lock<std::vector<pid_t>> s_disowned_pids;

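/// Record the pgid (or the individual external pids) of the disowned job \p j, so that we can
/// reap them later and avoid zombies.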
void add_disowned_job(const job_t *j) {
    assert(j && "Null job");

    // Never add our own (or an invalid) pgid as it is not unique to only
    // one job, and may result in a deadlock if we attempt the wait.
    auto pgid = j->get_pgid();
    auto disowned_pids = s_disowned_pids.acquire();
    if (pgid && *pgid != getpgrp() && *pgid > 0) {
        // waitpid(2) is signalled to wait on a process group rather than a
        // process id by using the negative of its value.
        disowned_pids->push_back(*pgid * -1);
    } else {
        // Instead, add the PIDs of any external processes
        for (auto &process : j->processes) {
            if (process->pid) {
                disowned_pids->push_back(process->pid);
            }
        }
    }
}

// Reap any pids in our disowned list that have exited. This is used to avoid zombies.
static void reap_disowned_pids() {
    auto disowned_pids = s_disowned_pids.acquire();
    auto try_reap1 = [](pid_t pid) {
        int status;
        int ret = waitpid(pid, &status, WNOHANG);
        if (ret > 0) {
            FLOGF(proc_reap_external, "Reaped disowned PID or PGID %d", pid);
        }
        return ret;
    };
    // waitpid returns 0 iff the PID/PGID in question has not changed state; remove the pid/pgid
    // if it has changed or an error occurs (presumably ECHILD because the child does not exist)
    disowned_pids->erase(std::remove_if(disowned_pids->begin(), disowned_pids->end(), try_reap1),
                         disowned_pids->end());
}

/// See if any reapable processes have exited, and mark them accordingly.
/// \param block_ok if no reapable processes have exited, block until one is (or until we receive a
/// signal).
static void process_mark_finished_children(parser_t &parser, bool block_ok) {
    ASSERT_IS_MAIN_THREAD();

    // Get the exit and signal generations of all reapable processes.
    // The exit generation tells us if we have an exit; the signal generation allows for detecting
    // SIGHUP and SIGINT.
    // Go through each process and figure out if and how it wants to be reaped.
    generation_list_t reapgens = generation_list_t::invalids();
    for (const auto &j : parser.jobs()) {
        for (const auto &proc : j->processes) {
            if (!j->can_reap(proc)) continue;

            if (proc->pid > 0) {
                // Reaps with a pid.
                reapgens.set_min_from(topic_t::sigchld, proc->gens_);
                reapgens.set_min_from(topic_t::sighupint, proc->gens_);
            }
            if (proc->internal_proc_) {
                // Reaps with an internal process.
                reapgens.set_min_from(topic_t::internal_exit, proc->gens_);
                reapgens.set_min_from(topic_t::sighupint, proc->gens_);
            }
        }
    }

    // Now check for changes, optionally waiting.
    if (!topic_monitor_t::principal().check(&reapgens, block_ok)) {
        // Nothing changed.
        return;
    }

    // We got some changes. Since we last checked we received SIGCHLD and/or HUP/INT.
    // Update the hup/int generations and reap any reapable processes.
    // We structure this as two loops for some simplicity.
    // First reap all pids.
    for (const auto &j : parser.jobs()) {
        for (const auto &proc : j->processes) {
            // Does this proc have a pid that is reapable?
            if (proc->pid <= 0 || !j->can_reap(proc)) continue;

            // Always update the signal hup/int gen.
            proc->gens_.sighupint = reapgens.sighupint;

            // Nothing to do if we did not get a new sigchld.
            if (proc->gens_.sigchld == reapgens.sigchld) continue;
            proc->gens_.sigchld = reapgens.sigchld;

            // Ok, we are reapable. Run waitpid()!
            int statusv = -1;
            pid_t pid = waitpid(proc->pid, &statusv, WNOHANG | WUNTRACED | WCONTINUED);
            assert((pid <= 0 || pid == proc->pid) && "Unexpected waitpid() return");
            if (pid <= 0) continue;

            // The process has stopped or exited! Update its status.
            proc_status_t status = proc_status_t::from_waitpid(statusv);
            handle_child_status(j, proc.get(), status);
            if (status.stopped()) {
                j->group->set_is_foreground(false);
            }
            if (status.continued()) {
                j->mut_flags().notified = false;
            }
            if (status.normal_exited() || status.signal_exited()) {
                FLOGF(proc_reap_external, "Reaped external process '%ls' (pid %d, status %d)",
                      proc->argv0(), pid, proc->status.status_value());
            } else {
                assert(status.stopped() || status.continued());
                FLOGF(proc_reap_external, "External process '%ls' (pid %d, %s)", proc->argv0(),
                      proc->pid, proc->status.stopped() ? "stopped" : "continued");
            }
        }
    }

    // We are done reaping pids.
    // Reap internal processes.
    for (const auto &j : parser.jobs()) {
        for (const auto &proc : j->processes) {
            // Does this proc have an internal process that is reapable?
            if (!proc->internal_proc_ || !j->can_reap(proc)) continue;

            // Always update the signal hup/int gen.
            proc->gens_.sighupint = reapgens.sighupint;

            // Nothing to do if we did not get a new internal exit.
            if (proc->gens_.internal_exit == reapgens.internal_exit) continue;
            proc->gens_.internal_exit = reapgens.internal_exit;

            // Has the process exited?
            if (!proc->internal_proc_->exited()) continue;

            // The process gets the status from its internal proc.
            handle_child_status(j, proc.get(), proc->internal_proc_->get_status());
            FLOGF(proc_reap_internal, "Reaped internal process '%ls' (id %llu, status %d)",
                  proc->argv0(), proc->internal_proc_->get_id(), proc->status.status_value());
        }
    }

    // Remove any zombies.
    reap_disowned_pids();
}

/// Given a job that has completed, generate job_exit, process_exit, and caller_exit events.
static void generate_exit_events(const job_ref_t &j, std::vector<event_t> *out_evts) {
    // Generate proc and job exit events, except for jobs originating in event handlers.
    if (!j->from_event_handler()) {
        // process_exit events.
        for (const auto &proc : j->processes) {
            if (proc->pid > 0) {
                out_evts->push_back(event_t::process_exit(proc->pid, proc->status.status_value()));
            }
        }

        // job_exit events.
        if (j->posts_job_exit_events()) {
            if (auto last_pid = j->get_last_pid()) {
                out_evts->push_back(event_t::job_exit(*last_pid, j->internal_job_id));
            }
        }
    }
    // Generate caller_exit events.
    out_evts->push_back(event_t::caller_exit(j->internal_job_id, j->job_id()));
}

/// \return whether to emit a fish_job_summary call for a process.
static bool proc_wants_summary(const shared_ptr<job_t> &j, const process_ptr_t &p) {
    // Are we completed with a pid?
    if (!p->completed || !p->pid) return false;

    // Did we die due to a signal other than SIGPIPE?
    auto s = p->status;
    if (!s.signal_exited() || s.signal_code() == SIGPIPE) return false;

    // Does the job want to suppress notifications?
    // Note we always report crashes.
    if (j->skip_notification() && !contains(crashsignals, s.signal_code())) return false;

    return true;
}

/// \return whether to emit a fish_job_summary call for a job as a whole. We may also emit this for
/// its individual processes.
static bool job_wants_summary(const shared_ptr<job_t> &j) {
    // Did we already print a status message?
    if (j->flags().notified) return false;

    // Do we just skip notifications?
    if (j->skip_notification()) return false;

    // Do we have a single process which will also report? If so then that suffices for us.
    if (j->processes.size() == 1 && proc_wants_summary(j, j->processes.front())) return false;

    // Are we foreground?
    // The idea here is to not print status messages for jobs that execute in the foreground (i.e.
    // without & and without being `bg`).
    if (j->is_foreground()) return false;

    return true;
}

/// \return whether we want to emit a fish_job_summary call for a job or any of its processes.
bool job_or_proc_wants_summary(const shared_ptr<job_t> &j) {
    if (job_wants_summary(j)) return true;
    for (const auto &p : j->processes) {
        if (proc_wants_summary(j, p)) return true;
    }
    return false;
}

/// Invoke the fish_job_summary function by executing the given command.
static void call_job_summary(parser_t &parser, const wcstring &cmd) {
    event_t event(event_type_t::generic);
    event.desc.str_param1 = L"fish_job_summary";
    block_t *b = parser.push_block(block_t::event_block(event));
    auto saved_status = parser.get_last_statuses();
    parser.eval(cmd, io_chain_t());
    parser.set_last_statuses(saved_status);
    parser.pop_block(b);
}

// \return a command which invokes fish_job_summary.
// The process pointer may be null, in which case it represents the entire job.
// Note this implements the arguments which fish_job_summary expects.
wcstring summary_command(const job_ref_t &j, const process_ptr_t &p = nullptr) {
    wcstring buffer = L"fish_job_summary";

    // Job id.
    append_format(buffer, L" %d", j->job_id());

    // 1 if foreground, 0 if background.
    append_format(buffer, L" %d", static_cast<int>(j->is_foreground()));

    // Command.
    buffer.push_back(L' ');
    buffer.append(escape_string(j->command(), ESCAPE_ALL));

    if (!p) {
        // No process, we are summarizing the whole job.
        buffer.append(j->is_stopped() ? L" STOPPED" : L" ENDED");
    } else {
        // We are summarizing a process which exited with a signal.
        // Arguments are the signal name and description.
        int sig = p->status.signal_code();
        buffer.push_back(L' ');
        buffer.append(escape_string(sig2wcs(sig), ESCAPE_ALL));

        buffer.push_back(L' ');
        buffer.append(escape_string(signal_get_desc(sig), ESCAPE_ALL));

        // If we have multiple processes, we also append the pid and argv.
        if (j->processes.size() > 1) {
            append_format(buffer, L" %d", p->pid);

            buffer.push_back(L' ');
            buffer.append(escape_string(p->argv0(), ESCAPE_ALL));
        }
    }
    return buffer;
}

// Summarize a list of jobs, by emitting calls to fish_job_summary.
// Note the given list must NOT be the parser's own job list, since the call to fish_job_summary
// could modify it.
static bool summarize_jobs(parser_t &parser, const std::vector<job_ref_t> &jobs) {
    if (jobs.empty()) return false;

    for (const auto &j : jobs) {
        if (j->is_stopped()) {
            call_job_summary(parser, summary_command(j));
        } else {
            // Completed job.
            for (const auto &p : j->processes) {
                if (proc_wants_summary(j, p)) {
                    call_job_summary(parser, summary_command(j, p));
                }
            }

            // Overall status for the job.
            if (job_wants_summary(j)) {
                call_job_summary(parser, summary_command(j));
            }
        }
    }
    return true;
}

/// Remove all disowned jobs whose job chain is fully constructed (that is, do not erase disowned
/// jobs that still have an in-flight parent job). Note we never print statuses for such jobs.
static void remove_disowned_jobs(job_list_t &jobs) {
    auto iter = jobs.begin();
    while (iter != jobs.end()) {
        const auto &j = *iter;
        if (j->flags().disown_requested && j->job_chain_is_fully_constructed()) {
            iter = jobs.erase(iter);
        } else {
            ++iter;
        }
    }
}

/// Given that a job has completed, check if it may be wait'ed on; if so add it to the wait handle
/// store. Then mark all wait handles as complete.
static void save_wait_handle_for_completed_job(const shared_ptr<job_t> &job,
                                               wait_handle_store_t &store) {
    assert(job && job->is_completed() && "Job null or not completed");
    // Are we a background job?
    if (!job->is_foreground()) {
        for (auto &proc : job->processes) {
            store.add(proc->make_wait_handle(job->internal_job_id));
        }
    }

    // Mark all wait handles as complete (but don't create just for this).
    for (auto &proc : job->processes) {
        if (wait_handle_ref_t wh = proc->get_wait_handle()) {
            wh->status = proc->status.status_value();
            wh->completed = true;
        }
    }
}

/// Remove completed jobs from the job list, printing status messages as appropriate.
/// \return whether something was printed.
static bool process_clean_after_marking(parser_t &parser, bool allow_interactive) {
    ASSERT_IS_MAIN_THREAD();

    // This function may fire an event handler, we do not want to call ourselves recursively (to
    // avoid infinite recursion).
    if (parser.libdata().is_cleaning_procs) {
        return false;
    }
    const scoped_push<bool> cleaning(&parser.libdata().is_cleaning_procs, true);

    // This may be invoked in an exit handler, after the TERM has been torn down.
    // Don't try to print in that case (#3222).
    const bool interactive = allow_interactive && cur_term != nullptr;

    // Remove all disowned jobs.
    remove_disowned_jobs(parser.jobs());

    // Accumulate exit events into a new list, which we fire after the list manipulation is
    // complete.
    std::vector<event_t> exit_events;

    // Defer processing under-construction jobs or jobs that want a message when we are not
    // interactive.
    auto should_process_job = [=](const shared_ptr<job_t> &j) {
        // Do not attempt to process jobs which are not yet constructed.
        // Do not attempt to process jobs that need to print a status message,
        // unless we are interactive, in which case printing is OK.
        return j->is_constructed() && (interactive || !job_or_proc_wants_summary(j));
    };

    // The list of jobs to summarize. Some of these jobs are completed and are removed from the
    // parser's job list, others are stopped and remain in the list.
    std::vector<job_ref_t> jobs_to_summarize;

    // Handle stopped jobs. These stay in our list.
    for (const auto &j : parser.jobs()) {
        if (!j->is_completed() && j->is_stopped() && should_process_job(j) &&
            job_wants_summary(j)) {
            j->mut_flags().notified = true;
            jobs_to_summarize.push_back(j);
        }
    }

    // Remove completed, processable jobs from our job list.
    for (auto iter = parser.jobs().begin(); iter != parser.jobs().end();) {
        const job_ref_t &j = *iter;
        if (!should_process_job(j) || !j->is_completed()) {
            ++iter;
            continue;
        }
        // We are committed to removing this job.
        // Remember it for summary later, generate exit events, maybe save its wait handle if it
        // finished in the background.
        if (job_or_proc_wants_summary(j)) jobs_to_summarize.push_back(j);
        generate_exit_events(j, &exit_events);
        save_wait_handle_for_completed_job(j, parser.get_wait_handles());

        // Remove it.
        iter = parser.jobs().erase(iter);
    }

    // Emit calls to fish_job_summary.
    bool printed = summarize_jobs(parser, jobs_to_summarize);

    // Post pending exit events.
    for (const auto &evt : exit_events) {
        event_fire(parser, evt);
    }

    if (printed) {
        fflush(stdout);
    }

    return printed;
}

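/// Reap any dead children without blocking, print status summaries, and remove completed jobs
/// from the job list. \return whether a status message was printed.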
bool job_reap(parser_t &parser, bool allow_interactive) {
    ASSERT_IS_MAIN_THREAD();
    // Early out for the common case that there are no jobs.
    if (parser.jobs().empty()) {
        return false;
    }

    process_mark_finished_children(parser, false /* not block_ok */);
    return process_clean_after_marking(parser, allow_interactive);
}

/// Get the CPU time for the specified process.
unsigned long proc_get_jiffies(pid_t inpid) {
    if (inpid <= 0 || !have_proc_stat()) return 0;

    char state;
    int pid, ppid, pgrp, session, tty_nr, tpgid, exit_signal, processor;
    long int cutime, cstime, priority, nice, placeholder, itrealvalue, rss;
    unsigned long int flags, minflt, cminflt, majflt, cmajflt, utime, stime, starttime, vsize, rlim,
        startcode, endcode, startstack, kstkesp, kstkeip, signal, blocked, sigignore, sigcatch,
        wchan, nswap, cnswap;
    char comm[1024];

    /// Maximum length of a /proc/[PID]/stat filename.
    constexpr size_t FN_SIZE = 256;
    char fn[FN_SIZE];
    std::snprintf(fn, FN_SIZE, "/proc/%d/stat", inpid);
    // Don't use autoclose_fd here, we will fdopen() and then fclose() instead.
    int fd = open_cloexec(fn, O_RDONLY);
    if (fd < 0) return 0;

    // TODO: replace the use of fscanf() as it is brittle and should never be used.
    FILE *f = fdopen(fd, "r");
    int count = fscanf(f,
                       "%9d %1023s %c %9d %9d %9d %9d %9d %9lu "
                       "%9lu %9lu %9lu %9lu %9lu %9lu %9ld %9ld %9ld "
                       "%9ld %9ld %9ld %9lu %9lu %9ld %9lu %9lu %9lu "
                       "%9lu %9lu %9lu %9lu %9lu %9lu %9lu %9lu %9lu "
                       "%9lu %9d %9d ",
                       &pid, comm, &state, &ppid, &pgrp, &session, &tty_nr, &tpgid, &flags, &minflt,
                       &cminflt, &majflt, &cmajflt, &utime, &stime, &cutime, &cstime, &priority,
                       &nice, &placeholder, &itrealvalue, &starttime, &vsize, &rss, &rlim,
                       &startcode, &endcode, &startstack, &kstkesp, &kstkeip, &signal, &blocked,
                       &sigignore, &sigcatch, &wchan, &nswap, &cnswap, &exit_signal, &processor);
    fclose(f);
    if (count < 17) return 0;
    return utime + stime + cutime + cstime;
}

/// Update the CPU time for all jobs.
void proc_update_jiffies(parser_t &parser) {
    for (const auto &job : parser.jobs()) {
        for (process_ptr_t &p : job->processes) {
            p->last_time = timef();
            p->last_jiffies = proc_get_jiffies(p->pid);
        }
    }
}

// Return control of the terminal to a job's process group. continuing_from_stopped is true if we
// are continuing a previously-stopped job, in which case we need to restore terminal attributes.
int terminal_maybe_give_to_job_group(const job_group_t *jg, bool continuing_from_stopped) {
    enum { notneeded = 0, success = 1, error = -1 };

    if (!jg->should_claim_terminal()) {
        // The job doesn't want the terminal.
        return notneeded;
    }

    // Get the pgid; we may not have one.
    pid_t pgid{};
    if (auto mpgid = jg->get_pgid()) {
        pgid = *mpgid;
    } else {
        FLOG(proc_termowner, L"terminal_give_to_job() returning early due to no process group");
        return notneeded;
    }

    // If we are continuing, ensure that stdin is marked as blocking first (issue #176).
    // Also restore tty modes.
    if (continuing_from_stopped) {
        make_fd_blocking(STDIN_FILENO);
        if (jg->tmodes.has_value()) {
            int res = tcsetattr(STDIN_FILENO, TCSADRAIN, &jg->tmodes.value());
            if (res < 0) wperror(L"tcsetattr");
        }
    }

    // Ok, we want to transfer to the child.
    // Note it is important to be very careful about calling tcsetpgrp()!
    // fish ignores SIGTTOU which means that it has the power to reassign the tty even if it doesn't
    // own it. This means that other processes may get SIGTTOU and become zombies.
    // Check who owns the tty now. There are five cases of interest:
    //   1. The process's pgrp is the same as fish. In that case there is nothing to do.
    //   2. There is no tty at all (tcgetpgrp() returns -1). For example running from a pure script.
    //      Of course do not transfer it in that case.
    //   3. The tty is owned by the process. This comes about often, as the process will call
    //      tcsetpgrp() on itself between fork and exec. This is the essential race inherent in
    //      tcsetpgrp(). In this case we want to reclaim the tty, but do not need to transfer it
    //      ourselves since the child won the race.
    //   4. The tty is owned by a different process. This may come about if fish is running in the
    //      background with job control enabled. Do not transfer it.
    //   5. The tty is owned by fish. In that case we want to transfer the pgid.
    pid_t fish_pgrp = getpgrp();
    if (fish_pgrp == pgid) {
        // Case 1.
        return notneeded;
    }
    pid_t current_owner = tcgetpgrp(STDIN_FILENO);
    if (current_owner < 0) {
        // Case 2.
        return notneeded;
    } else if (current_owner == pgid) {
        // Case 3.
        return success;
    } else if (current_owner != pgid && current_owner != fish_pgrp) {
        // Case 4.
        return notneeded;
    }
    // Case 5 - we do want to transfer it.

    // The tcsetpgrp(2) man page says that EPERM is thrown if "pgrp has a supported value, but
    // is not the process group ID of a process in the same session as the calling process."
    // Since we _guarantee_ that this isn't the case (the child calls setpgid before it calls
    // SIGSTOP, and the child was created in the same session as us), it seems that EPERM is
    // being thrown because of a caching issue - the call to tcsetpgrp isn't seeing the
    // newly-created process group just yet. On this developer's test machine (WSL running Linux
    // 4.4.0), EPERM does indeed disappear on retry. The important thing is that we can
    // guarantee the process isn't going to exit while we wait (which would cause us to possibly
    // block indefinitely).
    while (tcsetpgrp(STDIN_FILENO, pgid) != 0) {
        FLOGF(proc_termowner, L"tcsetpgrp failed: %d", errno);

        // Before anything else, make sure that it's even necessary to call tcsetpgrp.
        // Since it usually _is_ necessary, we only check in case it fails so as to avoid the
        // unnecessary syscall and associated context switch, which profiling has shown to have
        // a significant cost when running process groups in quick succession.
        int getpgrp_res = tcgetpgrp(STDIN_FILENO);
        if (getpgrp_res < 0) {
            switch (errno) {
                case ENOTTY:
                    // stdin is not a tty. This may come about if job control is enabled but we are
                    // not a tty - see #6573.
                    return notneeded;
                case EBADF:
                    // stdin has been closed. Workaround a glibc bug - see #3644.
                    redirect_tty_output();
                    return notneeded;
                default:
                    wperror(L"tcgetpgrp");
                    return error;
            }
        }
        if (getpgrp_res == pgid) {
            FLOGF(proc_termowner, L"Process group %d already has control of terminal", pgid);
            return notneeded;
        }

        bool pgroup_terminated = false;
        if (errno == EINVAL) {
            // OS X returns EINVAL if the process group no longer lives. Probably other OSes,
            // too. Unlike EPERM below, EINVAL can only happen if the process group has
            // terminated.
            pgroup_terminated = true;
        } else if (errno == EPERM) {
            // Retry so long as this isn't because the process group is dead.
            int wait_status = 0;
            int wait_result = waitpid(-1 * pgid, &wait_status, WNOHANG);
            if (wait_result == -1) {
                // Note that -1 is technically an "error" for waitpid in the sense that an
                // invalid argument was specified because no such process group exists any
                // longer. This is the observed behavior on Linux 4.4.0. A "success" result
                // would mean processes from the group still exist and are running in some
                // state or another.
                pgroup_terminated = true;
            } else {
                // Debug the original tcsetpgrp error (not the waitpid errno) to the log, and
                // then retry until not EPERM or the process group has exited.
                FLOGF(proc_termowner, L"terminal_give_to_job(): EPERM with pgid %d.\n", pgid);
                continue;
            }
        } else if (errno == ENOTTY) {
            // stdin is not a TTY. In general we expect this to be caught via the tcgetpgrp
            // call's EBADF handler above.
            return notneeded;
        } else {
            FLOGF(warning, _(L"Could not send job %d ('%ls') with pgid %d to foreground"),
                  jg->get_id(), jg->get_command().c_str(), pgid);
            wperror(L"tcsetpgrp");
            return error;
        }

        if (pgroup_terminated) {
            // All processes in the process group have exited.
            // Since we delay reaping any processes in a process group until all members of that
            // job/group have been started, the only way this can happen is if the very last
            // process in the group terminated and didn't need to access the terminal, otherwise
            // it would have hung waiting for terminal IO (SIGTTIN). We can safely ignore this.
            FLOGF(proc_termowner, L"tcsetpgrp called but process group %d has terminated.\n", pgid);
            return notneeded;
        }

        break;
    }

    return success;
}

/// Returns control of the terminal to the shell, and saves the terminal attribute state to the job
/// group, so that we can restore the terminal ownership to the job at a later time.
static bool terminal_return_from_job_group(job_group_t *jg) {
    errno = 0;
    auto pgid = jg->get_pgid();
    if (!pgid.has_value()) {
        FLOG(proc_pgroup, "terminal_return_from_job() returning early due to no process group");
        return true;
    }

    FLOG(proc_pgroup, "fish reclaiming terminal after job pgid", *pgid);
    if (tcsetpgrp(STDIN_FILENO, getpgrp()) == -1) {
        if (errno == ENOTTY) redirect_tty_output();
        FLOGF(warning, _(L"Could not return shell to foreground"));
        wperror(L"tcsetpgrp");
        return false;
    }

    // Save the job's terminal modes.
    struct termios tmodes {};
    if (tcgetattr(STDIN_FILENO, &tmodes)) {
        // If it's not a tty, it's not a tty, and there are no attributes to save (or restore)
        if (errno == ENOTTY) return false;
        FLOGF(warning, _(L"Could not return shell to foreground"));
        wperror(L"tcgetattr");
        return false;
    }
    jg->tmodes = tmodes;
    return true;
}

bool job_t::is_foreground() const { return group->is_foreground(); }

maybe_t<pid_t> job_t::get_pgid() const { return group->get_pgid(); }

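/// \return the pid of the last external process in the job's pipeline, or none if the job has no
/// external processes.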
maybe_t<pid_t> job_t::get_last_pid() const {
    for (auto iter = processes.rbegin(); iter != processes.rend(); ++iter) {
        const process_t *proc = iter->get();
        if (proc->pid > 0) return proc->pid;
    }
    return none();
}

job_id_t job_t::job_id() const { return group->get_id(); }

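/// Promote this job to the front of the job list, transfer the terminal to it if it wants one,
/// send SIGCONT if it is stopped, and, if in_foreground is set, wait for it to stop or complete
/// and record its statuses.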
void job_t::continue_job(parser_t &parser, bool in_foreground) {
    // Put job first in the job list.
    parser.job_promote(this);
    mut_flags().notified = false;

    int pgid = -2;
    if (auto tmp = get_pgid()) pgid = *tmp;

    // We must send_sigcont if the job is stopped.
    bool send_sigcont = this->is_stopped();

    FLOGF(proc_job_run, L"%ls job %d, gid %d (%ls), %ls, %ls",
          send_sigcont ? L"Continue" : L"Start", job_id(), pgid, command_wcstr(),
          is_completed() ? L"COMPLETED" : L"UNCOMPLETED",
          parser.libdata().is_interactive ? L"INTERACTIVE" : L"NON-INTERACTIVE");

    // Make sure we retake control of the terminal before leaving this function.
    bool term_transferred = false;
    cleanup_t take_term_back([&] {
        if (term_transferred) {
            // Issues of interest include #121 and #2114.
            terminal_return_from_job_group(this->group.get());
        }
    });

    if (!is_completed()) {
        int transfer = terminal_maybe_give_to_job_group(this->group.get(), send_sigcont);
        if (transfer < 0) {
            // terminal_maybe_give_to_job_group prints an error.
            return;
        }
        term_transferred = (transfer > 0);

        // If both requested and necessary, send the job a continue signal.
        if (send_sigcont) {
            // This code used to check for JOB_CONTROL to decide between using killpg to signal all
            // processes in the group or iterating over each process in the group and sending the
            // signal individually. job_t::signal() does the same, but uses the shell's own pgroup
            // to make that distinction.
            if (!signal(SIGCONT)) {
                FLOGF(proc_pgroup, "Failed to send SIGCONT to any processes in pgroup %d!", pgid);
                // This returns without bubbling up the error. Presumably that is OK.
                return;
            }

            // Reset the status of each process instance.
            for (auto &p : processes) {
                p->stopped = false;
            }
        }

        if (in_foreground) {
            // Wait for the status of our own job to change.
            while (!check_cancel_from_fish_signal() && !is_stopped() && !is_completed()) {
                process_mark_finished_children(parser, true);
            }
        }
    }

    if (in_foreground && is_completed()) {
        // Set $status only if we are in the foreground and the last process in the job has
        // finished.
        const auto &p = processes.back();
        if (p->status.normal_exited() || p->status.signal_exited()) {
            auto statuses = get_statuses();
            if (statuses) {
                parser.set_last_statuses(statuses.value());
                parser.libdata().status_count++;
            }
        }
    }
}

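/// Block until any child process changes state, then clean up completed jobs.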
void proc_wait_any(parser_t &parser) {
    process_mark_finished_children(parser, true /* block_ok */);
    process_clean_after_marking(parser, parser.libdata().is_interactive);
}

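/// Send SIGHUP to every incomplete job not in fish's own pgroup, continuing stopped jobs first so
/// that they are able to receive the hangup.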
void hup_jobs(const job_list_t &jobs) {
    pid_t fish_pgrp = getpgrp();
    for (const auto &j : jobs) {
        auto pgid = j->get_pgid();
        if (pgid && *pgid != fish_pgrp && !j->is_completed()) {
            if (j->is_stopped()) {
                j->signal(SIGCONT);
            }
            j->signal(SIGHUP);
        }
    }
}

static std::atomic<bool> s_is_within_fish_initialization{false};

void set_is_within_fish_initialization(bool flag) { s_is_within_fish_initialization.store(flag); }

bool is_within_fish_initialization() { return s_is_within_fish_initialization.load(); }