// Utilities for keeping track of jobs, processes and subshells, as well as signal handling
// functions for tracking children. These functions do not themselves launch new processes, the exec
// library will call proc to create representations of the running jobs as needed.
//
// Some of the code in this file is based on code from the Glibc manual.
// IWYU pragma: no_include <__bit_reference>
#include "config.h"

#include <errno.h>
#include <signal.h>
#include <stdio.h>
#include <unistd.h>
#include <wctype.h>

#include <atomic>
#include <cwchar>

#if HAVE_TERM_H
#include <curses.h>
#include <term.h>
#elif HAVE_NCURSES_TERM_H
#include <ncurses/term.h>
#endif
#include <termios.h>
#ifdef HAVE_SIGINFO_H
#include <siginfo.h>
#endif
#ifdef HAVE_SYS_SELECT_H
#include <sys/select.h>
#endif
#include <sys/time.h>  // IWYU pragma: keep
#include <sys/types.h>

#include <algorithm>  // IWYU pragma: keep
#include <memory>
#include <utility>
#include <vector>

#include "common.h"
#include "event.h"
#include "fallback.h"  // IWYU pragma: keep
#include "flog.h"
#include "global_safety.h"
#include "io.h"
#include "output.h"
#include "parse_tree.h"
#include "parser.h"
#include "proc.h"
#include "reader.h"
#include "sanity.h"
#include "signal.h"
#include "wutil.h"  // IWYU pragma: keep

/// The signals that signify crashes to us.
static const int crashsignals[] = {SIGABRT, SIGBUS, SIGFPE, SIGILL, SIGSEGV, SIGSYS};

static relaxed_atomic_t<session_interactivity_t> s_is_interactive_session{
    session_interactivity_t::not_interactive};
session_interactivity_t session_interactivity() { return s_is_interactive_session; }
void set_interactive_session(session_interactivity_t flag) { s_is_interactive_session = flag; }

static relaxed_atomic_bool_t s_is_login{false};
bool get_login() { return s_is_login; }
void mark_login() { s_is_login = true; }

static relaxed_atomic_bool_t s_no_exec{false};
bool no_exec() { return s_no_exec; }
void mark_no_exec() { s_no_exec = true; }

bool have_proc_stat() {
    // Check for /proc/self/stat to see if we are running with Linux-style procfs.
    static const bool s_result = (access("/proc/self/stat", R_OK) == 0);
    return s_result;
}

static relaxed_atomic_t<job_control_t> job_control_mode{job_control_t::interactive};

job_control_t get_job_control_mode() { return job_control_mode; }

void set_job_control_mode(job_control_t mode) { job_control_mode = mode; }

void proc_init() { signal_set_handlers_once(false); }

// Basic thread safe job IDs. The vector consumed_job_ids has a true value wherever the job ID
// corresponding to that slot is in use. The job ID corresponding to slot 0 is 1.
static owning_lock<std::vector<bool>> locked_consumed_job_ids;

job_id_t acquire_job_id() {
    auto consumed_job_ids = locked_consumed_job_ids.acquire();

    // Find the index of the first 0 slot.
    auto slot = std::find(consumed_job_ids->begin(), consumed_job_ids->end(), false);
    if (slot != consumed_job_ids->end()) {
        // We found a slot. Note that slot 0 corresponds to job ID 1.
        *slot = true;
        return static_cast<job_id_t>(slot - consumed_job_ids->begin() + 1);
    }

    // We did not find a slot; create a new slot. The size of the vector is now the job ID
    // (since it is one larger than the slot).
    consumed_job_ids->push_back(true);
    return static_cast<job_id_t>(consumed_job_ids->size());
}

void release_job_id(job_id_t jid) {
    assert(jid > 0);
    auto consumed_job_ids = locked_consumed_job_ids.acquire();
    size_t slot = static_cast<size_t>(jid - 1), count = consumed_job_ids->size();

    // Make sure this slot is within our vector and is currently set to consumed.
    assert(slot < count);
    assert(consumed_job_ids->at(slot) == true);

    // Clear it and then resize the vector to eliminate unused trailing job IDs.
    consumed_job_ids->at(slot) = false;
    while (count--) {
        if (consumed_job_ids->at(count)) break;
    }
    consumed_job_ids->resize(count + 1);
}
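
// A quick usage sketch (illustrative comment only, nothing here is executed): IDs are
// handed out lowest-first and become reusable as soon as they are released, so:
//
//   job_id_t a = acquire_job_id();  // 1
//   job_id_t b = acquire_job_id();  // 2
//   release_job_id(a);              // slot 0 is free again
//   job_id_t c = acquire_job_id();  // 1 again, reusing the freed slot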

job_t *job_t::from_job_id(job_id_t id) {
    ASSERT_IS_MAIN_THREAD();
    return parser_t::principal_parser().job_get(id);
}

job_t *job_t::from_pid(pid_t pid) {
    ASSERT_IS_MAIN_THREAD();
    return parser_t::principal_parser().job_get_from_pid(pid);
}

/// Return true if all processes in the job have stopped or completed.
bool job_t::is_stopped() const {
    for (const process_ptr_t &p : processes) {
        if (!p->completed && !p->stopped) {
            return false;
        }
    }
    return true;
}

/// Return true if all processes in the job have completed.
bool job_t::is_completed() const {
    assert(!processes.empty());
    for (const process_ptr_t &p : processes) {
        if (!p->completed) {
            return false;
        }
    }
    return true;
}

bool job_t::should_report_process_exits() const {
    // This implements the behavior of process exit events only being sent for jobs containing an
    // external process. Bizarrely the process exit event is for the pgroup leader which may be fish
    // itself.
    // TODO: rationalize this.
    // If we never got a pgid then we never launched the external process, so don't report it.
    if (this->pgid == INVALID_PID) {
        return false;
    }

    // Return whether we have an external process.
    for (const auto &p : this->processes) {
        if (p->type == process_type_t::external) {
            return true;
        }
    }
    return false;
}

bool job_t::job_chain_is_fully_constructed() const { return *root_constructed; }

bool job_t::signal(int signal) {
    // Presumably we are distinguishing between the two cases below because we do
    // not want to send ourselves the signal in question in case the job shares
    // a pgid with the shell.

    if (pgid != getpgrp()) {
        if (killpg(pgid, signal) == -1) {
            char buffer[512];
            snprintf(buffer, sizeof buffer, "killpg(%d, %s)", pgid, strsignal(signal));
            wperror(str2wcstring(buffer).c_str());
            return false;
        }
    } else {
        for (const auto &p : processes) {
            if (!p->completed && p->pid && kill(p->pid, signal) == -1) {
                return false;
            }
        }
    }

    return true;
}

statuses_t job_t::get_statuses() const {
    statuses_t st{};
    st.pipestatus.reserve(processes.size());
    for (const auto &p : processes) {
        st.pipestatus.push_back(p->status.status_value());
    }
    int laststatus = st.pipestatus.back();
    st.status = flags().negate ? !laststatus : laststatus;
    return st;
}
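
// Illustration (a hedged example, not normative): for a pipeline like `false | true`,
// pipestatus would be {1, 0} and status would be 0 (the last process's status). With the
// negate flag (the `not` modifier), the last status is logically negated, so e.g.
// `not true` yields status 1.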

void internal_proc_t::mark_exited(proc_status_t status) {
    assert(!exited() && "Process is already exited");
    status_.store(status, std::memory_order_relaxed);
    exited_.store(true, std::memory_order_release);
    topic_monitor_t::principal().post(topic_t::internal_exit);
    FLOG(proc_internal_proc, L"Internal proc", internal_proc_id_, L"exited with status",
         status.status_value());
}

static uint64_t next_proc_id() {
    static std::atomic<uint64_t> s_next{};
    return ++s_next;
}

internal_proc_t::internal_proc_t() : internal_proc_id_(next_proc_id()) {}

void job_mark_process_as_failed(const std::shared_ptr<job_t> &job, const process_t *failed_proc) {
    // The given process failed to even lift off (e.g. posix_spawn failed) and so doesn't have a
    // valid pid. Mark it and everything after it as dead.
    bool found = false;
    for (process_ptr_t &p : job->processes) {
        found = found || (p.get() == failed_proc);
        if (found) {
            p->completed = true;
        }
    }
}

/// Set the status of \p proc to \p status.
static void handle_child_status(process_t *proc, proc_status_t status) {
    proc->status = status;
    if (status.stopped()) {
        proc->stopped = true;
    } else {
        proc->completed = true;
    }

    // If the child was killed by SIGINT or SIGQUIT, then treat it as if we received that signal.
    if (status.signal_exited()) {
        int sig = status.signal_code();
        if (sig == SIGINT || sig == SIGQUIT) {
            if (session_interactivity() != session_interactivity_t::not_interactive) {
                // In an interactive session, tell the principal parser to skip all blocks we're
                // executing so control-C returns control to the user.
                parser_t::skip_all_blocks();
            } else {
                // Deliver the SIGINT or SIGQUIT signal to ourself since we're not interactive.
                struct sigaction act;
                sigemptyset(&act.sa_mask);
                act.sa_flags = 0;
                act.sa_handler = SIG_DFL;
                sigaction(sig, &act, nullptr);
                kill(getpid(), sig);
            }
        }
    }
}
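
// The else-branch above is the classic "re-raise" idiom: restore the default disposition,
// then send the signal to ourselves, so that our own parent observes us as killed by
// SIGINT/SIGQUIT (WIFSIGNALED) rather than as a normal exit. A minimal sketch of the same
// idiom (illustrative only):
//
//   signal(SIGINT, SIG_DFL);  // restore the default handler
//   kill(getpid(), SIGINT);   // die by the same signal our child died by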

process_t::process_t() = default;

void process_t::check_generations_before_launch() {
    gens_ = topic_monitor_t::principal().current_generations();
}

job_t::job_t(job_id_t job_id, const properties_t &props, const job_lineage_t &lineage)
    : properties(props),
      job_id_(job_id),
      root_constructed(lineage.root_constructed ? lineage.root_constructed : this->constructed) {}

job_t::~job_t() {
    if (job_id_ != -1) release_job_id(job_id_);
}

void job_t::mark_constructed() {
    assert(!is_constructed() && "Job was already constructed");
    *constructed = true;
}

/// A list of pids/pgids that have been disowned. They are kept around until either they exit or
/// we exit. Poll these from time-to-time to prevent zombie processes from happening (#5342).
static owning_lock<std::vector<pid_t>> s_disowned_pids;

void add_disowned_pgid(pid_t pgid) {
    // NEVER add our own (or an invalid) pgid as they are not unique to only
    // one job, and may result in a deadlock if we attempt the wait.
    if (pgid != getpgrp() && pgid > 0) {
        // waitpid(2) can be made to wait on a process group rather than a
        // process id by passing the negative of its value.
        s_disowned_pids.acquire()->push_back(pgid * -1);
    }
}

// Reap any pids in our disowned list that have exited. This is used to avoid zombies.
static void reap_disowned_pids() {
    auto disowned_pids = s_disowned_pids.acquire();
    auto try_reap1 = [](pid_t pid) {
        int status;
        return waitpid(pid, &status, WNOHANG) > 0;
    };
    disowned_pids->erase(std::remove_if(disowned_pids->begin(), disowned_pids->end(), try_reap1),
                         disowned_pids->end());
}

/// See if any reapable processes have exited, and mark them accordingly.
/// \param block_ok if no reapable processes have exited, block until one is (or until we receive a
/// signal).
static void process_mark_finished_children(parser_t &parser, bool block_ok) {
    ASSERT_IS_MAIN_THREAD();

    // Get the exit and signal generations of all reapable processes.
    // The exit generation tells us if we have an exit; the signal generation allows for detecting
    // SIGHUP and SIGINT.
    topic_set_t reaptopics{};
    generation_list_t gens{};
    gens.fill(invalid_generation);
    for (const auto &j : parser.jobs()) {
        for (const auto &proc : j->processes) {
            if (auto mtopic = j->reap_topic_for_process(proc.get())) {
                topic_t topic = *mtopic;
                reaptopics.set(topic);
                gens[topic] = std::min(gens[topic], proc->gens_[topic]);

                reaptopics.set(topic_t::sighupint);
                gens[topic_t::sighupint] =
                    std::min(gens[topic_t::sighupint], proc->gens_[topic_t::sighupint]);
            }
        }
    }

    if (reaptopics.none()) {
        // No reapable processes, nothing to wait for.
        return;
    }

    // Now check for changes, optionally waiting.
    auto changed_topics = topic_monitor_t::principal().check(&gens, reaptopics, block_ok);
    if (changed_topics.none()) return;

    // We got some changes. Since we last checked we received SIGCHLD and/or HUP/INT.
    // Update the hup/int generations and reap any reapable processes.
    for (const auto &j : parser.jobs()) {
        for (const auto &proc : j->processes) {
            if (auto mtopic = j->reap_topic_for_process(proc.get())) {
                // Update the signal hup/int gen.
                proc->gens_[topic_t::sighupint] = gens[topic_t::sighupint];

                if (proc->gens_[*mtopic] < gens[*mtopic]) {
                    // Potentially reapable. Update its gen count and try reaping it.
                    proc->gens_[*mtopic] = gens[*mtopic];
                    if (proc->internal_proc_) {
                        // Try reaping an internal process.
                        if (proc->internal_proc_->exited()) {
                            proc->status = proc->internal_proc_->get_status();
                            proc->completed = true;
                            FLOGF(proc_reap_internal,
                                  "Reaped internal process '%ls' (id %llu, status %d)",
                                  proc->argv0(), proc->internal_proc_->get_id(),
                                  proc->status.status_value());
                        }
                    } else if (proc->pid > 0) {
                        // Try reaping an external process.
                        int status = -1;
                        auto pid = waitpid(proc->pid, &status, WNOHANG | WUNTRACED);
                        if (pid > 0) {
                            assert(pid == proc->pid && "Unexpected waitpid() return");
                            handle_child_status(proc.get(), proc_status_t::from_waitpid(status));
                            FLOGF(proc_reap_external,
                                  "Reaped external process '%ls' (pid %d, status %d)",
                                  proc->argv0(), pid, proc->status.status_value());
                        }
                    } else {
                        assert(0 && "Don't know how to reap this process");
                    }
                }
            }
        }
    }

    // Remove any zombies.
    reap_disowned_pids();
}

/// Given a command like "cat file", truncate it to a reasonable length.
static wcstring truncate_command(const wcstring &cmd) {
    const size_t max_len = 32;
    if (cmd.size() <= max_len) {
        // No truncation necessary.
        return cmd;
    }

    // Truncation required.
    const wchar_t *ellipsis_str = get_ellipsis_str();
    const size_t ellipsis_length = std::wcslen(ellipsis_str);  // no need for wcwidth
    size_t trunc_length = max_len - ellipsis_length;
    // Eat trailing whitespace.
    while (trunc_length > 0 && iswspace(cmd.at(trunc_length - 1))) {
        trunc_length -= 1;
    }
    wcstring result = wcstring(cmd, 0, trunc_length);
    // Append ellipsis.
    result.append(ellipsis_str);
    return result;
}
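
// For example (illustrative only): with a single-character ellipsis, a 50-character command
// is cut to its first 31 characters, trailing whitespace is trimmed, and the ellipsis is
// appended, so the result is never longer than max_len characters.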

/// Format information about job status for the user to look at.
typedef enum { JOB_STOPPED, JOB_ENDED } job_status_t;
static void print_job_status(const job_t *j, job_status_t status) {
    const wchar_t *msg = L"Job %d, '%ls' has ended";  // this is the most common status msg
    if (status == JOB_STOPPED) msg = L"Job %d, '%ls' has stopped";
    outputter_t outp;
    outp.writestr(L"\r");
    outp.writestr(format_string(_(msg), j->job_id(), truncate_command(j->command()).c_str()));
    if (clr_eol) outp.term_puts(clr_eol, 1);
    outp.writestr(L"\n");
    fflush(stdout);
    outp.flush_to(STDOUT_FILENO);
}

event_t proc_create_event(const wchar_t *msg, event_type_t type, pid_t pid, int status) {
    event_t event{type};
    event.desc.param1.pid = pid;

    event.arguments.push_back(msg);
    event.arguments.push_back(to_string(pid));
    event.arguments.push_back(to_string(status));
    return event;
}

/// Remove all disowned jobs whose job chain is fully constructed (that is, do not erase disowned
/// jobs that still have an in-flight parent job). Note we never print statuses for such jobs.
void remove_disowned_jobs(job_list_t &jobs) {
    auto iter = jobs.begin();
    while (iter != jobs.end()) {
        const auto &j = *iter;
        if (j->flags().disown_requested && j->job_chain_is_fully_constructed()) {
            iter = jobs.erase(iter);
        } else {
            ++iter;
        }
    }
}

/// Given a process in a job, print the status message for the process as appropriate, and then
/// mark the status code so we don't print again. Populate any events into \p exit_events.
/// \return true if we printed a status message, false if not.
static bool try_clean_process_in_job(process_t *p, job_t *j, std::vector<event_t> *exit_events,
                                     bool only_one_job) {
    if (!p->completed || !p->pid) {
        return false;
    }

    auto s = p->status;

    // Add an exit event if the process did not come from a job handler.
    if (!j->from_event_handler()) {
        exit_events->push_back(proc_create_event(L"PROCESS_EXIT", event_type_t::exit, p->pid,
                                                 s.normal_exited() ? s.exit_code() : -1));
    }

    // Ignore SIGPIPE. We issue it ourselves to the pipe writer when the pipe reader dies.
    if (!s.signal_exited() || s.signal_code() == SIGPIPE) {
        return false;
    }

    int proc_is_job = (p->is_first_in_job && p->is_last_in_job);
    if (proc_is_job) j->mut_flags().notified = true;

    // Handle signals other than SIGPIPE.
    // Always report crashes.
    if (j->skip_notification() && !contains(crashsignals, s.signal_code())) {
        return false;
    }

    // Print nothing if we get SIGINT in the foreground process group, to avoid spamming
    // obvious stuff on the console (#1119). If we get SIGINT for the foreground
    // process, assume the user typed ^C and can see it working. It's possible they
    // didn't, and the signal was delivered via pkill, etc., but the SIGINT/SIGTERM
    // distinction is precisely to allow INT to be from a UI
    // and TERM to be programmatic, so this assumption is keeping with the design of
    // signals. If echoctl is on, then the terminal will have written ^C to the console.
    // If off, it won't have. We don't echo ^C either way, so as to respect the user's
    // preference.
    bool printed = false;
    if (s.signal_code() != SIGINT || !j->is_foreground()) {
        if (proc_is_job) {
            // We want to report the job number, unless it's the only job, in which case
            // we don't need to.
            const wcstring job_number_desc =
                only_one_job ? wcstring() : format_string(_(L"Job %d, "), j->job_id());
            std::fwprintf(stdout, _(L"%ls: %ls\'%ls\' terminated by signal %ls (%ls)"),
                          program_name, job_number_desc.c_str(),
                          truncate_command(j->command()).c_str(), sig2wcs(s.signal_code()),
                          signal_get_desc(s.signal_code()));
        } else {
            const wcstring job_number_desc =
                only_one_job ? wcstring() : format_string(L"from job %d, ", j->job_id());
            const wchar_t *fmt =
                _(L"%ls: Process %d, \'%ls\' %ls\'%ls\' terminated by signal %ls (%ls)");
            std::fwprintf(stdout, fmt, program_name, p->pid, p->argv0(), job_number_desc.c_str(),
                          truncate_command(j->command()).c_str(), sig2wcs(s.signal_code()),
                          signal_get_desc(s.signal_code()));
        }

        if (clr_eol) outputter_t::stdoutput().term_puts(clr_eol, 1);
        std::fwprintf(stdout, L"\n");
        printed = true;
    }
    // Clear status so it is not reported more than once.
    // TODO: this seems like a clumsy way to ensure that.
    p->status = proc_status_t::from_exit_code(0);
    return printed;
}

/// \return whether this job wants a status message printed when it stops or completes.
static bool job_wants_message(const shared_ptr<job_t> &j) {
    // Did we already print a status message?
    if (j->flags().notified) return false;

    // Do we just skip notifications?
    if (j->skip_notification()) return false;

    // Are we foreground?
    // The idea here is to not print status messages for jobs that execute in the foreground (i.e.
    // without & and without being `bg`).
    if (j->is_foreground()) return false;

    return true;
}

/// Remove completed jobs from the job list, printing status messages as appropriate.
/// \return whether something was printed.
static bool process_clean_after_marking(parser_t &parser, bool allow_interactive) {
    ASSERT_IS_MAIN_THREAD();
    bool printed = false;

    // This function may fire an event handler; we do not want to call ourselves recursively (to
    // avoid infinite recursion).
    if (parser.libdata().is_cleaning_procs) {
        return false;
    }
    parser.libdata().is_cleaning_procs = true;
    const cleanup_t cleanup([&] { parser.libdata().is_cleaning_procs = false; });

    // This may be invoked in an exit handler, after the TERM has been torn down.
    // Don't try to print in that case (#3222).
    const bool interactive = allow_interactive && cur_term != nullptr;

    // Remove all disowned jobs.
    remove_disowned_jobs(parser.jobs());

    // Accumulate exit events into a new list, which we fire after the list manipulation is
    // complete.
    std::vector<event_t> exit_events;

    // Print status messages for completed or stopped jobs.
    const bool only_one_job = parser.jobs().size() == 1;
    for (const auto &j : parser.jobs()) {
        // Skip unconstructed jobs.
        if (!j->is_constructed()) {
            continue;
        }

        // If we are not interactive, skip cleaning jobs that want to print an interactive message.
        if (!interactive && job_wants_message(j)) {
            continue;
        }

        // Clean processes within the job.
        // Note this may print the message on behalf of the job, affecting the result of
        // job_wants_message().
        for (process_ptr_t &p : j->processes) {
            if (try_clean_process_in_job(p.get(), j.get(), &exit_events, only_one_job)) {
                printed = true;
            }
        }

        // Print the message if we need to.
        if (job_wants_message(j) && (j->is_completed() || j->is_stopped())) {
            print_job_status(j.get(), j->is_completed() ? JOB_ENDED : JOB_STOPPED);
            j->mut_flags().notified = true;
            printed = true;
        }

        // Prepare events for completed jobs, except for jobs that themselves came from event
        // handlers.
        if (!j->from_event_handler() && j->is_completed()) {
            if (j->should_report_process_exits()) {
                exit_events.push_back(
                    proc_create_event(L"JOB_EXIT", event_type_t::exit, -j->pgid, 0));
            }
            exit_events.push_back(
                proc_create_event(L"JOB_EXIT", event_type_t::job_exit, j->job_id(), 0));
        }
    }

    // Remove completed jobs.
    // Do this before calling out to user code in the event handler below, to ensure an event
    // handler doesn't remove jobs on our behalf.
    auto is_complete = [](const shared_ptr<job_t> &j) { return j->is_completed(); };
    auto &jobs = parser.jobs();
    jobs.erase(std::remove_if(jobs.begin(), jobs.end(), is_complete), jobs.end());

    // Post pending exit events.
    for (const auto &evt : exit_events) {
        event_fire(parser, evt);
    }

    if (printed) {
        fflush(stdout);
    }

    return printed;
}

bool job_reap(parser_t &parser, bool allow_interactive) {
    ASSERT_IS_MAIN_THREAD();
    process_mark_finished_children(parser, false);

    // Preserve the exit status.
    auto saved_statuses = parser.get_last_statuses();

    bool printed = process_clean_after_marking(parser, allow_interactive);

    // Restore the exit status.
    parser.set_last_statuses(std::move(saved_statuses));

    return printed;
}

/// Maximum length of a /proc/[PID]/stat filename.
#define FN_SIZE 256
|
|
|
|
|
2016-05-03 04:41:17 +00:00
|
|
|
/// Get the CPU time for the specified process.
|
|
|
|
unsigned long proc_get_jiffies(process_t *p) {
|
2019-05-12 21:59:30 +00:00
|
|
|
if (!have_proc_stat()) return 0;
|
2016-05-29 05:28:26 +00:00
|
|
|
if (p->pid <= 0) return 0;
|
2012-11-19 00:30:30 +00:00
|
|
|
|
2016-05-29 05:28:26 +00:00
|
|
|
wchar_t fn[FN_SIZE];
|
2012-11-19 00:30:30 +00:00
|
|
|
char state;
|
2016-05-03 04:41:17 +00:00
|
|
|
int pid, ppid, pgrp, session, tty_nr, tpgid, exit_signal, processor;
|
|
|
|
long int cutime, cstime, priority, nice, placeholder, itrealvalue, rss;
|
|
|
|
unsigned long int flags, minflt, cminflt, majflt, cmajflt, utime, stime, starttime, vsize, rlim,
|
|
|
|
startcode, endcode, startstack, kstkesp, kstkeip, signal, blocked, sigignore, sigcatch,
|
|
|
|
wchan, nswap, cnswap;
|
2012-11-19 00:30:30 +00:00
|
|
|
char comm[1024];
|
|
|
|
|
2019-03-12 21:06:01 +00:00
|
|
|
std::swprintf(fn, FN_SIZE, L"/proc/%d/stat", p->pid);
|
2012-11-19 00:30:30 +00:00
|
|
|
FILE *f = wfopen(fn, "r");
|
2016-05-03 04:41:17 +00:00
|
|
|
if (!f) return 0;
|
2012-11-19 00:30:30 +00:00
|
|
|
|
2016-05-29 05:28:26 +00:00
|
|
|
// TODO: replace the use of fscanf() as it is brittle and should never be used.
|
|
|
|
int count = fscanf(f,
|
|
|
|
"%9d %1023s %c %9d %9d %9d %9d %9d %9lu "
|
|
|
|
"%9lu %9lu %9lu %9lu %9lu %9lu %9ld %9ld %9ld "
|
|
|
|
"%9ld %9ld %9ld %9lu %9lu %9ld %9lu %9lu %9lu "
|
|
|
|
"%9lu %9lu %9lu %9lu %9lu %9lu %9lu %9lu %9lu "
|
|
|
|
"%9lu %9d %9d ",
|
|
|
|
&pid, comm, &state, &ppid, &pgrp, &session, &tty_nr, &tpgid, &flags, &minflt,
|
|
|
|
&cminflt, &majflt, &cmajflt, &utime, &stime, &cutime, &cstime, &priority,
|
|
|
|
&nice, &placeholder, &itrealvalue, &starttime, &vsize, &rss, &rlim,
|
|
|
|
&startcode, &endcode, &startstack, &kstkesp, &kstkeip, &signal, &blocked,
|
|
|
|
&sigignore, &sigcatch, &wchan, &nswap, &cnswap, &exit_signal, &processor);
|
2014-04-26 15:36:20 +00:00
|
|
|
fclose(f);
|
2016-05-29 05:28:26 +00:00
|
|
|
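// In /proc/[PID]/stat, utime/stime/cutime/cstime are fields 14-17; bail out unless we
// parsed at least that far.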
if (count < 17) return 0;
|
2016-05-03 04:41:17 +00:00
|
|
|
return utime + stime + cutime + cstime;
|
2005-09-20 13:26:39 +00:00
|
|
|
}
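// A minimal usage sketch (illustrative only, not fish code): turning two proc_get_jiffies()
// samples into a CPU-usage percentage. The sysconf(_SC_CLK_TCK) divisor and the one-second
// sampling window are assumptions for illustration.
#if 0
static double estimate_cpu_percent(process_t *p) {
    unsigned long j0 = proc_get_jiffies(p);
    sleep(1);  // sampling window
    unsigned long j1 = proc_get_jiffies(p);
    long hz = sysconf(_SC_CLK_TCK);  // jiffies per second
    if (hz <= 0) return 0.0;
    return 100.0 * static_cast<double>(j1 - j0) / static_cast<double>(hz);
}
#endif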
|
|
|
|
|
2016-05-03 04:41:17 +00:00
|
|
|
/// Update the CPU time for all jobs.
|
2019-05-05 05:12:31 +00:00
|
|
|
void proc_update_jiffies(parser_t &parser) {
|
|
|
|
for (const auto &job : parser.jobs()) {
|
2017-01-23 18:38:55 +00:00
|
|
|
for (process_ptr_t &p : job->processes) {
|
2019-11-19 02:34:50 +00:00
|
|
|
gettimeofday(&p->last_time, nullptr);
|
2017-01-23 18:39:53 +00:00
|
|
|
p->last_jiffies = proc_get_jiffies(p.get());
|
2012-11-19 00:30:30 +00:00
|
|
|
}
|
|
|
|
}
|
2005-09-20 13:26:39 +00:00
|
|
|
}
|
|
|
|
|
2019-05-05 10:09:25 +00:00
|
|
|
// Hand control of the terminal to a job's process group. continuing_from_stopped is true if we
|
|
|
|
// are resuming a previously-stopped job, in which case we must also restore terminal attributes.
// Returns 1 if the terminal was transferred, 0 if no transfer was needed, and -1 on error.
|
2019-06-29 22:58:36 +00:00
|
|
|
int terminal_maybe_give_to_job(const job_t *j, bool continuing_from_stopped) {
|
|
|
|
enum { notneeded = 0, success = 1, error = -1 };
|
|
|
|
|
2019-07-12 20:31:56 +00:00
|
|
|
if (!j->should_claim_terminal()) {
|
|
|
|
// The job doesn't want the terminal.
|
2019-06-29 22:58:36 +00:00
|
|
|
return notneeded;
|
|
|
|
}
|
|
|
|
|
2017-02-27 05:46:15 +00:00
|
|
|
if (j->pgid == 0) {
|
2019-05-29 06:07:04 +00:00
|
|
|
FLOG(proc_termowner, L"terminal_maybe_give_to_job() returning early due to no process group");
|
2019-06-29 22:58:36 +00:00
|
|
|
return notneeded;
|
|
|
|
}
|
|
|
|
|
|
|
|
// If we are continuing, ensure that stdin is marked as blocking first (issue #176).
|
|
|
|
if (continuing_from_stopped) {
|
|
|
|
make_fd_blocking(STDIN_FILENO);
|
2017-02-27 05:46:15 +00:00
|
|
|
}
|
|
|
|
|
2017-08-06 22:47:01 +00:00
|
|
|
// It may not be safe to call tcsetpgrp if we've already done so, as at that point we are no
|
|
|
|
// longer the controlling process group for the terminal and no longer have permission to set
|
|
|
|
// the process group that is in control, causing tcsetpgrp to return EPERM. That is not the
|
|
|
|
// behavior documented in tcsetpgrp(3), which says instead that SIGTTOU will be sent to all
|
|
|
|
// members of the background *calling* process group. In practice it's more complicated:
|
|
|
|
// SIGTTOU may or may not be sent depending on the TTY configuration and on whether signal
|
|
|
|
// handlers for SIGTTOU are installed. Read:
|
|
|
|
// http://curiousthing.org/sigttin-sigttou-deep-dive-linux
|
|
|
|
// In all cases, our goal here is just to hand over control of the terminal to this process
|
|
|
|
// group, which is a no-op if it's already been done.
|
2018-10-02 03:55:18 +00:00
|
|
|
if (j->pgid == INVALID_PID || tcgetpgrp(STDIN_FILENO) == j->pgid) {
|
2019-05-18 21:51:16 +00:00
|
|
|
FLOGF(proc_termowner, L"Process group %d already has control of terminal", j->pgid);
|
2017-08-06 23:05:51 +00:00
|
|
|
} else {
|
2019-05-18 21:51:16 +00:00
|
|
|
FLOGF(proc_termowner,
|
|
|
|
L"Attempting to bring process group to foreground via tcsetpgrp for job->pgid %d",
|
2017-08-06 23:05:51 +00:00
|
|
|
j->pgid);
|
2017-07-29 17:03:37 +00:00
|
|
|
|
2017-08-06 22:47:01 +00:00
|
|
|
// The tcsetpgrp(2) man page says that EPERM is thrown if "pgrp has a supported value, but
|
|
|
|
// is not the process group ID of a process in the same session as the calling process."
|
|
|
|
// Since we _guarantee_ that this isn't the case (the child calls setpgid before it calls
|
|
|
|
// SIGSTOP, and the child was created in the same session as us), it seems that EPERM is
|
|
|
|
// being thrown because of a caching issue - the call to tcsetpgrp isn't seeing the
|
|
|
|
// newly-created process group just yet. On this developer's test machine (WSL running Linux
|
|
|
|
// 4.4.0), EPERM does indeed disappear on retry. The important thing is that we can
|
|
|
|
// guarantee the process isn't going to exit while we wait (which would cause us to possibly
|
|
|
|
// block indefinitely).
|
2017-07-29 17:03:37 +00:00
|
|
|
while (tcsetpgrp(STDIN_FILENO, j->pgid) != 0) {
|
2019-05-29 06:07:04 +00:00
|
|
|
FLOGF(proc_termowner, L"tcsetpgrp failed: %d", errno);
|
2018-10-02 18:24:05 +00:00
|
|
|
|
2017-07-29 23:18:01 +00:00
|
|
|
bool pgroup_terminated = false;
|
2018-10-02 18:24:05 +00:00
|
|
|
// No need to test for EINTR as we are blocking signals
|
|
|
|
if (errno == EINVAL) {
|
2017-08-06 22:47:01 +00:00
|
|
|
// OS X returns EINVAL if the process group no longer lives. Probably other OSes,
|
|
|
|
// too. Unlike EPERM below, EINVAL can only happen if the process group has
|
|
|
|
// terminated.
|
2017-07-29 23:18:01 +00:00
|
|
|
pgroup_terminated = true;
|
2017-08-06 23:05:51 +00:00
|
|
|
} else if (errno == EPERM) {
|
2017-08-06 22:47:01 +00:00
|
|
|
// Retry so long as this isn't because the process group is dead.
|
2017-07-29 17:03:37 +00:00
|
|
|
int status;  // separate out-parameter; only the return value matters here
pid_t wait_result = waitpid(-1 * j->pgid, &status, WNOHANG);
|
|
|
|
if (wait_result == -1) {
|
2017-08-06 22:47:01 +00:00
|
|
|
// Note that -1 is technically an "error" for waitpid in the sense that an
|
|
|
|
// invalid argument was specified because no such process group exists any
|
|
|
|
// longer. This is the observed behavior on Linux 4.4.0. A "success" result
|
|
|
|
// would mean processes from the group still exist and are running in one
|
|
|
|
// state or another.
|
2017-07-29 23:18:01 +00:00
|
|
|
pgroup_terminated = true;
|
2017-08-06 23:05:51 +00:00
|
|
|
} else {
|
2017-08-06 22:47:01 +00:00
|
|
|
// Debug the original tcsetpgrp error (not the waitpid errno) to the log, and
|
|
|
|
// then retry until not EPERM or the process group has exited.
|
2017-07-29 23:18:01 +00:00
|
|
|
debug(2, L"terminal_give_to_job(): EPERM.\n", j->pgid);
|
2018-10-02 18:24:05 +00:00
|
|
|
continue;
|
2017-07-29 17:03:37 +00:00
|
|
|
}
|
2017-08-06 23:05:51 +00:00
|
|
|
} else {
|
2018-10-02 18:24:05 +00:00
|
|
|
if (errno == ENOTTY) {
|
|
|
|
redirect_tty_output();
|
|
|
|
}
|
Introduce the internal jobs for functions
This PR is aimed at improving how job ids are assigned. In particular,
previous to this commit, a job id would be consumed by functions (and
thus aliases). Since it's usual to use functions as command wrappers,
this results in awkward job id assignments.
For example if the user is like me and just made the jump from vim -> neovim
then the user might create the following alias:
```
alias vim=nvim
```
Previous to this commit, if the user ran `vim` after setting up this
alias, backgrounded (^Z) and ran `jobs` then the output might be:
```
Job Group State Command
2 60267 stopped nvim $argv
```
If the user subsequently opened another vim (nvim) session, backgrounded
and ran jobs then they might see what follows:
```
Job Group State Command
4 70542 stopped nvim $argv
2 60267 stopped nvim $argv
```
These job ids feel unnatural, especially when transitioning away from
e.g. bash where job ids are sequentially incremented (and aliases/functions
don't consume a job id).
See #6053 for more details.
As @ridiculousfish pointed out in
https://github.com/fish-shell/fish-shell/issues/6053#issuecomment-559899400,
we want to elide a job's job id if it corresponds to a single function in the
foreground. This translates to the following prerequisites:
- A job must correspond to a single process (i.e. the job continuation
must be empty)
- A job must be in the foreground (i.e. `&` wasn't appended)
- The job's single process must resolve to a function invocation
If all of these conditions are true then we should mark a job as
"internal" and somehow remove it from consideration when any
infrastructure tries to interact with jobs / job ids.
I saw two paths to implement these requirements:
- At the time of job creation calculate whether or not a job is
"internal" and use a separate list of job ids to track their ids.
Additionally introduce a new flag denoting that a job is internal so
that e.g. `jobs` doesn't list internal jobs
- I started implementing this route but quickly realized I was
computing the same information that would be computed later on (e.g.
"is this job a single process" and "is this jobs statement a
function"). Specifically I was computing data that populate_job_process
would end up computing later anyway. Additionally this added some
weird complexities to the job system (after the change there were two
job id lists AND an additional flag that had to be taken into
consideration)
- Once a function is about to be executed we release the current job's
job id if the prerequisites are satisfied (which at this point have
been fully computed).
- I opted for this solution since it seems cleaner. In this
implementation "releasing a job id" is done by both calling
`release_job_id` and by setting the internal job_id member variable to
-1. The former operation allows subsequent child jobs to reuse that
same job id (so e.g. the situation described in Motivation doesn't
occur), and the latter ensures that no other job / job id
infrastructure will interact with these jobs because valid jobs have
positive job ids. The second operation causes job_id to become
non-const which leads to the list of code changes outside of `exec.c`
(i.e. a codemod from `job_t::job_id` -> `job_t::job_id()` and moving the
old member variable to a non-const private `job_t::job_id_`)
Note: It's very possible I missed something and setting the job id to -1
will break some other infrastructure, please let me know if so!
I tried to run `make/ninja lint`, but a bunch of non-relevant issues
appeared (e.g. `fatal error: 'config.h' file not found`). I did
successfully clang-format (`git clang-format -f`) and run tests, though.
This PR closes #6053.
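For illustration, a rough sketch of the release path described above (hypothetical code, not
the exact diff; `release_job_id` and `job_id_` are the names this PR introduces, and the guard
mirrors the prerequisites listed earlier):
```
// Hypothetical sketch of the "release" step, not the PR's exact code.
void job_t::maybe_release_job_id() {
    bool single_foreground_function = processes.size() == 1 && is_foreground() &&
                                      processes.front()->type == process_type_t::function;
    if (single_foreground_function) {
        release_job_id(job_id_);  // let subsequent child jobs reuse this id
        job_id_ = -1;             // a negative id marks the job as internal
    }
}
```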
2019-12-29 15:46:07 +00:00
|
|
|
debug(1, _(L"Could not send job %d ('%ls') with pgid %d to foreground"),
|
|
|
|
j->job_id(), j->command_wcstr(), j->pgid);
|
2017-07-29 17:03:37 +00:00
|
|
|
wperror(L"tcsetpgrp");
|
2019-06-29 22:58:36 +00:00
|
|
|
return error;
|
2017-07-29 17:03:37 +00:00
|
|
|
}
|
2017-07-29 23:18:01 +00:00
|
|
|
|
|
|
|
if (pgroup_terminated) {
|
2018-10-02 18:24:05 +00:00
|
|
|
// All processes in the process group have exited.
|
|
|
|
// Since we delay reaping any processes in a process group until all members of that
|
2018-10-29 00:09:57 +00:00
|
|
|
// job/group have been started, the only way this can happen is if the very last
|
|
|
|
// process in the group terminated and didn't need to access the terminal; otherwise
|
|
|
|
// it would have hung waiting for terminal IO (SIGTTIN). We can safely ignore this.
|
2017-07-29 23:18:01 +00:00
|
|
|
debug(3, L"tcsetpgrp called but process group %d has terminated.\n", j->pgid);
|
2019-06-29 22:58:36 +00:00
|
|
|
return notneeded;
|
2017-07-29 23:18:01 +00:00
|
|
|
}
|
2018-10-02 18:24:05 +00:00
|
|
|
|
|
|
|
break;
|
2017-07-29 17:03:37 +00:00
|
|
|
}
|
2012-11-19 00:30:30 +00:00
|
|
|
}
|
|
|
|
|
2019-06-29 22:58:36 +00:00
|
|
|
if (continuing_from_stopped) {
|
2018-10-02 18:24:05 +00:00
|
|
|
auto result = tcsetattr(STDIN_FILENO, TCSADRAIN, &j->tmodes);
|
2017-01-27 04:00:43 +00:00
|
|
|
if (result == -1) {
|
2018-10-02 18:24:05 +00:00
|
|
|
// No need to test for EINTR and retry since we have blocked all signals
|
|
|
|
if (errno == ENOTTY) {
|
|
|
|
redirect_tty_output();
|
|
|
|
}
|
|
|
|
|
2019-12-29 15:46:07 +00:00
|
|
|
debug(1, _(L"Could not send job %d ('%ls') to foreground"), j->job_id(),
|
2018-10-29 00:09:57 +00:00
|
|
|
j->preview().c_str());
|
2017-01-27 04:00:43 +00:00
|
|
|
wperror(L"tcsetattr");
|
2019-06-29 22:58:36 +00:00
|
|
|
return error;
|
2017-01-27 04:00:43 +00:00
|
|
|
}
|
2016-12-29 02:52:33 +00:00
|
|
|
}
|
|
|
|
|
2019-06-29 22:58:36 +00:00
|
|
|
return success;
|
2006-11-11 10:48:40 +00:00
|
|
|
}
|
|
|
|
|
2018-08-18 23:56:01 +00:00
|
|
|
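/// Hand the terminal to the shell's own process group before running a builtin, if the given
/// job's pgroup currently owns it. Returns the previous owner's pgid so the caller can give
/// the terminal back afterwards, or -1 if the terminal was not transferred.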
pid_t terminal_acquire_before_builtin(int job_pgid) {
|
2018-11-13 17:39:53 +00:00
|
|
|
pid_t selfpgid = getpgrp();
|
|
|
|
|
2018-08-05 00:32:04 +00:00
|
|
|
pid_t current_owner = tcgetpgrp(STDIN_FILENO);
|
2018-11-13 17:39:53 +00:00
|
|
|
if (current_owner >= 0 && current_owner != selfpgid && current_owner == job_pgid) {
|
|
|
|
if (tcsetpgrp(STDIN_FILENO, selfpgid) == 0) {
|
2018-08-05 00:32:04 +00:00
|
|
|
return current_owner;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
2016-05-03 04:41:17 +00:00
|
|
|
/// Returns control of the terminal to the shell, and saves the terminal attribute state to the job,
|
|
|
|
/// so that we can restore the terminal ownership to the job at a later time.
|
2019-09-02 00:08:51 +00:00
|
|
|
static bool terminal_return_from_job(job_t *j, bool restore_attrs) {
|
2017-02-27 05:46:15 +00:00
|
|
|
errno = 0;
|
2018-12-07 20:35:09 +00:00
|
|
|
if (j->pgid == 0) {
|
2017-02-27 05:46:15 +00:00
|
|
|
debug(2, "terminal_return_from_job() returning early due to no process group");
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2016-12-15 03:21:36 +00:00
|
|
|
if (tcsetpgrp(STDIN_FILENO, getpgrp()) == -1) {
|
|
|
|
if (errno == ENOTTY) redirect_tty_output();
|
2012-11-19 00:30:30 +00:00
|
|
|
debug(1, _(L"Could not return shell to foreground"));
|
|
|
|
wperror(L"tcsetpgrp");
|
2017-02-27 05:46:15 +00:00
|
|
|
return false;
|
2012-11-19 00:30:30 +00:00
|
|
|
}
|
|
|
|
|
2016-12-15 03:21:36 +00:00
|
|
|
// Save the job's terminal modes.
|
|
|
|
if (tcgetattr(STDIN_FILENO, &j->tmodes)) {
|
2017-01-11 05:52:10 +00:00
|
|
|
if (errno == EIO) redirect_tty_output();
|
2012-11-19 00:30:30 +00:00
|
|
|
debug(1, _(L"Could not return shell to foreground"));
|
|
|
|
wperror(L"tcgetattr");
|
2017-02-27 05:46:15 +00:00
|
|
|
return false;
|
2012-11-19 00:30:30 +00:00
|
|
|
}
|
|
|
|
|
2019-09-02 00:08:51 +00:00
|
|
|
// Need to restore the terminal's attributes or `bind \cF fg` will put the
|
|
|
|
// terminal into a broken state (until "enter" is pressed).
|
|
|
|
// See: https://github.com/fish-shell/fish-shell/issues/2114
|
|
|
|
if (restore_attrs) {
|
|
|
|
if (tcsetattr(STDIN_FILENO, TCSADRAIN, &shell_modes) == -1) {
|
|
|
|
if (errno == EIO) redirect_tty_output();
|
|
|
|
debug(1, _(L"Could not return shell to foreground"));
|
|
|
|
wperror(L"tcsetattr");
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
}
|
2006-11-11 10:48:40 +00:00
|
|
|
|
2017-02-27 05:46:15 +00:00
|
|
|
return true;
|
2006-11-11 10:48:40 +00:00
|
|
|
}
|
|
|
|
|
2019-05-05 05:12:31 +00:00
|
|
|
void job_t::continue_job(parser_t &parser, bool reclaim_foreground_pgrp, bool send_sigcont) {
|
2016-05-03 04:41:17 +00:00
|
|
|
// Put job first in the job list.
|
2019-05-05 05:12:31 +00:00
|
|
|
parser.job_promote(this);
|
2019-10-15 21:37:10 +00:00
|
|
|
mut_flags().notified = false;
|
2012-11-19 00:30:30 +00:00
|
|
|
|
2019-05-18 20:47:27 +00:00
|
|
|
FLOGF(proc_job_run, L"%ls job %d, pgid %d (%ls), %ls, %ls",
|
2019-12-29 15:46:07 +00:00
|
|
|
send_sigcont ? L"Continue" : L"Start", job_id_, pgid, command_wcstr(),
|
2019-05-18 20:47:27 +00:00
|
|
|
is_completed() ? L"COMPLETED" : L"UNCOMPLETED",
|
2019-05-27 21:52:48 +00:00
|
|
|
parser.libdata().is_interactive ? L"INTERACTIVE" : L"NON-INTERACTIVE");
|
2016-05-03 04:41:17 +00:00
|
|
|
|
2018-10-02 22:10:14 +00:00
|
|
|
// Make sure we retake control of the terminal before leaving this function.
|
|
|
|
bool term_transferred = false;
|
|
|
|
cleanup_t take_term_back([&]() {
|
2019-04-07 03:00:52 +00:00
|
|
|
if (term_transferred && reclaim_foreground_pgrp) {
|
2019-09-02 00:08:51 +00:00
|
|
|
// Only restore terminal attrs if we're continuing a job. See:
|
|
|
|
// https://github.com/fish-shell/fish-shell/issues/121
|
|
|
|
// https://github.com/fish-shell/fish-shell/issues/2114
|
|
|
|
terminal_return_from_job(this, send_sigcont);
|
2018-10-29 00:09:57 +00:00
|
|
|
}
|
|
|
|
});
|
2018-10-02 22:10:14 +00:00
|
|
|
|
2018-10-02 17:30:23 +00:00
|
|
|
if (!is_completed()) {
|
2019-06-29 22:58:36 +00:00
|
|
|
int transfer = terminal_maybe_give_to_job(this, send_sigcont);
|
|
|
|
if (transfer < 0) {
|
|
|
|
// terminal_maybe_give_to_job prints an error.
|
|
|
|
return;
|
2012-11-19 00:30:30 +00:00
|
|
|
}
|
2019-06-29 22:58:36 +00:00
|
|
|
term_transferred = (transfer > 0);
|
2012-11-19 00:30:30 +00:00
|
|
|
|
2018-10-02 22:10:14 +00:00
|
|
|
// If both requested and necessary, send the job a continue signal.
|
2018-10-02 20:10:42 +00:00
|
|
|
if (send_sigcont) {
|
2018-10-02 22:10:14 +00:00
|
|
|
// This code used to check for JOB_CONTROL to decide between using killpg to signal all
|
|
|
|
// processes in the group or iterating over each process in the group and sending the
|
|
|
|
// signal individually. job_t::signal() does the same, but uses the shell's own pgroup
|
|
|
|
// to make that distinction.
|
2018-10-02 20:10:42 +00:00
|
|
|
if (!signal(SIGCONT)) {
|
|
|
|
debug(2, "Failed to send SIGCONT to any processes in pgroup %d!", pgid);
|
2018-10-02 22:10:14 +00:00
|
|
|
// This returns without bubbling up the error. Presumably that is OK.
|
2018-10-02 20:10:42 +00:00
|
|
|
return;
|
2012-11-19 00:30:30 +00:00
|
|
|
}
|
2018-10-02 22:10:14 +00:00
|
|
|
|
|
|
|
// Reset the status of each process instance.
|
|
|
|
for (auto &p : processes) {
|
|
|
|
p->stopped = false;
|
|
|
|
}
|
2012-11-19 00:30:30 +00:00
|
|
|
}
|
|
|
|
|
2018-10-02 17:30:23 +00:00
|
|
|
if (is_foreground()) {
|
2019-02-01 09:58:06 +00:00
|
|
|
// Wait for the status of our own job to change.
|
2018-10-02 17:30:23 +00:00
|
|
|
while (!reader_exit_forced() && !is_stopped() && !is_completed()) {
|
2019-05-05 05:12:31 +00:00
|
|
|
process_mark_finished_children(parser, true);
|
2012-11-19 00:30:30 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-02-13 23:17:18 +00:00
|
|
|
if (is_foreground() && is_completed()) {
|
|
|
|
// Set $status only if we are in the foreground and the last process in the job has
|
|
|
|
// finished and is not a short-circuited builtin.
|
|
|
|
auto &p = processes.back();
|
2019-02-25 18:05:42 +00:00
|
|
|
if (p->status.normal_exited() || p->status.signal_exited()) {
|
2019-05-12 21:00:44 +00:00
|
|
|
parser.set_last_statuses(get_statuses());
|
2012-11-19 00:30:30 +00:00
|
|
|
}
|
|
|
|
}
|
2005-09-20 13:26:39 +00:00
|
|
|
}
|
|
|
|
|
2019-05-05 05:12:31 +00:00
|
|
|
void proc_sanity_check(const parser_t &parser) {
|
2019-11-19 02:34:50 +00:00
|
|
|
const job_t *fg_job = nullptr;
|
2012-11-19 00:30:30 +00:00
|
|
|
|
2019-05-05 05:12:31 +00:00
|
|
|
for (const auto &j : parser.jobs()) {
|
2018-10-02 17:30:23 +00:00
|
|
|
if (!j->is_constructed()) continue;
|
2012-11-19 00:30:30 +00:00
|
|
|
|
2016-05-03 04:41:17 +00:00
|
|
|
// More than one foreground job?
|
2018-10-02 17:30:23 +00:00
|
|
|
if (j->is_foreground() && !(j->is_stopped() || j->is_completed())) {
|
2017-01-24 23:14:56 +00:00
|
|
|
if (fg_job) {
|
2019-05-30 09:54:09 +00:00
|
|
|
FLOGF(error, _(L"More than one job in foreground: job 1: '%ls' job 2: '%ls'"),
|
2019-06-04 03:30:48 +00:00
|
|
|
fg_job->command_wcstr(), j->command_wcstr());
|
2012-11-19 00:30:30 +00:00
|
|
|
sanity_lose();
|
|
|
|
}
|
2018-12-31 03:25:16 +00:00
|
|
|
fg_job = j.get();
|
2012-11-19 00:30:30 +00:00
|
|
|
}
|
|
|
|
|
2017-01-23 17:28:34 +00:00
|
|
|
for (const process_ptr_t &p : j->processes) {
|
2016-05-03 04:41:17 +00:00
|
|
|
// Internal block nodes do not have argv - see issue #1545.
|
2019-03-24 19:29:25 +00:00
|
|
|
bool null_ok = (p->type == process_type_t::block_node);
|
2014-07-12 18:01:00 +00:00
|
|
|
validate_pointer(p->get_argv(), _(L"Process argument list"), null_ok);
|
|
|
|
validate_pointer(p->argv0(), _(L"Process name"), null_ok);
|
2012-11-19 00:30:30 +00:00
|
|
|
|
2016-05-03 04:41:17 +00:00
|
|
|
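// stopped/completed are logically booleans; any bit above the lowest set here
// indicates an inconsistent (likely corrupted) value.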
if ((p->stopped & (~0x00000001)) != 0) {
|
2019-05-30 09:54:09 +00:00
|
|
|
FLOGF(error, _(L"Job '%ls', process '%ls' has inconsistent state \'stopped\'=%d"),
|
2019-06-04 03:30:48 +00:00
|
|
|
j->command_wcstr(), p->argv0(), p->stopped);
|
2012-11-19 00:30:30 +00:00
|
|
|
sanity_lose();
|
|
|
|
}
|
|
|
|
|
2016-05-03 04:41:17 +00:00
|
|
|
if ((p->completed & (~0x00000001)) != 0) {
|
2019-05-30 09:54:09 +00:00
|
|
|
FLOGF(error, _(L"Job '%ls', process '%ls' has inconsistent state \'completed\'=%d"),
|
2019-06-04 03:30:48 +00:00
|
|
|
j->command_wcstr(), p->argv0(), p->completed);
|
2012-11-19 00:30:30 +00:00
|
|
|
sanity_lose();
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
2005-09-20 13:26:39 +00:00
|
|
|
}
|
|
|
|
|
2019-04-30 03:58:58 +00:00
|
|
|
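/// Block until some child changes state, then reap finished processes and clean up
/// completed jobs.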
void proc_wait_any(parser_t &parser) {
|
2019-03-03 00:45:15 +00:00
|
|
|
ASSERT_IS_MAIN_THREAD();
|
2019-05-05 05:12:31 +00:00
|
|
|
process_mark_finished_children(parser, true /* block_ok */);
|
2019-05-27 21:52:48 +00:00
|
|
|
process_clean_after_marking(parser, parser.libdata().is_interactive);
|
2017-10-22 07:10:23 +00:00
|
|
|
}
|
2018-10-20 18:58:51 +00:00
|
|
|
|
2019-05-05 05:12:31 +00:00
|
|
|
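/// SIGHUP all of this parser's incomplete jobs that are under job control, sending SIGCONT
/// first to any stopped job so the HUP can be delivered.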
void hup_background_jobs(const parser_t &parser) {
|
|
|
|
// TODO: we should probably hup all jobs across all parsers here.
|
|
|
|
for (const auto &j : parser.jobs()) {
|
2018-10-20 18:58:51 +00:00
|
|
|
// Make sure we don't try to SIGHUP the calling builtin
|
2019-06-23 19:39:29 +00:00
|
|
|
if (j->pgid == INVALID_PID || !j->wants_job_control()) {
|
2018-10-20 18:58:51 +00:00
|
|
|
continue;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (!j->is_completed()) {
|
|
|
|
if (j->is_stopped()) {
|
|
|
|
j->signal(SIGCONT);
|
|
|
|
}
|
|
|
|
j->signal(SIGHUP);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
2018-09-09 08:36:21 +00:00
|
|
|
|
|
|
|
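/// Whether fish is still in its initialization phase; toggled via
/// set_is_within_fish_initialization() below.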
static std::atomic<bool> s_is_within_fish_initialization{false};
|
|
|
|
|
|
|
|
void set_is_within_fish_initialization(bool flag) { s_is_within_fish_initialization.store(flag); }
|
|
|
|
|
|
|
|
bool is_within_fish_initialization() { return s_is_within_fish_initialization.load(); }
|