2016-05-03 04:41:17 +00:00
|
|
|
// Utilities for keeping track of jobs, processes and subshells, as well as signal handling
|
|
|
|
// functions for tracking children. These functions do not themselves launch new processes, the exec
|
|
|
|
// library will call proc to create representations of the running jobs as needed.
|
|
|
|
//
|
|
|
|
// Some of the code in this file is based on code from the Glibc manual.
|
2016-04-21 06:00:54 +00:00
|
|
|
// IWYU pragma: no_include <__bit_reference>
|
2005-09-20 13:26:39 +00:00
|
|
|
#include "config.h"
|
|
|
|
|
2018-09-09 09:25:51 +00:00
|
|
|
#include <atomic>
|
2016-05-03 04:41:17 +00:00
|
|
|
#include <errno.h>
|
|
|
|
#include <signal.h>
|
2005-09-20 13:26:39 +00:00
|
|
|
#include <stdio.h>
|
|
|
|
#include <sys/wait.h>
|
2016-05-03 04:41:17 +00:00
|
|
|
#include <unistd.h>
|
|
|
|
#include <wchar.h>
|
2015-07-25 15:14:25 +00:00
|
|
|
#include <wctype.h>
|
2017-02-14 04:37:27 +00:00
|
|
|
|
2006-01-19 12:22:07 +00:00
|
|
|
#if HAVE_TERM_H
|
2018-02-04 08:59:37 +00:00
|
|
|
#include <curses.h>
|
2005-09-20 13:26:39 +00:00
|
|
|
#include <term.h>
|
2006-01-19 12:22:07 +00:00
|
|
|
#elif HAVE_NCURSES_TERM_H
|
|
|
|
#include <ncurses/term.h>
|
|
|
|
#endif
|
2018-02-04 08:59:37 +00:00
|
|
|
#include <termios.h>
|
2006-07-30 20:26:59 +00:00
|
|
|
#ifdef HAVE_SIGINFO_H
|
|
|
|
#include <siginfo.h>
|
|
|
|
#endif
|
2006-08-09 22:26:05 +00:00
|
|
|
#ifdef HAVE_SYS_SELECT_H
|
|
|
|
#include <sys/select.h>
|
|
|
|
#endif
|
2016-04-21 06:00:54 +00:00
|
|
|
#include <sys/time.h> // IWYU pragma: keep
|
2016-05-03 04:41:17 +00:00
|
|
|
#include <sys/types.h>
|
2017-02-14 04:37:27 +00:00
|
|
|
|
2016-04-21 06:00:54 +00:00
|
|
|
#include <algorithm> // IWYU pragma: keep
|
2017-02-14 04:37:27 +00:00
|
|
|
#include <memory>
|
2018-02-19 02:39:03 +00:00
|
|
|
#include <utility>
|
2017-02-14 04:37:27 +00:00
|
|
|
#include <vector>
|
2006-08-09 22:26:05 +00:00
|
|
|
|
2005-09-20 13:26:39 +00:00
|
|
|
#include "common.h"
|
2005-10-11 19:23:43 +00:00
|
|
|
#include "event.h"
|
2016-05-03 04:41:17 +00:00
|
|
|
#include "fallback.h" // IWYU pragma: keep
|
2016-04-21 06:00:54 +00:00
|
|
|
#include "io.h"
|
2016-05-03 04:41:17 +00:00
|
|
|
#include "output.h"
|
2016-04-21 06:00:54 +00:00
|
|
|
#include "parse_tree.h"
|
2016-05-03 04:41:17 +00:00
|
|
|
#include "parser.h"
|
|
|
|
#include "proc.h"
|
|
|
|
#include "reader.h"
|
|
|
|
#include "sanity.h"
|
|
|
|
#include "signal.h"
|
|
|
|
#include "util.h"
|
|
|
|
#include "wutil.h" // IWYU pragma: keep
|
2005-09-20 13:26:39 +00:00
|
|
|
|
2016-05-03 04:41:17 +00:00
|
|
|
/// Size of buffer for reading buffered output.
|
2005-09-20 13:26:39 +00:00
|
|
|
#define BUFFER_SIZE 4096
|
|
|
|
|
2016-05-03 04:41:17 +00:00
|
|
|
/// Status of last process to exit.
|
|
|
|
static int last_status = 0;
|
2005-09-20 13:26:39 +00:00
|
|
|
|
2018-09-18 13:56:36 +00:00
|
|
|
/// The signals that signify crashes to us.
|
2018-09-29 04:58:44 +00:00
|
|
|
static const int crashsignals[] = {SIGABRT, SIGBUS, SIGFPE, SIGILL, SIGSEGV, SIGSYS};
|
2018-09-18 13:56:36 +00:00
|
|
|
|
2018-02-19 02:33:04 +00:00
|
|
|
bool job_list_is_empty() {
|
2012-02-28 02:43:24 +00:00
|
|
|
ASSERT_IS_MAIN_THREAD();
|
|
|
|
return parser_t::principal_parser().job_list().empty();
|
|
|
|
}
|
|
|
|
|
2016-05-03 04:41:17 +00:00
|
|
|
void job_iterator_t::reset() {
|
2012-02-28 02:43:24 +00:00
|
|
|
this->current = job_list->begin();
|
|
|
|
this->end = job_list->end();
|
|
|
|
}
|
2012-01-30 00:36:21 +00:00
|
|
|
|
2016-05-03 04:41:17 +00:00
|
|
|
/// Construct an iterator over an explicitly provided job list.
job_iterator_t::job_iterator_t(job_list_t &jobs) : job_list(&jobs) {
    this->reset();
}
|
2012-01-30 00:36:21 +00:00
|
|
|
|
2016-05-03 04:41:17 +00:00
|
|
|
/// Construct an iterator over the principal parser's job list. Main thread only.
job_iterator_t::job_iterator_t() : job_list(&parser_t::principal_parser().job_list()) {
    ASSERT_IS_MAIN_THREAD();
    reset();
}
|
|
|
|
|
2016-05-03 04:41:17 +00:00
|
|
|
/// Return the number of jobs in the underlying list.
size_t job_iterator_t::count() const { return job_list->size(); }
|
2014-11-03 18:56:16 +00:00
|
|
|
|
2017-06-20 04:05:34 +00:00
|
|
|
/// True if fish is attached to a terminal as an interactive session.
bool is_interactive_session = false;
/// True if we are running as a subshell (e.g. for a command substitution) — TODO confirm exact
/// trigger against callers.
bool is_subshell = false;
/// True while executing a block of commands (presumably set during block execution; verify
/// against callers).
bool is_block = false;
/// True while at a breakpoint (debugging).
bool is_breakpoint = false;
/// True if this is a login shell.
bool is_login = false;
/// Nonzero while running an event handler (presumably; verify against event.cpp callers).
int is_event = false;
/// Current job-control mode; defaults to controlling only interactive jobs.
int job_control_mode = JOB_CONTROL_INTERACTIVE;
/// Nonzero if commands should be parsed but not executed.
int no_exec = 0;
|
2006-03-18 01:04:59 +00:00
|
|
|
|
2012-02-26 02:54:49 +00:00
|
|
|
/// Whether the shell is currently interactive; -1 is a sentinel meaning "not yet set" (see
/// shell_is_interactive, which asserts on it, and proc_push_interactive).
static int is_interactive = -1;

/// Flag queried/set via get_proc_had_barrier / set_proc_had_barrier below.
static bool proc_had_barrier = false;
|
|
|
|
|
2018-02-19 02:33:04 +00:00
|
|
|
bool shell_is_interactive() {
|
2012-02-26 02:54:49 +00:00
|
|
|
ASSERT_IS_MAIN_THREAD();
|
2016-05-15 03:35:54 +00:00
|
|
|
// is_interactive is statically initialized to -1. Ensure it has been dynamically set
|
|
|
|
// before we're called.
|
|
|
|
assert(is_interactive != -1);
|
2014-01-01 23:29:56 +00:00
|
|
|
return is_interactive > 0;
|
2012-02-26 02:54:49 +00:00
|
|
|
}
|
2005-09-20 13:26:39 +00:00
|
|
|
|
2016-05-03 04:41:17 +00:00
|
|
|
bool get_proc_had_barrier() {
|
2012-03-31 22:33:34 +00:00
|
|
|
ASSERT_IS_MAIN_THREAD();
|
|
|
|
return proc_had_barrier;
|
|
|
|
}
|
|
|
|
|
2016-05-03 04:41:17 +00:00
|
|
|
void set_proc_had_barrier(bool flag) {
|
2012-03-31 22:33:34 +00:00
|
|
|
ASSERT_IS_MAIN_THREAD();
|
|
|
|
proc_had_barrier = flag;
|
|
|
|
}
|
|
|
|
|
2016-05-03 04:41:17 +00:00
|
|
|
/// The event variable used to send all process events.
static event_t event(0);

/// A stack containing the values of is_interactive. Used by proc_push_interactive and
/// proc_pop_interactive.
static std::vector<int> interactive_stack;
|
2005-10-14 11:40:33 +00:00
|
|
|
|
2016-05-03 04:41:17 +00:00
|
|
|
/// Initialize process tracking; start in non-interactive mode.
void proc_init() {
    proc_push_interactive(0);
}
|
2005-10-14 11:40:33 +00:00
|
|
|
|
2016-05-03 04:41:17 +00:00
|
|
|
/// Remove job from list of jobs.
|
|
|
|
static int job_remove(job_t *j) {
|
2012-02-28 02:43:24 +00:00
|
|
|
ASSERT_IS_MAIN_THREAD();
|
|
|
|
return parser_t::principal_parser().job_remove(j);
|
2012-01-30 00:36:21 +00:00
|
|
|
}
|
|
|
|
|
2018-10-02 17:30:23 +00:00
|
|
|
void job_t::promote() {
|
2012-02-28 02:43:24 +00:00
|
|
|
ASSERT_IS_MAIN_THREAD();
|
2018-10-02 17:30:23 +00:00
|
|
|
parser_t::principal_parser().job_promote(this);
|
2005-09-20 13:26:39 +00:00
|
|
|
}
|
|
|
|
|
2016-05-03 04:41:17 +00:00
|
|
|
void proc_destroy() {
|
2012-02-28 02:43:24 +00:00
|
|
|
job_list_t &jobs = parser_t::principal_parser().job_list();
|
2016-05-03 04:41:17 +00:00
|
|
|
while (!jobs.empty()) {
|
2017-01-26 22:47:32 +00:00
|
|
|
job_t *job = jobs.front().get();
|
2012-11-19 00:30:30 +00:00
|
|
|
debug(2, L"freeing leaked job %ls", job->command_wcstr());
|
2017-01-26 22:47:32 +00:00
|
|
|
job_remove(job);
|
2012-11-19 00:30:30 +00:00
|
|
|
}
|
2005-09-20 13:26:39 +00:00
|
|
|
}
|
|
|
|
|
2016-05-03 04:41:17 +00:00
|
|
|
void proc_set_last_status(int s) {
|
2013-04-07 21:38:57 +00:00
|
|
|
ASSERT_IS_MAIN_THREAD();
|
2012-11-19 00:30:30 +00:00
|
|
|
last_status = s;
|
2005-09-20 13:26:39 +00:00
|
|
|
}
|
|
|
|
|
2016-05-03 04:41:17 +00:00
|
|
|
/// Return the recorded status of the last process to exit.
int proc_get_last_status() {
    return last_status;
}
|
2005-09-20 13:26:39 +00:00
|
|
|
|
2016-05-03 04:41:17 +00:00
|
|
|
// Basic thread safe job IDs. The vector consumed_job_ids has a true value wherever the job ID
// corresponding to that slot is in use. The job ID corresponding to slot 0 is 1. All access
// goes through the owning lock (see acquire_job_id / release_job_id below).
static owning_lock<std::vector<bool>> locked_consumed_job_ids;
|
2012-02-28 02:43:24 +00:00
|
|
|
|
2018-02-19 02:33:04 +00:00
|
|
|
/// Allocate and return the lowest unused job ID. Thread safe.
job_id_t acquire_job_id() {
    auto consumed = locked_consumed_job_ids.acquire();

    // Look for a free (false) slot; slot i corresponds to job ID i + 1.
    auto free_slot = std::find(consumed->begin(), consumed->end(), false);
    if (free_slot != consumed->end()) {
        *free_slot = true;
        return (job_id_t)(free_slot - consumed->begin() + 1);
    }

    // No free slot: append a new consumed slot. The vector's new size is the new
    // job ID, since IDs are one larger than their slot index.
    consumed->push_back(true);
    return (job_id_t)consumed->size();
}
|
2006-03-10 13:38:09 +00:00
|
|
|
|
2016-05-03 04:41:17 +00:00
|
|
|
/// Return job ID \p jid to the pool, trimming unused trailing slots. Thread safe.
void release_job_id(job_id_t jid) {
    assert(jid > 0);
    auto consumed = locked_consumed_job_ids.acquire();
    const size_t slot = (size_t)(jid - 1);
    size_t count = consumed->size();

    // The slot must exist and must currently be marked as consumed.
    assert(slot < count);
    assert(consumed->at(slot) == true);

    // Free the slot, then shrink the vector past any trailing free slots.
    consumed->at(slot) = false;
    while (count--) {
        if (consumed->at(count)) break;
    }
    consumed->resize(count + 1);
}
|
|
|
|
|
2018-10-02 17:30:23 +00:00
|
|
|
/// Look up a job by its job ID in the principal parser. Main thread only.
job_t *job_t::from_job_id(job_id_t id) {
    ASSERT_IS_MAIN_THREAD();
    auto &parser = parser_t::principal_parser();
    return parser.job_get(id);
}
|
|
|
|
|
2018-10-02 17:30:23 +00:00
|
|
|
/// Look up a job containing the given pid in the principal parser. Main thread only.
job_t *job_t::from_pid(pid_t pid) {
    ASSERT_IS_MAIN_THREAD();
    auto &parser = parser_t::principal_parser();
    return parser.job_get_from_pid(pid);
}
|
|
|
|
|
2016-05-03 04:41:17 +00:00
|
|
|
/// Return true if all processes in the job have stopped or completed.
|
2018-10-02 17:30:23 +00:00
|
|
|
bool job_t::is_stopped() const {
|
|
|
|
for (const process_ptr_t &p : processes) {
|
2016-05-03 04:41:17 +00:00
|
|
|
if (!p->completed && !p->stopped) {
|
2018-10-02 17:30:23 +00:00
|
|
|
return false;
|
2012-11-19 00:30:30 +00:00
|
|
|
}
|
|
|
|
}
|
2018-10-02 17:30:23 +00:00
|
|
|
return true;
|
2005-09-20 13:26:39 +00:00
|
|
|
}
|
|
|
|
|
2016-05-03 04:41:17 +00:00
|
|
|
/// Return true if the last processes in the job has completed.
|
2018-10-02 17:30:23 +00:00
|
|
|
bool job_t::is_completed() const {
|
|
|
|
assert(!processes.empty());
|
|
|
|
for (const process_ptr_t &p : processes) {
|
2016-05-03 04:41:17 +00:00
|
|
|
if (!p->completed) {
|
2018-10-02 17:30:23 +00:00
|
|
|
return false;
|
2013-06-16 09:53:14 +00:00
|
|
|
}
|
|
|
|
}
|
2018-10-02 17:30:23 +00:00
|
|
|
return true;
|
2005-09-20 13:26:39 +00:00
|
|
|
}
|
|
|
|
|
2018-11-04 09:11:12 +00:00
|
|
|
bool job_t::job_chain_is_fully_constructed() const {
|
|
|
|
const job_t *cursor = this;
|
|
|
|
while (cursor) {
|
|
|
|
if (!cursor->is_constructed()) return false;
|
|
|
|
cursor = cursor->get_parent().get();
|
|
|
|
}
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2018-10-02 17:30:23 +00:00
|
|
|
/// Set or clear the given job flag.
void job_t::set_flag(job_flag_t flag, bool set) {
    flags.set(flag, set);
}
|
2006-10-25 20:47:59 +00:00
|
|
|
|
2018-10-02 17:30:23 +00:00
|
|
|
/// Return the current value of the given job flag.
bool job_t::get_flag(job_flag_t flag) const {
    return flags.get(flag);
}
|
2006-10-25 20:47:59 +00:00
|
|
|
|
2018-10-02 20:10:42 +00:00
|
|
|
bool job_t::signal(int signal) {
|
|
|
|
// Presumably we are distinguishing between the two cases below because we do
|
|
|
|
// not want to send ourselves the signal in question in case the job shares
|
|
|
|
// a pgid with the shell.
|
|
|
|
|
|
|
|
if (pgid != getpgrp()) {
|
|
|
|
if (killpg(pgid, signal) == -1) {
|
2018-10-20 18:58:51 +00:00
|
|
|
char buffer[512];
|
|
|
|
sprintf(buffer, "killpg(%d, %s)", pgid, strsignal(signal));
|
|
|
|
wperror(str2wcstring(buffer).c_str());
|
2018-10-02 20:10:42 +00:00
|
|
|
return false;
|
|
|
|
}
|
2016-05-03 04:41:17 +00:00
|
|
|
} else {
|
2018-10-02 20:10:42 +00:00
|
|
|
for (const auto &p : processes) {
|
|
|
|
if (!p->completed && p->pid && kill(p->pid, signal) == -1) {
|
|
|
|
return false;
|
2012-11-19 00:30:30 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2018-10-02 20:10:42 +00:00
|
|
|
return true;
|
2006-11-20 13:12:24 +00:00
|
|
|
}
|
|
|
|
|
2018-09-29 04:13:13 +00:00
|
|
|
static void mark_job_complete(const job_t *j) {
|
|
|
|
for (auto &p : j->processes) {
|
|
|
|
p->completed = 1;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2016-05-03 04:41:17 +00:00
|
|
|
/// Store the status of the process pid that was returned by waitpid, updating the
/// process's stopped/completed flags to match.
static void mark_process_status(process_t *p, int status) {
    // debug( 0, L"Process %ls %ls", p->argv[0], WIFSTOPPED (status)?L"stopped":(WIFEXITED( status
    // )?L"exited":(WIFSIGNALED( status )?L"signaled to exit":L"BLARGH")) );
    p->status = status;

    if (WIFSTOPPED(status)) {
        // The process was stopped by a signal; it may be resumed later.
        p->stopped = 1;
    } else if (WIFSIGNALED(status) || WIFEXITED(status)) {
        // The process exited normally or was terminated by a signal; either way it is done.
        p->completed = 1;
    } else {
        // This should never be reached.
        p->completed = 1;
        debug(1, "Process %ld exited abnormally", (long)p->pid);
    }
}
|
|
|
|
|
2018-11-04 07:58:44 +00:00
|
|
|
void job_mark_process_as_failed(const std::shared_ptr<job_t> &job, const process_t *failed_proc) {
|
2016-05-03 04:41:17 +00:00
|
|
|
// The given process failed to even lift off (e.g. posix_spawn failed) and so doesn't have a
|
2017-01-23 17:28:34 +00:00
|
|
|
// valid pid. Mark it and everything after it as dead.
|
|
|
|
bool found = false;
|
|
|
|
for (process_ptr_t &p : job->processes) {
|
|
|
|
found = found || (p.get() == failed_proc);
|
|
|
|
if (found) {
|
|
|
|
p->completed = true;
|
|
|
|
}
|
2013-01-30 10:22:38 +00:00
|
|
|
}
|
2012-10-29 08:45:51 +00:00
|
|
|
}
|
|
|
|
|
2016-05-03 04:41:17 +00:00
|
|
|
/// Handle status update for child \c pid.
|
|
|
|
///
|
|
|
|
/// \param pid the pid of the process whose status changes
|
|
|
|
/// \param status the status as returned by wait
|
|
|
|
static void handle_child_status(pid_t pid, int status) {
|
2017-01-23 17:28:34 +00:00
|
|
|
job_t *j = NULL;
|
2017-01-26 21:39:19 +00:00
|
|
|
const process_t *found_proc = NULL;
|
2005-09-20 13:26:39 +00:00
|
|
|
|
2012-01-30 00:36:21 +00:00
|
|
|
job_iterator_t jobs;
|
2016-05-03 04:41:17 +00:00
|
|
|
while (!found_proc && (j = jobs.next())) {
|
2017-01-23 17:28:34 +00:00
|
|
|
process_t *prev = NULL;
|
|
|
|
for (process_ptr_t &p : j->processes) {
|
2016-05-03 04:41:17 +00:00
|
|
|
if (pid == p->pid) {
|
2017-01-23 17:28:34 +00:00
|
|
|
mark_process_status(p.get(), status);
|
2017-01-26 21:39:19 +00:00
|
|
|
found_proc = p.get();
|
2012-11-19 00:30:30 +00:00
|
|
|
break;
|
|
|
|
}
|
2017-01-23 17:28:34 +00:00
|
|
|
prev = p.get();
|
2012-11-19 00:30:30 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2016-11-04 02:42:25 +00:00
|
|
|
// If the child process was not killed by a signal or other than SIGINT or SIGQUIT we're done.
|
|
|
|
if (!WIFSIGNALED(status) || (WTERMSIG(status) != SIGINT && WTERMSIG(status) != SIGQUIT)) {
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (is_interactive_session) {
|
|
|
|
// In an interactive session, tell the principal parser to skip all blocks we're executing
|
|
|
|
// so control-C returns control to the user.
|
2017-01-26 21:39:19 +00:00
|
|
|
if (found_proc) parser_t::skip_all_blocks();
|
2016-11-04 02:42:25 +00:00
|
|
|
} else {
|
|
|
|
// Deliver the SIGINT or SIGQUIT signal to ourself since we're not interactive.
|
|
|
|
struct sigaction act;
|
|
|
|
sigemptyset(&act.sa_mask);
|
|
|
|
act.sa_flags = 0;
|
|
|
|
act.sa_handler = SIG_DFL;
|
|
|
|
sigaction(SIGINT, &act, 0);
|
|
|
|
sigaction(SIGQUIT, &act, 0);
|
|
|
|
kill(getpid(), WTERMSIG(status));
|
2012-11-19 00:30:30 +00:00
|
|
|
}
|
2005-09-20 13:26:39 +00:00
|
|
|
}
|
|
|
|
|
2018-02-11 03:16:35 +00:00
|
|
|
/// Default-construct a process; all members take their in-class initializers.
process_t::process_t() {}
|
2012-11-19 00:30:30 +00:00
|
|
|
|
2018-11-04 07:58:44 +00:00
|
|
|
/// Construct a job with the given job ID, block-level IO redirections, and (possibly
/// null) parent job. The pgid starts out INVALID_PID until the job is launched.
job_t::job_t(job_id_t jobid, io_chain_t bio, std::shared_ptr<job_t> parent)
    : block_io(std::move(bio)),
      parent_job(std::move(parent)),
      pgid(INVALID_PID),
      tmodes(),
      job_id(jobid),
      flags{} {}
|
2012-11-19 00:30:30 +00:00
|
|
|
|
2017-01-27 04:00:43 +00:00
|
|
|
/// On destruction, return this job's ID to the pool (see release_job_id).
job_t::~job_t() { release_job_id(job_id); }
|
2005-09-20 13:26:39 +00:00
|
|
|
|
2016-05-03 04:41:17 +00:00
|
|
|
/// Return all the IO redirections. Start with the block IO, then walk over the processes.
|
|
|
|
io_chain_t job_t::all_io_redirections() const {
|
Big fat refactoring of how redirections work. In fish 1.x and 2.0.0, the redirections for a process were flattened into a big list associated with the job, so there was no way to tell which redirections applied to each process. Each process therefore got all the redirections associated with the job. See https://github.com/fish-shell/fish-shell/issues/877 for how this could manifest.
With this change, jobs only track their block-level redirections. Process level redirections are correctly associated with the process, and at exec time we stitch them together (block, pipe, and process redirects).
This fixes the weird issues where redirects bleed across pipelines (like #877), and also allows us to play with the order in which redirections are applied, since the final list is constructed right before it's needed. This lets us put pipes after block level redirections but before process level redirections, so that a 2>&1-type redirection gets picked up after the pipe, i.e. it should fix https://github.com/fish-shell/fish-shell/issues/110
This is a significant change. The tests all pass. Cross your fingers.
2013-08-19 23:16:41 +00:00
|
|
|
io_chain_t result = this->block_io;
|
2017-01-23 17:28:34 +00:00
|
|
|
for (const process_ptr_t &p : this->processes) {
|
Big fat refactoring of how redirections work. In fish 1.x and 2.0.0, the redirections for a process were flattened into a big list associated with the job, so there was no way to tell which redirections applied to each process. Each process therefore got all the redirections associated with the job. See https://github.com/fish-shell/fish-shell/issues/877 for how this could manifest.
With this change, jobs only track their block-level redirections. Process level redirections are correctly associated with the process, and at exec time we stitch them together (block, pipe, and process redirects).
This fixes the weird issues where redirects bleed across pipelines (like #877), and also allows us to play with the order in which redirections are applied, since the final list is constructed right before it's needed. This lets us put pipes after block level redirections but before process level redirections, so that a 2>&1-type redirection gets picked up after the pipe, i.e. it should fix https://github.com/fish-shell/fish-shell/issues/110
This is a significant change. The tests all pass. Cross your fingers.
2013-08-19 23:16:41 +00:00
|
|
|
result.append(p->io_chain());
|
|
|
|
}
|
|
|
|
return result;
|
|
|
|
}
|
|
|
|
|
2014-10-25 23:51:25 +00:00
|
|
|
/// Counter type for tallying observed SIGCHLD signals (see s_sigchld_generation_cnt).
typedef unsigned int process_generation_count_t;
|
2005-09-20 13:26:39 +00:00
|
|
|
|
2018-11-18 21:18:18 +00:00
|
|
|
/// A list of pids/pgids that have been disowned. They are kept around until either they exit or
/// we exit. Poll these from time-to-time to prevent zombie processes from happening (#5342).
/// Process-group entries are stored negated, ready to be passed to waitpid(2).
static std::vector<pid_t> s_disowned_pids;
|
|
|
|
|
|
|
|
void add_disowned_pgid(pid_t pgid) {
|
2018-12-30 16:11:20 +00:00
|
|
|
// NEVER add our own (or an invalid) pgid as they are not unique to only
|
|
|
|
// one job, and may result in a deadlock if we attempt the wait.
|
|
|
|
if (pgid != getpgrp() && pgid > 0) {
|
|
|
|
// waitpid(2) is signalled to wait on a process group rather than a
|
|
|
|
// process id by using the negative of its value.
|
2018-12-30 15:04:57 +00:00
|
|
|
s_disowned_pids.push_back(pgid * -1);
|
|
|
|
}
|
2018-11-18 21:18:18 +00:00
|
|
|
}
|
|
|
|
|
2018-10-11 23:28:06 +00:00
|
|
|
/// A static value tracking how many SIGCHLDs we have seen, which is used in a heuristic to
/// determine if we should call waitpid() at all in `process_mark_finished_children`.
static volatile process_generation_count_t s_sigchld_generation_cnt = 0;
|
2012-11-19 00:30:30 +00:00
|
|
|
|
2018-10-02 15:46:57 +00:00
|
|
|
/// See if any children of a fully constructed job have exited or been killed, and mark them
|
|
|
|
/// accordingly. We cannot reap just any child that's exited, (as in, `waitpid(-1,…`) since
|
|
|
|
/// that may reap a pgrp leader that has exited but in a job with another process that has yet to
|
2018-10-11 23:28:06 +00:00
|
|
|
/// launch and join its pgrp (#5219).
|
2018-10-02 15:46:57 +00:00
|
|
|
/// \param block_on_fg when true, blocks waiting for the foreground job to finish.
|
|
|
|
/// \return whether the operation completed without incident
|
|
|
|
static bool process_mark_finished_children(bool block_on_fg) {
|
2014-10-25 23:51:25 +00:00
|
|
|
ASSERT_IS_MAIN_THREAD();
|
2016-05-03 04:41:17 +00:00
|
|
|
|
2018-10-11 23:28:06 +00:00
|
|
|
// We can't always use SIGCHLD to determine if waitpid() should be called since it is not
|
|
|
|
// strictly one-SIGCHLD-per-one-child-exited (i.e. multiple exits can share a SIGCHLD call) and
|
|
|
|
// we a) return immediately the first time a dead child is reaped, b) explicitly skip over jobs
|
|
|
|
// that aren't yet fully constructed, so it's possible that we can get SIGCHLD and even find a
|
|
|
|
// killed child in the jobs we are reaping, but also have an exited child process in a job that
|
|
|
|
// hasn't been fully constructed yet - which means we can end up never knowing about the exited
|
|
|
|
// child process in that job if we use SIGCHLD count as the only metric for whether or not
|
|
|
|
// waitpid() is called.
|
|
|
|
// Without this optimization, the slowdown caused by calling waitpid() even just once each time
|
|
|
|
// `process_mark_finished_children()` is called is rather obvious (see the performance-related
|
|
|
|
// discussion in #5219), making it worth the complexity of this heuristic.
|
|
|
|
|
|
|
|
/// Tracks whether or not we received SIGCHLD without checking all jobs (due to jobs under
|
|
|
|
/// construction), forcing a full waitpid loop.
|
|
|
|
static bool dirty_state = true;
|
|
|
|
static process_generation_count_t last_sigchld_count = -1;
|
|
|
|
|
|
|
|
// If the last time that we received a SIGCHLD we did not waitpid all jobs, we cannot early out.
|
|
|
|
if (!dirty_state && last_sigchld_count == s_sigchld_generation_cnt) {
|
|
|
|
// If we have foreground jobs, we need to block on them below
|
2018-10-29 00:09:57 +00:00
|
|
|
if (!block_on_fg) {
|
2018-10-11 23:28:06 +00:00
|
|
|
// We can assume that no children have exited and that all waitpid calls with
|
|
|
|
// WNOHANG below will confirm that.
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
last_sigchld_count = s_sigchld_generation_cnt;
|
|
|
|
bool jobs_skipped = false;
|
2018-10-02 15:46:57 +00:00
|
|
|
bool has_error = false;
|
|
|
|
job_t *job_fg = nullptr;
|
2018-10-11 23:28:06 +00:00
|
|
|
|
2018-10-02 15:46:57 +00:00
|
|
|
// Reap only processes belonging to fully-constructed jobs to prevent reaping of processes
|
|
|
|
// before others in the same process group have a chance to join their pgrp.
|
|
|
|
job_iterator_t jobs;
|
|
|
|
while (auto j = jobs.next()) {
|
2018-10-11 23:28:06 +00:00
|
|
|
// (A job can have pgrp INVALID_PID if it consists solely of builtins that perform no IO)
|
2018-10-02 17:30:23 +00:00
|
|
|
if (j->pgid == INVALID_PID || !j->is_constructed()) {
|
2018-10-02 16:19:56 +00:00
|
|
|
debug(5, "Skipping wait on incomplete job %d (%ls)", j->job_id, j->preview().c_str());
|
2018-10-11 23:28:06 +00:00
|
|
|
jobs_skipped = true;
|
2018-10-02 15:46:57 +00:00
|
|
|
continue;
|
|
|
|
}
|
2018-09-29 04:13:13 +00:00
|
|
|
|
2018-10-11 23:28:06 +00:00
|
|
|
if (j != job_fg && j->is_foreground() && !j->is_stopped() && !j->is_completed()) {
|
2018-11-04 09:11:12 +00:00
|
|
|
// Ensure that we don't have multiple fully constructed foreground jobs.
|
|
|
|
assert((!job_fg || !job_fg->job_chain_is_fully_constructed() ||
|
|
|
|
!j->job_chain_is_fully_constructed()) &&
|
|
|
|
"More than one active, fully-constructed foreground job!");
|
2018-10-11 23:28:06 +00:00
|
|
|
job_fg = j;
|
|
|
|
}
|
|
|
|
|
2018-10-29 00:09:57 +00:00
|
|
|
// Whether we will wait for uncompleted processes depends on the combination of
|
|
|
|
// `block_on_fg` and the nature of the process. Default is WNOHANG, but if foreground,
|
|
|
|
// constructed, not stopped, *and* block_on_fg is true, then no WNOHANG (i.e. "HANG").
|
2018-10-02 15:46:57 +00:00
|
|
|
int options = WUNTRACED | WNOHANG;
|
2018-10-02 17:30:23 +00:00
|
|
|
|
2018-10-02 15:46:57 +00:00
|
|
|
// We should never block twice in the same go, as `waitpid()' returning could mean one
|
|
|
|
// process completed or many, and there is a race condition when calling `waitpid()` after
|
|
|
|
// the process group exits having reaped all children and terminated the process group and
|
|
|
|
// when a subsequent call to `waitpid()` for the same process group returns immediately if
|
|
|
|
// that process group no longer exists. i.e. it's possible for all processes to have exited
|
|
|
|
// but the process group to remain momentarily valid, in which case calling `waitpid()`
|
|
|
|
// without WNOHANG can cause an infinite wait. Additionally, only wait on external jobs that
|
|
|
|
// spawned new process groups (i.e. JOB_CONTROL). We do not break or return on error as we
|
|
|
|
// wait on only one pgrp at a time and we need to check all pgrps before returning, but we
|
|
|
|
// never wait/block on fg processes after an error has been encountered to give ourselves
|
|
|
|
// (elsewhere) a chance to handle the fallout from process termination, etc.
|
2018-10-29 04:37:53 +00:00
|
|
|
if (!has_error && block_on_fg && j == job_fg) {
|
2018-10-02 17:30:23 +00:00
|
|
|
debug(4, "Waiting on processes from foreground job %d", job_fg->pgid);
|
2018-10-02 15:46:57 +00:00
|
|
|
options &= ~WNOHANG;
|
|
|
|
}
|
2018-09-29 04:13:13 +00:00
|
|
|
|
2019-01-02 06:12:07 +00:00
|
|
|
// Child jobs (produced via execution of functions) share job ids with their not-yet-
|
|
|
|
// fully-constructed parent jobs, so we have to wait on these by individual process id
|
|
|
|
// and not by the shared pgroup. End result is the same, but it just makes more calls
|
|
|
|
// to the kernel.
|
|
|
|
bool wait_by_process = !j->job_chain_is_fully_constructed();
|
|
|
|
|
|
|
|
// Firejail can result in jobs with pgroup 0, in which case we cannot wait by
|
|
|
|
// job id. See discussion in #5295.
|
|
|
|
if (j->pgid == 0) {
|
|
|
|
wait_by_process = true;
|
|
|
|
}
|
|
|
|
|
|
|
|
// Cygwin does some voodoo with regards to process management that I do not understand, but
|
|
|
|
// long story short, we cannot reap processes by their pgroup. The way child processes are
|
|
|
|
// launched under Cygwin is... weird, and outwardly they do not appear to retain information
|
|
|
|
// about their parent process when viewed in Task Manager. Waiting on processes by their
|
|
|
|
// pgroup results in never reaping any, so we just wait on them by process id instead.
|
|
|
|
if (is_cygwin()) {
|
|
|
|
wait_by_process = true;
|
|
|
|
}
|
|
|
|
|
2018-12-31 01:02:38 +00:00
|
|
|
// When waiting on processes individually in a pipeline, we need to enumerate in reverse
|
|
|
|
// order so that the first process we actually wait on (i.e. ~WNOHANG) is the last process
|
|
|
|
// in the IO chain, because that's the one that controls the lifetime of the foreground job
|
|
|
|
// - as long as it is still running, we are in the background and once it exits or is
|
|
|
|
// killed, all previous jobs in the IO pipeline must necessarily terminate as well.
|
|
|
|
auto process = j->processes.rbegin();
|
2018-10-11 23:28:06 +00:00
|
|
|
// waitpid(2) returns 1 process each time, we need to keep calling it until we've reaped all
|
|
|
|
// children of the pgrp in question or else we can't reset the dirty_state flag. In all
|
|
|
|
// cases, calling waitpid(2) is faster than potentially calling select_try() on a process
|
|
|
|
// that has exited, which will force us to wait the full timeout before coming back here and
|
|
|
|
// calling waitpid() again.
|
|
|
|
while (true) {
|
2018-10-08 17:44:47 +00:00
|
|
|
int status;
|
|
|
|
pid_t pid;
|
|
|
|
|
|
|
|
if (wait_by_process) {
|
|
|
|
// If the evaluation of a function resulted in the sharing of a pgroup between the
|
|
|
|
// real job and the job that shouldn't have been created as a separate job AND the
|
|
|
|
// parent job is still under construction (which is the case when continue_job() is
|
|
|
|
// first called on the child job during the recursive call to exec_job() before the
|
|
|
|
// parent job has been fully constructed), we need to call waitpid(2) on the
|
|
|
|
// individual processes of the child job instead of using a catch-all waitpid(2)
|
|
|
|
// call on the job's process group.
|
2018-12-31 01:02:38 +00:00
|
|
|
if (process == j->processes.rend()) {
|
2018-10-08 17:44:47 +00:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
assert((*process)->pid != INVALID_PID && "Waiting by process on an invalid PID!");
|
2018-12-31 02:46:49 +00:00
|
|
|
if ((*process)->completed) {
|
|
|
|
// This process has already been waited on to completion
|
2018-12-31 03:44:14 +00:00
|
|
|
process++;
|
2018-12-31 02:46:49 +00:00
|
|
|
continue;
|
|
|
|
}
|
|
|
|
|
2018-12-31 01:02:38 +00:00
|
|
|
if ((options & WNOHANG) == 0) {
|
2018-12-31 03:44:14 +00:00
|
|
|
debug(4, "Waiting on individual process %d: %ls", (*process)->pid, (*process)->argv0());
|
2018-12-31 01:02:38 +00:00
|
|
|
} else {
|
|
|
|
debug(4, "waitpid with WNOHANG on individual process %d", (*process)->pid);
|
|
|
|
}
|
2018-10-08 17:44:47 +00:00
|
|
|
pid = waitpid((*process)->pid, &status, options);
|
2018-12-31 03:44:14 +00:00
|
|
|
|
2018-10-08 17:44:47 +00:00
|
|
|
process++;
|
|
|
|
} else {
|
2018-10-29 00:09:57 +00:00
|
|
|
// A negative PID passed in to `waitpid()` means wait on any child in that process
|
|
|
|
// group
|
2018-10-08 17:44:47 +00:00
|
|
|
pid = waitpid(-1 * j->pgid, &status, options);
|
|
|
|
}
|
|
|
|
|
2018-10-11 23:28:06 +00:00
|
|
|
if (pid > 0) {
|
|
|
|
// A child process has been reaped
|
|
|
|
handle_child_status(pid, status);
|
2019-01-20 23:04:27 +00:00
|
|
|
|
|
|
|
// Always set WNOHANG (that is, don't hang). Otherwise we might wait on a non-stopped job
|
|
|
|
// that becomes stopped, but we don't refresh our view of the process state before
|
|
|
|
// calling waitpid(2) again here.
|
|
|
|
options |= WNOHANG;
|
2018-10-29 00:09:57 +00:00
|
|
|
} else if (pid == 0 || errno == ECHILD) {
|
2018-10-11 23:28:06 +00:00
|
|
|
// No killed/dead children in this particular process group
|
2018-10-08 17:44:47 +00:00
|
|
|
if (!wait_by_process) {
|
|
|
|
break;
|
|
|
|
}
|
2018-10-11 23:28:06 +00:00
|
|
|
} else {
|
2018-10-29 00:09:57 +00:00
|
|
|
// pid < 0 indicates an error. One likely failure is ECHILD (no children), which is
|
|
|
|
// not an error and is ignored. The other likely failure is EINTR, which means we
|
|
|
|
// got a signal, which is considered an error. We absolutely do not break or return
|
|
|
|
// on error, as we need to iterate over all constructed jobs but we only call
|
|
|
|
// waitpid for one pgrp at a time. We do bypass future waits in case of error,
|
|
|
|
// however.
|
2018-10-08 17:44:47 +00:00
|
|
|
has_error = true;
|
2018-10-29 19:22:46 +00:00
|
|
|
|
|
|
|
// Do not audibly complain on interrupt (see #5293)
|
|
|
|
if (errno != EINTR) {
|
|
|
|
wperror(L"waitpid in process_mark_finished_children");
|
|
|
|
}
|
2018-10-11 23:28:06 +00:00
|
|
|
break;
|
2014-10-25 23:51:25 +00:00
|
|
|
}
|
2012-11-19 00:30:30 +00:00
|
|
|
}
|
|
|
|
}
|
2014-10-25 23:51:25 +00:00
|
|
|
|
2018-11-18 21:18:18 +00:00
|
|
|
// Poll disowned processes/process groups, but do nothing with the result. Only used to avoid
|
2018-12-31 01:02:38 +00:00
|
|
|
// zombie processes. Entries have already been converted to negative for process groups.
|
2018-11-18 21:18:18 +00:00
|
|
|
int status;
|
|
|
|
s_disowned_pids.erase(std::remove_if(s_disowned_pids.begin(), s_disowned_pids.end(),
|
|
|
|
[&status](pid_t pid) { return waitpid(pid, &status, WNOHANG) > 0; }),
|
|
|
|
s_disowned_pids.end());
|
|
|
|
|
2018-10-11 23:28:06 +00:00
|
|
|
// Yes, the below can be collapsed to a single line, but it's worth being explicit about it with
|
|
|
|
// the comments. Fret not, the compiler will optimize it. (It better!)
|
|
|
|
if (jobs_skipped) {
|
|
|
|
// We received SIGCHLD but were not able to definitely say whether or not all children were
|
|
|
|
// reaped.
|
|
|
|
dirty_state = true;
|
2018-10-29 00:09:57 +00:00
|
|
|
} else {
|
2018-10-11 23:28:06 +00:00
|
|
|
// We can safely assume that no SIGCHLD means we can just return next time around
|
|
|
|
dirty_state = false;
|
|
|
|
}
|
|
|
|
|
2018-10-02 15:46:57 +00:00
|
|
|
return !has_error;
|
2014-10-25 23:51:25 +00:00
|
|
|
}
|
|
|
|
|
2016-05-03 04:41:17 +00:00
|
|
|
/// This is called from a signal handler. The signal is always SIGCHLD.
|
2016-10-09 21:38:26 +00:00
|
|
|
void job_handle_signal(int signal, siginfo_t *info, void *context) {
|
|
|
|
UNUSED(signal);
|
|
|
|
UNUSED(info);
|
|
|
|
UNUSED(context);
|
2016-05-03 04:41:17 +00:00
|
|
|
// This is the only place that this generation count is modified. It's OK if it overflows.
|
2016-11-03 04:54:57 +00:00
|
|
|
s_sigchld_generation_cnt += 1;
|
2005-09-20 13:26:39 +00:00
|
|
|
}
|
|
|
|
|
2016-05-03 04:41:17 +00:00
|
|
|
/// Given a command like "cat file", truncate it to a reasonable length.
|
|
|
|
static wcstring truncate_command(const wcstring &cmd) {
|
2014-11-03 18:56:16 +00:00
|
|
|
const size_t max_len = 32;
|
2016-05-03 04:41:17 +00:00
|
|
|
if (cmd.size() <= max_len) {
|
|
|
|
// No truncation necessary.
|
2014-11-03 18:56:16 +00:00
|
|
|
return cmd;
|
|
|
|
}
|
2016-05-03 04:41:17 +00:00
|
|
|
|
|
|
|
// Truncation required.
|
2018-10-29 00:09:57 +00:00
|
|
|
const size_t ellipsis_length = wcslen(ellipsis_str); // no need for wcwidth
|
2014-11-03 18:56:16 +00:00
|
|
|
size_t trunc_length = max_len - ellipsis_length;
|
2016-05-03 04:41:17 +00:00
|
|
|
// Eat trailing whitespace.
|
|
|
|
while (trunc_length > 0 && iswspace(cmd.at(trunc_length - 1))) {
|
2014-11-03 18:56:16 +00:00
|
|
|
trunc_length -= 1;
|
|
|
|
}
|
|
|
|
wcstring result = wcstring(cmd, 0, trunc_length);
|
2016-05-03 04:41:17 +00:00
|
|
|
// Append ellipsis.
|
2018-03-09 20:40:35 +00:00
|
|
|
result.append(ellipsis_str);
|
2014-11-03 18:56:16 +00:00
|
|
|
return result;
|
|
|
|
}
|
|
|
|
|
2016-05-03 04:41:17 +00:00
|
|
|
/// Format information about job status for the user to look at.
|
2017-05-27 22:41:22 +00:00
|
|
|
typedef enum { JOB_STOPPED, JOB_ENDED } job_status_t;
|
|
|
|
static void format_job_info(const job_t *j, job_status_t status) {
|
|
|
|
const wchar_t *msg = L"Job %d, '%ls' has ended"; // this is the most common status msg
|
|
|
|
if (status == JOB_STOPPED) msg = L"Job %d, '%ls' has stopped";
|
|
|
|
|
2012-11-19 00:30:30 +00:00
|
|
|
fwprintf(stdout, L"\r");
|
2017-05-27 22:41:22 +00:00
|
|
|
fwprintf(stdout, _(msg), j->job_id, truncate_command(j->command()).c_str());
|
2012-11-19 00:30:30 +00:00
|
|
|
fflush(stdout);
|
2018-11-28 12:02:49 +00:00
|
|
|
if (clr_eol) tputs(clr_eol, 1, &writeb);
|
2012-11-19 00:30:30 +00:00
|
|
|
fwprintf(stdout, L"\n");
|
2005-09-20 13:26:39 +00:00
|
|
|
}
|
|
|
|
|
2016-05-03 04:41:17 +00:00
|
|
|
/// Fire an event of the given \p type (e.g. EVENT_EXIT) for \p pid, passing \p msg, the pid
/// and the exit \p status as the event's string arguments.
///
/// NOTE(review): `event` is not declared within this chunk; presumably a static/file-scope
/// event_t defined above — confirm against the full file. The object appears to be reused
/// across calls, hence the argument reset at the end.
void proc_fire_event(const wchar_t *msg, int type, pid_t pid, int status) {
    event.type = type;
    event.param1.pid = pid;

    // Event handlers receive the message, pid and status as string arguments.
    event.arguments.push_back(msg);
    event.arguments.push_back(to_string<int>(pid));
    event.arguments.push_back(to_string<int>(status));
    event_fire(&event);
    // Clear the arguments so the next invocation starts from a clean slate.
    event.arguments.resize(0);
}
|
2005-10-11 19:23:43 +00:00
|
|
|
|
2018-12-31 06:43:26 +00:00
|
|
|
static bool process_clean_after_marking(bool allow_interactive) {
|
2012-02-16 08:24:27 +00:00
|
|
|
ASSERT_IS_MAIN_THREAD();
|
2012-11-19 00:30:30 +00:00
|
|
|
job_t *jnext;
|
2018-12-31 06:43:26 +00:00
|
|
|
bool found = false;
|
2012-11-19 00:30:30 +00:00
|
|
|
|
2018-10-29 00:09:57 +00:00
|
|
|
// this function may fire an event handler, we do not want to call ourselves recursively (to
|
|
|
|
// avoid infinite recursion).
|
2014-10-25 23:51:25 +00:00
|
|
|
static bool locked = false;
|
2016-05-03 04:41:17 +00:00
|
|
|
if (locked) {
|
2014-10-25 23:51:25 +00:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
locked = true;
|
2016-05-03 04:41:17 +00:00
|
|
|
|
2016-09-09 20:13:45 +00:00
|
|
|
// this may be invoked in an exit handler, after the TERM has been torn down
|
|
|
|
// don't try to print in that case (#3222)
|
|
|
|
const bool interactive = allow_interactive && cur_term != NULL;
|
|
|
|
|
2012-01-30 00:36:21 +00:00
|
|
|
job_iterator_t jobs;
|
2014-11-03 18:56:16 +00:00
|
|
|
const size_t job_count = jobs.count();
|
2012-01-30 00:36:21 +00:00
|
|
|
jnext = jobs.next();
|
2016-05-03 04:41:17 +00:00
|
|
|
while (jnext) {
|
2012-01-30 00:36:21 +00:00
|
|
|
job_t *j = jnext;
|
|
|
|
jnext = jobs.next();
|
2012-11-19 00:30:30 +00:00
|
|
|
|
2016-05-03 04:41:17 +00:00
|
|
|
// If we are reaping only jobs who do not need status messages sent to the console, do not
|
|
|
|
// consider reaping jobs that need status messages.
|
2018-10-02 17:30:23 +00:00
|
|
|
if ((!j->get_flag(job_flag_t::SKIP_NOTIFICATION)) && (!interactive) &&
|
|
|
|
(!j->is_foreground())) {
|
2012-11-19 00:30:30 +00:00
|
|
|
continue;
|
|
|
|
}
|
|
|
|
|
2017-01-23 17:28:34 +00:00
|
|
|
for (const process_ptr_t &p : j->processes) {
|
2012-11-19 00:30:30 +00:00
|
|
|
int s;
|
2016-05-03 04:41:17 +00:00
|
|
|
if (!p->completed) continue;
|
2012-11-19 00:30:30 +00:00
|
|
|
|
2016-05-03 04:41:17 +00:00
|
|
|
if (!p->pid) continue;
|
2012-11-19 00:30:30 +00:00
|
|
|
|
|
|
|
s = p->status;
|
|
|
|
|
2018-02-03 15:38:15 +00:00
|
|
|
// TODO: The generic process-exit event is useless and unused.
|
|
|
|
// Remove this in future.
|
2018-10-03 00:53:52 +00:00
|
|
|
// Update: This event is used for cleaning up the psub temporary files and folders.
|
|
|
|
// Removing it breaks the psub tests as a result.
|
2016-05-03 04:41:17 +00:00
|
|
|
proc_fire_event(L"PROCESS_EXIT", EVENT_EXIT, p->pid,
|
|
|
|
(WIFSIGNALED(s) ? -1 : WEXITSTATUS(s)));
|
|
|
|
|
2016-10-30 21:37:54 +00:00
|
|
|
// Ignore signal SIGPIPE.We issue it ourselves to the pipe writer when the pipe reader
|
|
|
|
// dies.
|
|
|
|
if (!WIFSIGNALED(s) || WTERMSIG(s) == SIGPIPE) {
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
|
|
|
|
// Handle signals other than SIGPIPE.
|
2017-01-23 17:28:34 +00:00
|
|
|
int proc_is_job = (p->is_first_in_job && p->is_last_in_job);
|
2018-10-02 17:30:23 +00:00
|
|
|
if (proc_is_job) j->set_flag(job_flag_t::NOTIFIED, true);
|
2018-09-18 13:56:36 +00:00
|
|
|
// Always report crashes.
|
2018-10-02 17:30:23 +00:00
|
|
|
if (j->get_flag(job_flag_t::SKIP_NOTIFICATION) &&
|
|
|
|
!contains(crashsignals, WTERMSIG(p->status))) {
|
2016-10-30 21:37:54 +00:00
|
|
|
continue;
|
|
|
|
}
|
|
|
|
|
|
|
|
// Print nothing if we get SIGINT in the foreground process group, to avoid spamming
|
|
|
|
// obvious stuff on the console (#1119). If we get SIGINT for the foreground
|
|
|
|
// process, assume the user typed ^C and can see it working. It's possible they
|
|
|
|
// didn't, and the signal was delivered via pkill, etc., but the SIGINT/SIGTERM
|
|
|
|
// distinction is precisely to allow INT to be from a UI
|
|
|
|
// and TERM to be programmatic, so this assumption is keeping with the design of
|
|
|
|
// signals. If echoctl is on, then the terminal will have written ^C to the console.
|
|
|
|
// If off, it won't have. We don't echo ^C either way, so as to respect the user's
|
|
|
|
// preference.
|
2018-10-02 17:30:23 +00:00
|
|
|
if (WTERMSIG(p->status) != SIGINT || !j->is_foreground()) {
|
2016-10-30 21:37:54 +00:00
|
|
|
if (proc_is_job) {
|
|
|
|
// We want to report the job number, unless it's the only job, in which case
|
|
|
|
// we don't need to.
|
|
|
|
const wcstring job_number_desc =
|
2017-02-09 21:32:30 +00:00
|
|
|
(job_count == 1) ? wcstring() : format_string(_(L"Job %d, "), j->job_id);
|
2018-11-28 14:08:24 +00:00
|
|
|
fwprintf(stdout, _(L"%ls: %ls\'%ls\' terminated by signal %ls (%ls)"),
|
2016-11-03 04:54:57 +00:00
|
|
|
program_name, job_number_desc.c_str(),
|
|
|
|
truncate_command(j->command()).c_str(), sig2wcs(WTERMSIG(p->status)),
|
|
|
|
signal_get_desc(WTERMSIG(p->status)));
|
2016-10-30 21:37:54 +00:00
|
|
|
} else {
|
|
|
|
const wcstring job_number_desc =
|
2016-11-03 04:54:57 +00:00
|
|
|
(job_count == 1) ? wcstring() : format_string(L"from job %d, ", j->job_id);
|
2017-05-02 04:44:30 +00:00
|
|
|
const wchar_t *fmt =
|
2018-11-28 14:08:24 +00:00
|
|
|
_(L"%ls: Process %d, \'%ls\' %ls\'%ls\' terminated by signal %ls (%ls)");
|
2017-05-02 04:44:30 +00:00
|
|
|
fwprintf(stdout, fmt, program_name, p->pid, p->argv0(), job_number_desc.c_str(),
|
2016-11-03 04:54:57 +00:00
|
|
|
truncate_command(j->command()).c_str(), sig2wcs(WTERMSIG(p->status)),
|
|
|
|
signal_get_desc(WTERMSIG(p->status)));
|
2012-11-19 00:30:30 +00:00
|
|
|
}
|
2016-10-22 18:21:13 +00:00
|
|
|
|
2018-11-28 12:02:49 +00:00
|
|
|
if (clr_eol) tputs(clr_eol, 1, &writeb);
|
2016-10-30 21:37:54 +00:00
|
|
|
fwprintf(stdout, L"\n");
|
2012-11-19 00:30:30 +00:00
|
|
|
}
|
2018-12-31 06:43:26 +00:00
|
|
|
found = false;
|
2016-10-30 21:37:54 +00:00
|
|
|
p->status = 0; // clear status so it is not reported more than once
|
2012-11-19 00:30:30 +00:00
|
|
|
}
|
|
|
|
|
2016-05-03 04:41:17 +00:00
|
|
|
// If all processes have completed, tell the user the job has completed and delete it from
|
|
|
|
// the active job list.
|
2018-10-02 17:30:23 +00:00
|
|
|
if (j->is_completed()) {
|
|
|
|
if (!j->is_foreground() && !j->get_flag(job_flag_t::NOTIFIED) &&
|
|
|
|
!j->get_flag(job_flag_t::SKIP_NOTIFICATION)) {
|
2017-05-27 22:41:22 +00:00
|
|
|
format_job_info(j, JOB_ENDED);
|
2018-12-31 06:43:26 +00:00
|
|
|
found = true;
|
2012-11-19 00:30:30 +00:00
|
|
|
}
|
2018-02-03 15:38:15 +00:00
|
|
|
// TODO: The generic process-exit event is useless and unused.
|
|
|
|
// Remove this in future.
|
2018-10-02 03:55:18 +00:00
|
|
|
// Don't fire the exit-event for jobs with pgid INVALID_PID.
|
2018-02-02 22:31:53 +00:00
|
|
|
// That's our "sentinel" pgid, for jobs that don't (yet) have a pgid,
|
|
|
|
// or jobs that consist entirely of builtins (and hence don't have a process).
|
|
|
|
// This causes issues if fish is PID 2, which is quite common on WSL. See #4582.
|
2018-10-02 03:55:18 +00:00
|
|
|
if (j->pgid != INVALID_PID) {
|
2018-02-02 22:31:53 +00:00
|
|
|
proc_fire_event(L"JOB_EXIT", EVENT_EXIT, -j->pgid, 0);
|
|
|
|
}
|
2012-11-19 00:30:30 +00:00
|
|
|
proc_fire_event(L"JOB_EXIT", EVENT_JOB_ID, j->job_id, 0);
|
|
|
|
|
2017-01-26 22:47:32 +00:00
|
|
|
job_remove(j);
|
2018-10-02 17:30:23 +00:00
|
|
|
} else if (j->is_stopped() && !j->get_flag(job_flag_t::NOTIFIED)) {
|
2016-05-03 04:41:17 +00:00
|
|
|
// Notify the user about newly stopped jobs.
|
2018-10-02 17:30:23 +00:00
|
|
|
if (!j->get_flag(job_flag_t::SKIP_NOTIFICATION)) {
|
2017-05-27 22:41:22 +00:00
|
|
|
format_job_info(j, JOB_STOPPED);
|
2018-12-31 06:43:26 +00:00
|
|
|
found = true;
|
2012-11-19 00:30:30 +00:00
|
|
|
}
|
2018-10-02 17:30:23 +00:00
|
|
|
j->set_flag(job_flag_t::NOTIFIED, true);
|
2012-11-19 00:30:30 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2016-05-03 04:41:17 +00:00
|
|
|
if (found) fflush(stdout);
|
2012-11-19 00:30:30 +00:00
|
|
|
|
2017-10-22 07:10:23 +00:00
|
|
|
locked = false;
|
|
|
|
|
|
|
|
return found;
|
|
|
|
}
|
|
|
|
|
2018-12-31 06:43:26 +00:00
|
|
|
bool job_reap(bool allow_interactive) {
|
2017-10-22 07:10:23 +00:00
|
|
|
ASSERT_IS_MAIN_THREAD();
|
2018-12-31 06:43:26 +00:00
|
|
|
bool found = false;
|
2017-10-22 07:10:23 +00:00
|
|
|
|
|
|
|
process_mark_finished_children(false);
|
|
|
|
|
|
|
|
// Preserve the exit status.
|
|
|
|
const int saved_status = proc_get_last_status();
|
|
|
|
|
|
|
|
found = process_clean_after_marking(allow_interactive);
|
|
|
|
|
2016-05-03 04:41:17 +00:00
|
|
|
// Restore the exit status.
|
2013-12-31 22:37:37 +00:00
|
|
|
proc_set_last_status(saved_status);
|
|
|
|
|
2012-11-19 00:30:30 +00:00
|
|
|
return found;
|
2005-09-20 13:26:39 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
#ifdef HAVE__PROC_SELF_STAT
|
2006-01-20 14:27:21 +00:00
|
|
|
|
2016-05-03 04:41:17 +00:00
|
|
|
/// Maximum length of a /proc/[PID]/stat filename.
#define FN_SIZE 256

/// Get the CPU time for the specified process.
///
/// Reads /proc/[pid]/stat and returns the sum of user, system, child-user and child-system
/// time (utime + stime + cutime + cstime, in clock ticks per proc(5)), or 0 on any failure:
/// no valid pid, unopenable file, or a parse that yields fewer than 17 fields.
unsigned long proc_get_jiffies(process_t *p) {
    // Builtins/functions have pid <= 0: no /proc entry to read.
    if (p->pid <= 0) return 0;

    wchar_t fn[FN_SIZE];
    char state;
    // These variables mirror the field layout documented in proc(5) for /proc/[pid]/stat.
    // Most are parsed only to reach the fields we care about (utime/stime/cutime/cstime).
    int pid, ppid, pgrp, session, tty_nr, tpgid, exit_signal, processor;
    long int cutime, cstime, priority, nice, placeholder, itrealvalue, rss;
    unsigned long int flags, minflt, cminflt, majflt, cmajflt, utime, stime, starttime, vsize, rlim,
        startcode, endcode, startstack, kstkesp, kstkeip, signal, blocked, sigignore, sigcatch,
        wchan, nswap, cnswap;
    char comm[1024];

    swprintf(fn, FN_SIZE, L"/proc/%d/stat", p->pid);
    FILE *f = wfopen(fn, "r");
    if (!f) return 0;

    // TODO: replace the use of fscanf() as it is brittle and should never be used.
    // Field widths (%9d etc.) bound each conversion to guard against malformed input.
    int count = fscanf(f,
                       "%9d %1023s %c %9d %9d %9d %9d %9d %9lu "
                       "%9lu %9lu %9lu %9lu %9lu %9lu %9ld %9ld %9ld "
                       "%9ld %9ld %9ld %9lu %9lu %9ld %9lu %9lu %9lu "
                       "%9lu %9lu %9lu %9lu %9lu %9lu %9lu %9lu %9lu "
                       "%9lu %9d %9d ",
                       &pid, comm, &state, &ppid, &pgrp, &session, &tty_nr, &tpgid, &flags, &minflt,
                       &cminflt, &majflt, &cmajflt, &utime, &stime, &cutime, &cstime, &priority,
                       &nice, &placeholder, &itrealvalue, &starttime, &vsize, &rss, &rlim,
                       &startcode, &endcode, &startstack, &kstkesp, &kstkeip, &signal, &blocked,
                       &sigignore, &sigcatch, &wchan, &nswap, &cnswap, &exit_signal, &processor);
    fclose(f);
    // Require at least through cstime (the 17th field) for a meaningful result.
    if (count < 17) return 0;
    return utime + stime + cutime + cstime;
}
|
|
|
|
|
2016-05-03 04:41:17 +00:00
|
|
|
/// Update the CPU time for all jobs.
|
|
|
|
void proc_update_jiffies() {
|
|
|
|
job_t *job;
|
2012-11-19 00:30:30 +00:00
|
|
|
job_iterator_t j;
|
|
|
|
|
2016-05-03 04:41:17 +00:00
|
|
|
for (job = j.next(); job; job = j.next()) {
|
2017-01-23 18:38:55 +00:00
|
|
|
for (process_ptr_t &p : job->processes) {
|
2012-11-19 00:30:30 +00:00
|
|
|
gettimeofday(&p->last_time, 0);
|
2017-01-23 18:39:53 +00:00
|
|
|
p->last_jiffies = proc_get_jiffies(p.get());
|
2012-11-19 00:30:30 +00:00
|
|
|
}
|
|
|
|
}
|
2005-09-20 13:26:39 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
#endif
|
|
|
|
|
2018-10-29 00:00:52 +00:00
|
|
|
/// The return value of select_try(), indicating IO readiness or an error.
enum class select_try_t {
    /// One or more fds have data ready for read.
    DATA_READY,
    /// The timeout elapsed without any data becoming available for read.
    TIMEOUT,
    /// There were no FDs in the io chain for which to select on.
    IOCHAIN_EMPTY,
};
|
|
|
|
|
2016-05-03 04:41:17 +00:00
|
|
|
/// Check if there are buffers associated with the job, and select on them for a while if
/// available.
///
/// \param j the job to test
/// \return the status of the select operation
static select_try_t select_try(job_t *j) {
    fd_set fds;
    int maxfd = -1;

    FD_ZERO(&fds);

    // Collect the read ends of all buffer redirections in the job's io chain.
    const io_chain_t chain = j->all_io_redirections();
    for (const auto &io : chain) {
        if (io->io_mode == io_mode_t::buffer) {
            auto io_pipe = static_cast<const io_pipe_t *>(io.get());
            int fd = io_pipe->pipe_fd[0];
            FD_SET(fd, &fds);
            maxfd = std::max(maxfd, fd);
            debug(4, L"select_try on fd %d", fd);
        }
    }

    if (maxfd >= 0) {
        struct timeval timeout;

        // Wait at most 10 milliseconds for data to become available.
        timeout.tv_sec = 0;
        timeout.tv_usec = 10000;

        int retval = select(maxfd + 1, &fds, 0, 0, &timeout);
        if (retval == 0) {
            debug(4, L"select_try hit timeout");
            return select_try_t::TIMEOUT;
        }
        // NOTE(review): a negative return (e.g. select() interrupted by EINTR, or EBADF) is
        // also reported as DATA_READY here; the enum has no error value. Confirm callers
        // tolerate this before changing it.
        return select_try_t::DATA_READY;
    }

    return select_try_t::IOCHAIN_EMPTY;
}
|
|
|
|
|
2016-05-03 04:41:17 +00:00
|
|
|
/// Read from descriptors until they are empty.
|
|
|
|
///
|
|
|
|
/// \param j the job to test
|
|
|
|
static void read_try(job_t *j) {
|
2013-01-30 10:22:38 +00:00
|
|
|
io_buffer_t *buff = NULL;
|
2005-09-20 13:26:39 +00:00
|
|
|
|
2016-05-03 04:41:17 +00:00
|
|
|
// Find the last buffer, which is the one we want to read from.
|
Big fat refactoring of how redirections work. In fish 1.x and 2.0.0, the redirections for a process were flattened into a big list associated with the job, so there was no way to tell which redirections applied to each process. Each process therefore got all the redirections associated with the job. See https://github.com/fish-shell/fish-shell/issues/877 for how this could manifest.
With this change, jobs only track their block-level redirections. Process level redirections are correctly associated with the process, and at exec time we stitch them together (block, pipe, and process redirects).
This fixes the weird issues where redirects bleed across pipelines (like #877), and also allows us to play with the order in which redirections are applied, since the final list is constructed right before it's needed. This lets us put pipes after block level redirections but before process level redirections, so that a 2>&1-type redirection gets picked up after the pipe, i.e. it should fix https://github.com/fish-shell/fish-shell/issues/110
This is a significant change. The tests all pass. Cross your fingers.
2013-08-19 23:16:41 +00:00
|
|
|
const io_chain_t chain = j->all_io_redirections();
|
2016-05-03 04:41:17 +00:00
|
|
|
for (size_t idx = 0; idx < chain.size(); idx++) {
|
Big fat refactoring of how redirections work. In fish 1.x and 2.0.0, the redirections for a process were flattened into a big list associated with the job, so there was no way to tell which redirections applied to each process. Each process therefore got all the redirections associated with the job. See https://github.com/fish-shell/fish-shell/issues/877 for how this could manifest.
With this change, jobs only track their block-level redirections. Process level redirections are correctly associated with the process, and at exec time we stitch them together (block, pipe, and process redirects).
This fixes the weird issues where redirects bleed across pipelines (like #877), and also allows us to play with the order in which redirections are applied, since the final list is constructed right before it's needed. This lets us put pipes after block level redirections but before process level redirections, so that a 2>&1-type redirection gets picked up after the pipe, i.e. it should fix https://github.com/fish-shell/fish-shell/issues/110
This is a significant change. The tests all pass. Cross your fingers.
2013-08-19 23:16:41 +00:00
|
|
|
io_data_t *d = chain.at(idx).get();
|
2019-01-31 20:12:46 +00:00
|
|
|
if (d->io_mode == io_mode_t::buffer) {
|
2013-01-15 08:44:31 +00:00
|
|
|
buff = static_cast<io_buffer_t *>(d);
|
2012-11-19 00:30:30 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2016-05-03 04:41:17 +00:00
|
|
|
if (buff) {
|
2018-10-02 16:19:56 +00:00
|
|
|
debug(4, L"proc::read_try('%ls')", j->command_wcstr());
|
2016-05-03 04:41:17 +00:00
|
|
|
while (1) {
|
2012-11-19 00:30:30 +00:00
|
|
|
char b[BUFFER_SIZE];
|
2018-05-28 08:27:26 +00:00
|
|
|
long len = read_blocked(buff->pipe_fd[0], b, BUFFER_SIZE);
|
|
|
|
if (len == 0) {
|
2012-11-19 00:30:30 +00:00
|
|
|
break;
|
2018-05-28 08:27:26 +00:00
|
|
|
} else if (len < 0) {
|
2016-05-03 04:41:17 +00:00
|
|
|
if (errno != EAGAIN) {
|
|
|
|
debug(1, _(L"An error occured while reading output from code block"));
|
2012-11-19 00:30:30 +00:00
|
|
|
wperror(L"read_try");
|
|
|
|
}
|
|
|
|
break;
|
2016-05-03 04:41:17 +00:00
|
|
|
} else {
|
2018-05-28 08:27:26 +00:00
|
|
|
buff->append(b, len);
|
2012-11-19 00:30:30 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
2005-09-20 13:26:39 +00:00
|
|
|
}
|
|
|
|
|
2018-10-02 18:24:05 +00:00
|
|
|
// Return control of the terminal to a job's process group. restore_attrs is true if we are restoring
|
|
|
|
// a previously-stopped job, in which case we need to restore terminal attributes.
|
|
|
|
bool terminal_give_to_job(const job_t *j, bool restore_attrs) {
|
2017-02-27 05:46:15 +00:00
|
|
|
if (j->pgid == 0) {
|
|
|
|
debug(2, "terminal_give_to_job() returning early due to no process group");
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2018-10-02 18:24:05 +00:00
|
|
|
// RAII wrappers must have a name so that their scope is tied to the function as it is legal for
|
|
|
|
// the compiler to construct and then immediately deconstruct unnamed objects otherwise.
|
|
|
|
signal_block_t signal_block;
|
2017-07-29 17:03:37 +00:00
|
|
|
|
2017-08-06 22:47:01 +00:00
|
|
|
// It may not be safe to call tcsetpgrp if we've already done so, as at that point we are no
|
|
|
|
// longer the controlling process group for the terminal and no longer have permission to set
|
|
|
|
// the process group that is in control, causing tcsetpgrp to return EPERM, even though that's
|
|
|
|
// not the documented behavior in tcsetpgrp(3), which instead says other bad things will happen
|
|
|
|
// (it says SIGTTOU will be sent to all members of the background *calling* process group, but
|
|
|
|
// it's more complicated than that, SIGTTOU may or may not be sent depending on the TTY
|
|
|
|
// configuration and whether or not signal handlers for SIGTTOU are installed. Read:
|
|
|
|
// http://curiousthing.org/sigttin-sigttou-deep-dive-linux In all cases, our goal here was just
|
|
|
|
// to hand over control of the terminal to this process group, which is a no-op if it's already
|
|
|
|
// been done.
|
2018-10-02 03:55:18 +00:00
|
|
|
if (j->pgid == INVALID_PID || tcgetpgrp(STDIN_FILENO) == j->pgid) {
|
2018-02-18 21:46:46 +00:00
|
|
|
debug(4, L"Process group %d already has control of terminal\n", j->pgid);
|
2017-08-06 23:05:51 +00:00
|
|
|
} else {
|
|
|
|
debug(4,
|
|
|
|
L"Attempting to bring process group to foreground via tcsetpgrp for job->pgid %d\n",
|
|
|
|
j->pgid);
|
2017-07-29 17:03:37 +00:00
|
|
|
|
2017-08-06 22:47:01 +00:00
|
|
|
// The tcsetpgrp(2) man page says that EPERM is thrown if "pgrp has a supported value, but
|
|
|
|
// is not the process group ID of a process in the same session as the calling process."
|
|
|
|
// Since we _guarantee_ that this isn't the case (the child calls setpgid before it calls
|
|
|
|
// SIGSTOP, and the child was created in the same session as us), it seems that EPERM is
|
|
|
|
// being thrown because of an caching issue - the call to tcsetpgrp isn't seeing the
|
|
|
|
// newly-created process group just yet. On this developer's test machine (WSL running Linux
|
|
|
|
// 4.4.0), EPERM does indeed disappear on retry. The important thing is that we can
|
|
|
|
// guarantee the process isn't going to exit while we wait (which would cause us to possibly
|
|
|
|
// block indefinitely).
|
2017-07-29 17:03:37 +00:00
|
|
|
while (tcsetpgrp(STDIN_FILENO, j->pgid) != 0) {
|
2018-10-02 18:24:05 +00:00
|
|
|
debug(3, "tcsetpgrp failed: %d", errno);
|
|
|
|
|
2017-07-29 23:18:01 +00:00
|
|
|
bool pgroup_terminated = false;
|
2018-10-02 18:24:05 +00:00
|
|
|
// No need to test for EINTR as we are blocking signals
|
|
|
|
if (errno == EINVAL) {
|
2017-08-06 22:47:01 +00:00
|
|
|
// OS X returns EINVAL if the process group no longer lives. Probably other OSes,
|
|
|
|
// too. Unlike EPERM below, EINVAL can only happen if the process group has
|
|
|
|
// terminated.
|
2017-07-29 23:18:01 +00:00
|
|
|
pgroup_terminated = true;
|
2017-08-06 23:05:51 +00:00
|
|
|
} else if (errno == EPERM) {
|
2017-08-06 22:47:01 +00:00
|
|
|
// Retry so long as this isn't because the process group is dead.
|
2017-07-29 17:03:37 +00:00
|
|
|
int wait_result = waitpid(-1 * j->pgid, &wait_result, WNOHANG);
|
|
|
|
if (wait_result == -1) {
|
2017-08-06 22:47:01 +00:00
|
|
|
// Note that -1 is technically an "error" for waitpid in the sense that an
|
|
|
|
// invalid argument was specified because no such process group exists any
|
|
|
|
// longer. This is the observed behavior on Linux 4.4.0. a "success" result
|
|
|
|
// would mean processes from the group still exist but is still running in some
|
|
|
|
// state or the other.
|
2017-07-29 23:18:01 +00:00
|
|
|
pgroup_terminated = true;
|
2017-08-06 23:05:51 +00:00
|
|
|
} else {
|
2017-08-06 22:47:01 +00:00
|
|
|
// Debug the original tcsetpgrp error (not the waitpid errno) to the log, and
|
|
|
|
// then retry until not EPERM or the process group has exited.
|
2017-07-29 23:18:01 +00:00
|
|
|
debug(2, L"terminal_give_to_job(): EPERM.\n", j->pgid);
|
2018-10-02 18:24:05 +00:00
|
|
|
continue;
|
2017-07-29 17:03:37 +00:00
|
|
|
}
|
2017-08-06 23:05:51 +00:00
|
|
|
} else {
|
2018-10-02 18:24:05 +00:00
|
|
|
if (errno == ENOTTY) {
|
|
|
|
redirect_tty_output();
|
|
|
|
}
|
2018-10-29 00:09:57 +00:00
|
|
|
debug(1, _(L"Could not send job %d ('%ls') with pgid %d to foreground"), j->job_id,
|
|
|
|
j->command_wcstr(), j->pgid);
|
2017-07-29 17:03:37 +00:00
|
|
|
wperror(L"tcsetpgrp");
|
|
|
|
return false;
|
|
|
|
}
|
2017-07-29 23:18:01 +00:00
|
|
|
|
|
|
|
if (pgroup_terminated) {
|
2018-10-02 18:24:05 +00:00
|
|
|
// All processes in the process group has exited.
|
|
|
|
// Since we delay reaping any processes in a process group until all members of that
|
2018-10-29 00:09:57 +00:00
|
|
|
// job/group have been started, the only way this can happen is if the very last
|
|
|
|
// process in the group terminated and didn't need to access the terminal, otherwise
|
|
|
|
// it would have hung waiting for terminal IO (SIGTTIN). We can safely ignore this.
|
2017-07-29 23:18:01 +00:00
|
|
|
debug(3, L"tcsetpgrp called but process group %d has terminated.\n", j->pgid);
|
2018-09-29 04:13:13 +00:00
|
|
|
mark_job_complete(j);
|
|
|
|
return true;
|
2017-07-29 23:18:01 +00:00
|
|
|
}
|
2018-10-02 18:24:05 +00:00
|
|
|
|
|
|
|
break;
|
2017-07-29 17:03:37 +00:00
|
|
|
}
|
2012-11-19 00:30:30 +00:00
|
|
|
}
|
|
|
|
|
2018-10-02 18:24:05 +00:00
|
|
|
if (restore_attrs) {
|
|
|
|
auto result = tcsetattr(STDIN_FILENO, TCSADRAIN, &j->tmodes);
|
2017-01-27 04:00:43 +00:00
|
|
|
if (result == -1) {
|
2018-10-02 18:24:05 +00:00
|
|
|
// No need to test for EINTR and retry since we have blocked all signals
|
|
|
|
if (errno == ENOTTY) {
|
|
|
|
redirect_tty_output();
|
|
|
|
}
|
|
|
|
|
2018-10-29 00:09:57 +00:00
|
|
|
debug(1, _(L"Could not send job %d ('%ls') to foreground"), j->job_id,
|
|
|
|
j->preview().c_str());
|
2017-01-27 04:00:43 +00:00
|
|
|
wperror(L"tcsetattr");
|
|
|
|
return false;
|
|
|
|
}
|
2016-12-29 02:52:33 +00:00
|
|
|
}
|
|
|
|
|
2013-04-07 19:40:08 +00:00
|
|
|
return true;
|
2006-11-11 10:48:40 +00:00
|
|
|
}
|
|
|
|
|
2018-08-18 23:56:01 +00:00
|
|
|
/// If the terminal is currently owned by \p job_pgid (and not by the shell itself), take it
/// back for the shell so a builtin can run.
///
/// \return the previous owner's pgid on success (so the caller can hand the terminal back
///         afterwards), or -1 if nothing was done or the transfer failed.
pid_t terminal_acquire_before_builtin(int job_pgid) {
    const pid_t shell_pgid = getpgrp();
    const pid_t owner = tcgetpgrp(STDIN_FILENO);

    const bool owned_by_job = owner >= 0 && owner != shell_pgid && owner == job_pgid;
    if (owned_by_job && tcsetpgrp(STDIN_FILENO, shell_pgid) == 0) {
        return owner;
    }
    return -1;
}
|
|
|
|
|
2016-05-03 04:41:17 +00:00
|
|
|
/// Returns control of the terminal to the shell, and saves the terminal attribute state to the job,
/// so that we can restore the terminal ownership to the job at a later time.
///
/// \param j the job that previously owned the terminal
/// \return true on success (or trivially, when the job has no process group), false if the tty
///         handoff or the terminal-attribute save failed
static bool terminal_return_from_job(job_t *j) {
    errno = 0;
    // A pgid of 0 means the job never got its own process group; there is nothing to take the
    // terminal back from.
    if (j->pgid == 0) {
        debug(2, "terminal_return_from_job() returning early due to no process group");
        return true;
    }

    // Block signals so child-status handling cannot interleave with the tty manipulation below;
    // every exit path from here on must call signal_unblock().
    signal_block();
    if (tcsetpgrp(STDIN_FILENO, getpgrp()) == -1) {
        if (errno == ENOTTY) redirect_tty_output();
        debug(1, _(L"Could not return shell to foreground"));
        wperror(L"tcsetpgrp");
        signal_unblock();
        return false;
    }

    // Save jobs terminal modes, so they can be restored if the job is continued later.
    if (tcgetattr(STDIN_FILENO, &j->tmodes)) {
        if (errno == EIO) redirect_tty_output();
        // NOTE(review): this message says "return shell to foreground" but the failing call is
        // tcgetattr (saving the job's modes); it looks copy-pasted from the branch above —
        // confirm intent before changing the user-visible text.
        debug(1, _(L"Could not return shell to foreground"));
        wperror(L"tcgetattr");
        signal_unblock();
        return false;
    }

    // Disabling this per
    // https://github.com/adityagodbole/fish-shell/commit/9d229cd18c3e5c25a8bd37e9ddd3b67ddc2d1b72 On
    // Linux, 'cd . ; ftp' prevents you from typing into the ftp prompt. See
    // https://github.com/fish-shell/fish-shell/issues/121
#if 0
    // Restore the shell's terminal modes.
    if (tcsetattr(STDIN_FILENO, TCSADRAIN, &shell_modes) == -1) {
        if (errno == EIO) redirect_tty_output();
        debug(1, _(L"Could not return shell to foreground"));
        wperror(L"tcsetattr");
        return false;
    }
#endif

    signal_unblock();
    return true;
}
|
|
|
|
|
2018-10-02 20:10:42 +00:00
|
|
|
/// Continue (i.e. start or resume) this job. The job is promoted to the front of the job list and,
/// when it is a foreground job that should own the terminal, given control of the tty. If
/// \p send_sigcont is true, SIGCONT is delivered to the job's processes. For foreground jobs this
/// then waits — servicing the job's IO via select_try()/read_try() — until the job stops or
/// completes, and finally records $status from the last process in the pipeline.
void job_t::continue_job(bool send_sigcont) {
    // Put job first in the job list.
    promote();
    set_flag(job_flag_t::NOTIFIED, false);

    debug(4, L"%ls job %d, gid %d (%ls), %ls, %ls", send_sigcont ? L"Continue" : L"Start", job_id,
          pgid, command_wcstr(), is_completed() ? L"COMPLETED" : L"UNCOMPLETED",
          is_interactive ? L"INTERACTIVE" : L"NON-INTERACTIVE");

    // Make sure we retake control of the terminal before leaving this function (the cleanup
    // object's destructor runs terminal_return_from_job on every exit path, including the early
    // returns below).
    bool term_transferred = false;
    cleanup_t take_term_back([&]() {
        if (term_transferred) {
            terminal_return_from_job(this);
        }
    });

    // Tracks whether the IO loop below drained the job's output at least once, so the
    // completed-job path at the end knows whether a final read is still needed.
    bool read_attempted = false;
    if (!is_completed()) {
        if (get_flag(job_flag_t::TERMINAL) && is_foreground()) {
            // Put the job into the foreground and give it control of the terminal.
            // Hack: ensure that stdin is marked as blocking first (issue #176).
            make_fd_blocking(STDIN_FILENO);
            if (!terminal_give_to_job(this, send_sigcont)) {
                // This scenario has always returned without any error handling. Presumably that is
                // OK.
                return;
            }
            term_transferred = true;
        }

        // If both requested and necessary, send the job a continue signal.
        if (send_sigcont) {
            // This code used to check for JOB_CONTROL to decide between using killpg to signal all
            // processes in the group or iterating over each process in the group and sending the
            // signal individually. job_t::signal() does the same, but uses the shell's own pgroup
            // to make that distinction.
            if (!signal(SIGCONT)) {
                debug(2, "Failed to send SIGCONT to any processes in pgroup %d!", pgid);
                // This returns without bubbling up the error. Presumably that is OK.
                return;
            }

            // reset the status of each process instance
            for (auto &p : processes) {
                p->stopped = false;
            }
        }

        if (is_foreground()) {
            // This is an optimization to not call select_try() in case a process has exited. While
            // it may seem silly, unless there is IO (and there usually isn't in terms of total CPU
            // time), select_try() will wait for 10ms (our timeout) before returning. If during
            // these 10ms a process exited, the shell will basically hang until the timeout happens
            // and we are free to call `process_mark_finished_children()` to discover that fact. By
            // calling it here before calling `select_try()` below, shell responsiveness can be
            // dramatically improved (noticably so, not just "theoretically speaking" per the
            // discussion in #5219).
            process_mark_finished_children(false);

            // If this is a child job and the parent job is still under construction (i.e. job1 |
            // some_func), we can't block on execution of the nested job for `some_func`. Doing
            // so can cause hangs if job1 emits more data than fits in the OS pipe buffer.
            // The solution is to to not block on fg from the initial call in exec_job(), which
            // is also the only place that send_sigcont is false. parent_job.is_constructed()
            // must also be true, which coincides with WAIT_BY_PROCESS (which will have to do
            // since we don't store a reference to the parent job in the job_t structure).
            bool block_on_fg = send_sigcont && job_chain_is_fully_constructed();

            // Wait for data to become available or the status of our own job to change
            while (!reader_exit_forced() && !is_stopped() && !is_completed()) {
                auto result = select_try(this);
                read_attempted = true;

                switch (result) {
                    case select_try_t::DATA_READY:
                        // Read the data that we know is now available, then scan for finished processes
                        // but do not block. We don't block so long as we have IO to process, once the
                        // fd buffers are empty we'll block in the second case below.
                        read_try(this);
                        process_mark_finished_children(false);
                        break;

                    case select_try_t::TIMEOUT:
                        // No FDs are ready. Look for finished processes instead.
                        debug(4, L"select_try: no fds returned valid data within the timeout" );
                        process_mark_finished_children(block_on_fg);
                        break;

                    case select_try_t::IOCHAIN_EMPTY:
                        // There were no IO fds to select on.
                        debug(4, L"select_try: no IO fds" );
                        process_mark_finished_children(true);

                        // If it turns out that we encountered this because the file descriptor we were
                        // reading from has died, process_mark_finished_children() should take care of
                        // changing the status of our is_completed() (assuming it is appropriate to do
                        // so), in which case we will break out of this loop.
                        break;
                }
            }
        }
    }

    if (is_foreground()) {
        if (is_completed()) {
            // It's possible that the job will produce output and exit before we've even read from
            // it. In that case, make sure we read that output now, before we've executed any
            // subsequent calls. This is why prompt colors were getting screwed up - the builtin
            // `echo` calls were sometimes having their output combined with the `set_color` calls
            // in the wrong order!
            if (!read_attempted) {
                read_try(this);
            }

            // Set $status only if we are in the foreground and the last process in the job has
            // finished and is not a short-circuited builtin.
            auto &p = processes.back();
            if ((WIFEXITED(p->status) || WIFSIGNALED(p->status)) && p->pid) {
                int status = proc_format_status(p->status);
                proc_set_last_status(get_flag(job_flag_t::NEGATE) ? !status : status);
            }
        }
    }
}
|
|
|
|
|
2016-05-03 04:41:17 +00:00
|
|
|
/// Convert a raw status code as returned by wait()/waitpid() into the value fish reports in
/// $status: 128 + signal number for a signal death, the exit code for a normal exit, and the
/// unmodified status for anything else (e.g. a stopped process).
int proc_format_status(int status) {
    // Killed by a signal: shells conventionally report 128 + the signal number.
    if (WIFSIGNALED(status)) return 128 + WTERMSIG(status);
    // Exited normally: report the child's own exit code.
    if (WIFEXITED(status)) return WEXITSTATUS(status);
    // Neither exited nor signaled: pass the raw status through unchanged.
    return status;
}
|
|
|
|
|
2016-05-03 04:41:17 +00:00
|
|
|
void proc_sanity_check() {
|
2017-01-26 22:47:32 +00:00
|
|
|
const job_t *fg_job = NULL;
|
2012-11-19 00:30:30 +00:00
|
|
|
|
2012-01-30 00:36:21 +00:00
|
|
|
job_iterator_t jobs;
|
2017-01-26 22:47:32 +00:00
|
|
|
while (const job_t *j = jobs.next()) {
|
2018-10-02 17:30:23 +00:00
|
|
|
if (!j->is_constructed()) continue;
|
2012-11-19 00:30:30 +00:00
|
|
|
|
2016-05-03 04:41:17 +00:00
|
|
|
// More than one foreground job?
|
2018-10-02 17:30:23 +00:00
|
|
|
if (j->is_foreground() && !(j->is_stopped() || j->is_completed())) {
|
2017-01-24 23:14:56 +00:00
|
|
|
if (fg_job) {
|
2016-05-03 04:41:17 +00:00
|
|
|
debug(0, _(L"More than one job in foreground: job 1: '%ls' job 2: '%ls'"),
|
|
|
|
fg_job->command_wcstr(), j->command_wcstr());
|
2012-11-19 00:30:30 +00:00
|
|
|
sanity_lose();
|
|
|
|
}
|
|
|
|
fg_job = j;
|
|
|
|
}
|
|
|
|
|
2017-01-23 17:28:34 +00:00
|
|
|
for (const process_ptr_t &p : j->processes) {
|
2016-05-03 04:41:17 +00:00
|
|
|
// Internal block nodes do not have argv - see issue #1545.
|
2014-07-12 18:01:00 +00:00
|
|
|
bool null_ok = (p->type == INTERNAL_BLOCK_NODE);
|
|
|
|
validate_pointer(p->get_argv(), _(L"Process argument list"), null_ok);
|
|
|
|
validate_pointer(p->argv0(), _(L"Process name"), null_ok);
|
2012-11-19 00:30:30 +00:00
|
|
|
|
2016-05-03 04:41:17 +00:00
|
|
|
if ((p->stopped & (~0x00000001)) != 0) {
|
|
|
|
debug(0, _(L"Job '%ls', process '%ls' has inconsistent state \'stopped\'=%d"),
|
|
|
|
j->command_wcstr(), p->argv0(), p->stopped);
|
2012-11-19 00:30:30 +00:00
|
|
|
sanity_lose();
|
|
|
|
}
|
|
|
|
|
2016-05-03 04:41:17 +00:00
|
|
|
if ((p->completed & (~0x00000001)) != 0) {
|
|
|
|
debug(0, _(L"Job '%ls', process '%ls' has inconsistent state \'completed\'=%d"),
|
|
|
|
j->command_wcstr(), p->argv0(), p->completed);
|
2012-11-19 00:30:30 +00:00
|
|
|
sanity_lose();
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
2005-09-20 13:26:39 +00:00
|
|
|
}
|
|
|
|
|
2016-05-03 04:41:17 +00:00
|
|
|
void proc_push_interactive(int value) {
|
2012-02-26 02:54:49 +00:00
|
|
|
ASSERT_IS_MAIN_THREAD();
|
2012-11-19 00:30:30 +00:00
|
|
|
int old = is_interactive;
|
2012-02-10 03:26:44 +00:00
|
|
|
interactive_stack.push_back(is_interactive);
|
2012-11-19 00:30:30 +00:00
|
|
|
is_interactive = value;
|
2016-05-03 04:41:17 +00:00
|
|
|
if (old != value) signal_set_handlers();
|
2006-02-16 13:36:32 +00:00
|
|
|
}
|
|
|
|
|
2016-05-03 04:41:17 +00:00
|
|
|
void proc_pop_interactive() {
|
2012-02-26 02:54:49 +00:00
|
|
|
ASSERT_IS_MAIN_THREAD();
|
2012-11-19 00:30:30 +00:00
|
|
|
int old = is_interactive;
|
2016-05-03 04:41:17 +00:00
|
|
|
is_interactive = interactive_stack.back();
|
2012-02-10 03:26:44 +00:00
|
|
|
interactive_stack.pop_back();
|
2016-05-03 04:41:17 +00:00
|
|
|
if (is_interactive != old) signal_set_handlers();
|
2006-02-16 13:36:32 +00:00
|
|
|
}
|
2017-10-22 07:10:23 +00:00
|
|
|
|
|
|
|
/// Block until any child process changes state (exits or stops), record that status change, and
/// clean up completed jobs.
///
/// \return the pid of the reaped child, or -1 if waitpid() failed (e.g. there are no children)
pid_t proc_wait_any() {
    int status = 0;
    const pid_t child = waitpid(-1, &status, WUNTRACED);
    if (child < 0) return -1;
    handle_child_status(child, status);
    process_clean_after_marking(is_interactive);
    return child;
}
|
2018-10-20 18:58:51 +00:00
|
|
|
|
|
|
|
void hup_background_jobs() {
|
|
|
|
job_iterator_t jobs;
|
|
|
|
|
|
|
|
while (job_t *j = jobs.next()) {
|
|
|
|
// Make sure we don't try to SIGHUP the calling builtin
|
|
|
|
if (j->pgid == INVALID_PID || !j->get_flag(job_flag_t::JOB_CONTROL)) {
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (!j->is_completed()) {
|
|
|
|
if (j->is_stopped()) {
|
|
|
|
j->signal(SIGCONT);
|
|
|
|
}
|
|
|
|
j->signal(SIGHUP);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
2018-09-09 08:36:21 +00:00
|
|
|
|
|
|
|
// Whether fish is currently executing its initialization (config) files. Atomic so it can be
// read from any thread.
static std::atomic<bool> s_is_within_fish_initialization{false};

/// Record whether fish is currently running its initialization files.
void set_is_within_fish_initialization(bool flag) {
    s_is_within_fish_initialization = flag;
}

/// \return true if fish is currently running its initialization files.
bool is_within_fish_initialization() {
    return s_is_within_fish_initialization;
}
|