// Functions for waiting for processes to complete.
#include "builtin_wait.h"

#include <sys/wait.h>

#include <algorithm>
#include <vector>

#include "builtin.h"
#include "common.h"
#include "parser.h"
#include "proc.h"
#include "signal.h"
#include "wgetopt.h"
#include "wutil.h"

/// Return the job id to which the process with the given pid belongs.
/// If the specified process has already finished but its job has not, parser_t::job_get_from_pid()
/// does not work properly, so the wait builtin uses this function instead.
static job_id_t get_job_id_from_pid(pid_t pid, const parser_t &parser) {
    for (const auto &j : parser.jobs()) {
        if (j->get_pgid() == maybe_t<pid_t>{pid}) {
            return j->job_id();
        }
        // Check if the specified pid is a child process of the job.
        for (const process_ptr_t &p : j->processes) {
            if (p->pid == pid) {
                return j->job_id();
            }
        }
    }
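    // No job owns this pid; job ids are positive, so returning 0 tells the caller nothing was
    // found.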
    return 0;
}

static bool all_jobs_finished(const parser_t &parser) {
    for (const auto &j : parser.jobs()) {
        // If any job is not completed, return false.
        // Stopped jobs are ignored.
        if (j->is_constructed() && !j->is_completed() && !j->is_stopped()) {
            return false;
        }
    }
    return true;
}

static bool any_jobs_finished(size_t jobs_len, const parser_t &parser) {
    bool no_jobs_running = true;

    // If any job has been removed from the list, return true.
    if (jobs_len != parser.jobs().size()) {
        return true;
    }
    for (const auto &j : parser.jobs()) {
        // If any job has completed or stopped, return true.
        if (j->is_constructed() && (j->is_completed() || j->is_stopped())) {
            return true;
        }
        // Check whether any jobs are still running.
        if (j->is_constructed() && !j->is_stopped()) {
            no_jobs_running = false;
        }
    }
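    // Nothing has completed or been removed; report "finished" only when no jobs are running at
    // all, so that the caller does not wait forever.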
    return no_jobs_running;
}

static int wait_for_backgrounds(parser_t &parser, bool any_flag) {
    sigint_checker_t sigint;
    size_t jobs_len = parser.jobs().size();
    while ((!any_flag && !all_jobs_finished(parser)) ||
           (any_flag && !any_jobs_finished(jobs_len, parser))) {
        if (sigint.check()) {
            return 128 + SIGINT;
        }
        proc_wait_any(parser);
    }
    return 0;
}

static bool all_specified_jobs_finished(const parser_t &parser, const std::vector<job_id_t> &ids) {
    for (auto id : ids) {
        if (const job_t *j = parser.job_get(id)) {
            // If any specified job is not completed, return false.
            // Stopped jobs are ignored.
            if (j->is_constructed() && !j->is_completed() && !j->is_stopped()) {
                return false;
            }
        }
    }
    return true;
}

static bool any_specified_jobs_finished(const parser_t &parser, const std::vector<job_id_t> &ids) {
    for (auto id : ids) {
        if (const job_t *j = parser.job_get(id)) {
            // If any specified job has completed or stopped, return true.
            if (j->is_constructed() && (j->is_completed() || j->is_stopped())) {
                return true;
            }
        } else {
            // If any specified job has been removed from the list, return true.
            return true;
        }
    }
    return false;
}

static int wait_for_backgrounds_specified(parser_t &parser, const std::vector<job_id_t> &ids,
                                          bool any_flag) {
    sigint_checker_t sigint;
    while ((!any_flag && !all_specified_jobs_finished(parser, ids)) ||
           (any_flag && !any_specified_jobs_finished(parser, ids))) {
        if (sigint.check()) {
            return 128 + SIGINT;
        }
        proc_wait_any(parser);
    }
    return 0;
}

/// Tests if all characters in the wide string are numeric.
static bool iswnumeric(const wchar_t *n) {
    for (; *n; n++) {
        if (*n < L'0' || *n > L'9') {
            return false;
        }
    }
    return true;
}

/// See if the process described by \c proc matches the commandline \c cmd.
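/// For example, a proc of L"nvim" is meant to match a cmd of L"/usr/local/bin/nvim" through its
/// basename (the path shown is illustrative).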
static bool match_pid(const wcstring &cmd, const wchar_t *proc) {
    // Don't wait for the wait command itself.
    if (std::wcscmp(proc, L"wait") == 0) return false;

    // Get the command to match against. We're only interested in the last path component.
    const wcstring base_cmd = wbasename(cmd);
    return std::wcscmp(proc, base_cmd.c_str()) == 0;
}

/// Search the job list for jobs whose command (or any child process) matches the given \c proc.
static bool find_job_by_name(const wchar_t *proc, std::vector<job_id_t> &ids,
                             const parser_t &parser) {
    bool found = false;

    for (const auto &j : parser.jobs()) {
        if (j->command().empty()) continue;

        if (match_pid(j->command(), proc)) {
            if (!contains(ids, j->job_id())) {
                // If ids doesn't already contain this job id, add it.
                ids.push_back(j->job_id());
            }
            found = true;
        }

        // Check if the given name matches any child process of the job.
        for (const process_ptr_t &p : j->processes) {
            if (p->actual_cmd.empty()) continue;

            if (match_pid(p->actual_cmd, proc)) {
                if (!contains(ids, j->job_id())) {
                    // If ids doesn't already contain this job id, add it.
                    ids.push_back(j->job_id());
                }
                found = true;
            }
        }
    }

    return found;
}

/// This function must be invoked on the main thread, because job operations are not thread safe.
/// It waits for child jobs, not for individual child processes.
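/// Illustrative invocations handled by this builtin (the pid and name below are examples only):
///   wait            -- wait for all background jobs to complete
///   wait -n         -- wait until any background job completes
///   wait 1234 nvim  -- wait for the job owning pid 1234 and for jobs running a command named nvim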
int builtin_wait(parser_t &parser, io_streams_t &streams, wchar_t **argv) {
    ASSERT_IS_MAIN_THREAD();
    int retval = STATUS_CMD_OK;
    const wchar_t *cmd = argv[0];
    int argc = builtin_count_args(argv);
    bool any_flag = false;  // flag for -n option
    bool print_help = false;

    static const wchar_t *const short_options = L":nh";
    static const struct woption long_options[] = {{L"any", no_argument, nullptr, 'n'},
                                                  {L"help", no_argument, nullptr, 'h'},
                                                  {nullptr, 0, nullptr, 0}};

    int opt;
    wgetopter_t w;
    while ((opt = w.wgetopt_long(argc, argv, short_options, long_options, nullptr)) != -1) {
        switch (opt) {
            case 'n':
                any_flag = true;
                break;
            case 'h':
                print_help = true;
                break;
            case ':': {
                builtin_missing_argument(parser, streams, cmd, argv[w.woptind - 1]);
                return STATUS_INVALID_ARGS;
            }
            case '?': {
                builtin_unknown_option(parser, streams, cmd, argv[w.woptind - 1]);
                return STATUS_INVALID_ARGS;
            }
            default: {
                DIE("unexpected retval from wgetopt_long");
            }
        }
    }

    if (print_help) {
        builtin_print_help(parser, streams, cmd);
        return STATUS_CMD_OK;
    }

    if (w.woptind == argc) {
        // no jobs specified
        retval = wait_for_backgrounds(parser, any_flag);
    } else {
        // jobs specified
        std::vector<job_id_t> waited_job_ids;

        for (int i = w.woptind; i < argc; i++) {
            if (iswnumeric(argv[i])) {
                // argument is pid
                pid_t pid = fish_wcstoi(argv[i]);
                if (errno || pid <= 0) {
                    streams.err.append_format(_(L"%ls: '%ls' is not a valid process id\n"), cmd,
                                              argv[i]);
                    continue;
                }
                if (job_id_t id = get_job_id_from_pid(pid, parser)) {
                    waited_job_ids.push_back(id);
                } else {
                    streams.err.append_format(
                        _(L"%ls: Could not find a job with process id '%d'\n"), cmd, pid);
                }
            } else {
                // argument is process name
                if (!find_job_by_name(argv[i], waited_job_ids, parser)) {
                    streams.err.append_format(
                        _(L"%ls: Could not find child processes with the name '%ls'\n"), cmd,
                        argv[i]);
                }
            }
        }

        if (waited_job_ids.empty()) return STATUS_INVALID_ARGS;

        retval = wait_for_backgrounds_specified(parser, waited_job_ids, any_flag);
    }

    return retval;
}