mirror of https://github.com/fish-shell/fish-shell
synced 2024-12-25 20:33:08 +00:00

Initial work towards various IO cleanups with an eye to fixing https://github.com/fish-shell/fish-shell/issues/110

parent 2979d3bf16
commit e849beabba

10 changed files with 81 additions and 32 deletions
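The heart of the change is in the proc.h hunk below: job_t stops exposing its io_chain_t member for direct mutation and instead offers a read-only io_chain() accessor plus an append_io() helper, with exec() kept as a friend. Here is a minimal sketch of that shape, using simplified stand-ins for io_data_t and io_chain_t rather than the real fish types:

#include <cassert>
#include <memory>
#include <vector>

// Simplified stand-ins for fish's io_data_t / io_chain_t (io.h derives the
// chain from std::vector<shared_ptr<io_data_t> >, as the io.cpp hunk shows).
struct io_data_t { int fd; };

class io_chain_t : public std::vector<std::shared_ptr<io_data_t> >
{
public:
    // Mirrors the push_front added to io.h/io.cpp in this commit.
    void push_front(const std::shared_ptr<io_data_t> &element)
    {
        assert(element.get() != nullptr);
        this->insert(this->begin(), element);
    }
};

class job_t
{
    // Now in the private section; in the real header exec() keeps access via a friend declaration.
    io_chain_t io;

public:
    // Read-only view for the many call sites that only iterate the chain.
    const io_chain_t &io_chain() const { return this->io; }

    // The one mutation most call sites need.
    void append_io(const std::shared_ptr<io_data_t> &new_io) { this->io.push_back(new_io); }
};

int main()
{
    job_t j;
    j.append_io(std::make_shared<io_data_t>());    // was: j->io.push_back(...)
    return (int)j.io_chain().size();               // was: j->io.size()
}

Call sites change accordingly throughout exec.cpp, parser.cpp, postfork.cpp and proc.cpp: j->io.push_back(x) becomes j->append_io(x), and loops over j->io iterate j->io_chain() instead.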
exec.cpp (30)

@@ -49,7 +49,6 @@
 #include "expand.h"
 #include "signal.h"
 #include "parse_util.h"
 /**

@@ -490,7 +489,7 @@ static bool io_transmogrify(const io_chain_t &in_chain, io_chain_t &out_chain, s
 static void internal_exec_helper(parser_t &parser,
                                  const wchar_t *def,
                                  enum block_type_t block_type,
-                                 io_chain_t &ios)
+                                 const io_chain_t &ios)
 {
     io_chain_t morphed_chain;
     std::vector<int> opened_fds;

@@ -540,9 +539,10 @@ static bool can_use_posix_spawn_for_job(const job_t *job, const process_t *proces
     /* Now see if we have a redirection involving a file. The only one we allow is /dev/null, which we assume will not fail. */
     bool result = true;
-    for (size_t idx = 0; idx < job->io.size(); idx++)
+    const io_chain_t &ios = job->io_chain();
+    for (size_t idx = 0; idx < ios.size(); idx++)
     {
-        const shared_ptr<const io_data_t> &io = job->io.at(idx);
+        const shared_ptr<const io_data_t> &io = ios.at(idx);
         if (redirection_is_to_real_file(io.get()))
         {
             result = false;

@@ -621,9 +621,10 @@ void exec(parser_t &parser, job_t *j)
     }

     const io_buffer_t *input_redirect = NULL;
-    for (size_t idx = 0; idx < j->io.size(); idx++)
+    const io_chain_t &ios = j->io_chain();
+    for (size_t idx = 0; idx < ios.size(); idx++)
     {
-        const shared_ptr<io_data_t> &io = j->io.at(idx);
+        const shared_ptr<io_data_t> &io = ios.at(idx);

         if ((io->io_mode == IO_BUFFER))
         {

@@ -770,13 +771,14 @@ void exec(parser_t &parser, job_t *j)
             pipe_read.reset(new io_pipe_t(p->pipe_read_fd, true));
             /* Record the current read in pipe_read */
             pipe_read->pipe_fd[0] = pipe_current_read;
-            j->io.push_back(pipe_read);
+            j->append_io(pipe_read);
         }

         if (p->next)
         {
             pipe_write.reset(new io_pipe_t(p->pipe_write_fd, false));
-            j->io.push_back(pipe_write);
+            j->append_io(pipe_write);
         }

         /*

@@ -821,6 +823,10 @@ void exec(parser_t &parser, job_t *j)
                 pipe_next_read = local_pipe[0];
             }

+            //fprintf(stderr, "before IO: ");
+            //io_print(j->io);

             switch (p->type)
             {
                 case INTERNAL_FUNCTION:

@@ -873,13 +879,13 @@ void exec(parser_t &parser, job_t *j)
                     }
                     else
                     {
-                        j->io.push_back(io_buffer);
+                        j->append_io(io_buffer);
                     }
                 }

                 if (! exec_error)
                 {
-                    internal_exec_helper(parser, def.c_str(), TOP, j->io);
+                    internal_exec_helper(parser, def.c_str(), TOP, j->io_chain());
                 }

                 parser.allow_function();

@@ -915,7 +921,7 @@ void exec(parser_t &parser, job_t *j)
             case INTERNAL_BUILTIN:
             {
                 int builtin_stdin=0;
-                int close_stdin=0;
+                bool close_stdin = false;

                 /*
                    If this is the first process, check the io

@@ -959,7 +965,7 @@ void exec(parser_t &parser, job_t *j)
                     }
                     else
                     {
-                        close_stdin = 1;
+                        close_stdin = true;
                     }

                 break;
io.cpp (6)

@@ -202,6 +202,12 @@ void io_chain_t::push_back(const shared_ptr<io_data_t> &element)
     std::vector<shared_ptr<io_data_t> >::push_back(element);
 }

+void io_chain_t::push_front(const shared_ptr<io_data_t> &element)
+{
+    assert(element.get() != NULL);
+    this->insert(this->begin(), element);
+}
+
 void io_remove(io_chain_t &list, const shared_ptr<const io_data_t> &element)
 {
     list.remove(element);
io.h (1)

@@ -188,6 +188,7 @@ public:
     void remove(const shared_ptr<const io_data_t> &element);
     void push_back(const shared_ptr<io_data_t> &element);
+    void push_front(const shared_ptr<io_data_t> &element);

     shared_ptr<const io_data_t> get_io_for_fd(int fd) const;
     shared_ptr<io_data_t> get_io_for_fd(int fd);
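io.h gains push_front alongside push_back. Ordering matters because, at fork time, handle_child_io in postfork.cpp (further down in this diff) walks the chain front to back and dup2()s each redirection into place, so a later entry for the same fd wins. A rough, self-contained sketch of that application order, using a simplified io_fd_t stand-in rather than fish's real io_data_t hierarchy:

#include <unistd.h>

#include <memory>
#include <vector>

// Simplified stand-in for fish's IO_FD redirection: make `fd` a copy of `old_fd`.
struct io_fd_t { int fd; int old_fd; };
typedef std::vector<std::shared_ptr<io_fd_t> > io_chain_t;

// Sketch of handle_child_io's loop: redirections are applied in chain order,
// so entries later in the chain override earlier ones targeting the same fd.
static int apply_chain(const io_chain_t &chain)
{
    for (size_t idx = 0; idx < chain.size(); idx++)
    {
        const io_fd_t *io = chain.at(idx).get();
        close(io->fd);                        // precaution; may legitimately fail
        if (dup2(io->old_fd, io->fd) == -1)
            return -1;
    }
    return 0;
}

int main()
{
    io_chain_t chain;
    chain.push_back(std::make_shared<io_fd_t>(io_fd_t{2, 1}));  // equivalent of 2>&1
    return apply_chain(chain);
}

Given that ordering, push_front adds an entry that anything already in the chain can still override for the same fd.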
parser.cpp

@@ -1559,7 +1559,7 @@ void parser_t::parse_job_argument_list(process_t *p,
                 if (new_io.get() != NULL)
                 {
-                    j->io.push_back(new_io);
+                    j->append_io(new_io);
                 }
             }

@@ -2318,7 +2318,6 @@ static bool job_should_skip_elseif(const job_t *job, const block_t *current_bloc
 void parser_t::eval_job(tokenizer_t *tok)
 {
     ASSERT_IS_MAIN_THREAD();
-    job_t *j;

     int start_pos = job_start_pos = tok_get_pos(tok);
     long long t1=0, t2=0, t3=0;

@@ -2341,7 +2340,7 @@ void parser_t::eval_job(tokenizer_t *tok)
     {
         case TOK_STRING:
         {
-            j = this->job_create();
+            job_t *j = this->job_create();
             job_set_flag(j, JOB_FOREGROUND, 1);
             job_set_flag(j, JOB_TERMINAL, job_get_flag(j, JOB_CONTROL));
             job_set_flag(j, JOB_TERMINAL, job_get_flag(j, JOB_CONTROL) \
postfork.cpp (26)

@@ -37,6 +37,8 @@
 /** pipe error */
 #define LOCAL_PIPE_ERROR "An error occurred while setting up pipe"

+static bool log_redirections = false;
+
 /* Cover for debug_safe that can take an int. The format string should expect a %s */
 static void debug_safe_int(int level, const char *format, int val)
 {

@@ -164,11 +166,11 @@ static void free_redirected_fds_from_pipes(io_chain_t &io_chain)
    \return 0 on sucess, -1 on failiure
 */
-static int handle_child_io(io_chain_t &io_chain)
+static int handle_child_io(const io_chain_t &io_chain)
 {
     //fprintf(stderr, "child IO for %d\n", getpid());
     close_unused_internal_pipes(io_chain);
-    free_redirected_fds_from_pipes(io_chain);
+    //free_redirected_fds_from_pipes(io_chain);
     for (size_t idx = 0; idx < io_chain.size(); idx++)
     {
         io_data_t *io = io_chain.at(idx).get();

@@ -183,6 +185,7 @@ static int handle_child_io(io_chain_t &io_chain)
     {
         case IO_CLOSE:
         {
+            if (log_redirections) fprintf(stderr, "%d: close %d\n", getpid(), io->fd);
             if (close(io->fd))
             {
                 debug_safe_int(0, "Failed to close file descriptor %s", io->fd);

@@ -232,13 +235,17 @@ static int handle_child_io(io_chain_t &io_chain)
         case IO_FD:
         {
+            int old_fd = static_cast<const io_fd_t *>(io)->old_fd;
+            if (log_redirections) fprintf(stderr, "%d: fd dup %d to %d\n", getpid(), old_fd, io->fd);
+
             /*
                This call will sometimes fail, but that is ok,
                this is just a precausion.
             */
             close(io->fd);

-            if (dup2(static_cast<const io_fd_t *>(io)->old_fd, io->fd) == -1)
+            if (dup2(old_fd, io->fd) == -1)
             {
                 debug_safe_int(1, FD_ERROR, io->fd);
                 safe_perror("dup2");

@@ -262,6 +269,7 @@ static int handle_child_io(io_chain_t &io_chain)
                         io->pipe_fd[0],
                         io->pipe_fd[1]);
             */
+            if (log_redirections) fprintf(stderr, "%d: %s dup %d to %d\n", getpid(), io->io_mode == IO_BUFFER ? "buffer" : "pipe", io_pipe->pipe_fd[write_pipe_idx], io->fd);
             if (dup2(io_pipe->pipe_fd[write_pipe_idx], io->fd) != io->fd)
             {
                 debug_safe(1, LOCAL_PIPE_ERROR);

@@ -295,7 +303,7 @@ int setup_child_process(job_t *j, process_t *p)
     if (ok)
     {
-        ok = (0 == handle_child_io(j->io));
+        ok = (0 == handle_child_io(j->io_chain()));
         if (p != 0 && ! ok)
         {
             exit_without_destructors(1);

@@ -436,19 +444,19 @@ bool fork_actions_make_spawn_properties(posix_spawnattr_t *attr, posix_spawn_fil
     err = posix_spawnattr_setsigmask(attr, &sigmask);

     /* Make sure that our pipes don't use an fd that the redirection itself wants to use */
-    free_redirected_fds_from_pipes(j->io);
+    //free_redirected_fds_from_pipes(j->io);

     /* Close unused internal pipes */
     std::vector<int> files_to_close;
-    get_unused_internal_pipes(files_to_close, j->io);
+    get_unused_internal_pipes(files_to_close, j->io_chain());
     for (size_t i = 0; ! err && i < files_to_close.size(); i++)
     {
         err = posix_spawn_file_actions_addclose(actions, files_to_close.at(i));
     }

-    for (size_t idx = 0; idx < j->io.size(); idx++)
+    for (size_t idx = 0; idx < j->io_chain().size(); idx++)
     {
-        shared_ptr<const io_data_t> io = j->io.at(idx);
+        shared_ptr<const io_data_t> io = j->io_chain().at(idx);

         if (io->io_mode == IO_FD)
         {
proc.cpp (10)

@@ -542,11 +542,11 @@ process_t::~process_t()
 job_t::job_t(job_id_t jobid) :
     command_str(),
     command_narrow(),
-    io(),
     first_process(NULL),
     pgid(0),
     tmodes(),
     job_id(jobid),
+    io(),
     flags(0)
 {
 }

@@ -876,9 +876,9 @@ static int select_try(job_t *j)
     FD_ZERO(&fds);

-    for (size_t idx = 0; idx < j->io.size(); idx++)
+    for (size_t idx = 0; idx < j->io_chain().size(); idx++)
     {
-        const io_data_t *io = j->io.at(idx).get();
+        const io_data_t *io = j->io_chain().at(idx).get();
         if (io->io_mode == IO_BUFFER)
         {
             CAST_INIT(const io_pipe_t *, io_pipe, io);

@@ -917,9 +917,9 @@ static void read_try(job_t *j)
     /*
        Find the last buffer, which is the one we want to read from
     */
-    for (size_t idx = 0; idx < j->io.size(); idx++)
+    for (size_t idx = 0; idx < j->io_chain().size(); idx++)
     {
-        io_data_t *d = j->io.at(idx).get();
+        io_data_t *d = j->io_chain().at(idx).get();
         if (d->io_mode == IO_BUFFER)
         {
             buff = static_cast<io_buffer_t *>(d);
proc.h (12)

@@ -272,6 +272,7 @@ void release_job_id(job_id_t jobid);
    A struct represeting a job. A job is basically a pipeline of one
    or more processes and a couple of flags.
 */
+class parser_t;
 class job_t
 {
     /**

@@ -284,6 +285,10 @@ class job_t
     /* narrow copy so we don't have to convert after fork */
     narrow_string_rep_t command_narrow;

+    /** List of all IO redirections for this job. */
+    io_chain_t io;
+
+    friend void exec(parser_t &parser, job_t *j);

     /* No copying */
     job_t(const job_t &rhs);
     void operator=(const job_t &);

@@ -350,13 +355,14 @@ public:
     */
     const job_id_t job_id;

-    /** List of all IO redirections for this job. */
-    io_chain_t io;
-
     /**
        Bitset containing information about the job. A combination of the JOB_* constants.
     */
     unsigned int flags;

+    const io_chain_t &io_chain() const { return this->io; }
+
+    void append_io(const shared_ptr<io_data_t> & new_io) { this->io.push_back(new_io); }
 };

 /**
share/functions/eval.fish

@@ -19,6 +19,19 @@ function eval -S -d "Evaluate parameters as a command"
        status --job-control full
    end

+   # rfish: To eval 'foo', we construct a block "begin ; foo; end <&3 3<&-"
+   # The 'eval2_inner' is a param to 'begin' itself; I believe it does nothing.
+   # Note the redirections are also within the quotes.
+   #
+   # We then pipe this to 'source 3<&0' which dup2's 3 to stdin.
+   #
+   # You might expect that the dup2(3, stdin) should overwrite stdin,
+   # and therefore prevent 'source' from reading the piped-in block. This doesn't happen
+   # because when you pipe to a builtin, we don't overwrite stdin with the read end
+   # of the block; instead we set a separate fd in a variable 'builtin_stdin', which is
+   # what it reads from. So builtins are magic in that, in pipes, their stdin
+   # is not fd 0.
+
    echo "begin; $argv "\n" ;end eval2_inner <&3 3<&-" | source 3<&0
    set -l res $status
@@ -94,6 +94,11 @@ else
 end
 echo Test 5 $sta

+# Verify that we can turn stderr into stdout and then pipe it.
+# Note that the order here seems unspecified - 'errput' appears before 'output', why?
+echo Test redirections
+begin ; echo output ; echo errput 1>&2 ; end 2>&1 | tee /tmp/tee_test.txt ; cat /tmp/tee_test.txt
+
 # echo tests

 echo 'abc\ndef'
@@ -18,6 +18,11 @@ Test pass
 Test 3 pass
 Test 4 pass
 Test 5 pass
+Test redirections
+errput
+output
+errput
+output
 abc\ndef
 abc
 def