// Utilities for io redirection.
#include "config.h"  // IWYU pragma: keep

#include "io.h"

#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <sys/stat.h>
#include <unistd.h>

#include <cwchar>
#include <functional>

#include "common.h"
#include "fallback.h"  // IWYU pragma: keep
#include "fd_monitor.rs.h"
#include "fds.h"
#include "fds.rs.h"
#include "flog.h"
#include "maybe.h"
#include "path.h"
#include "redirection.h"
#include "threads.rs.h"
#include "wutil.h"  // IWYU pragma: keep

/// File redirection error message.
#define FILE_ERROR _(L"An error occurred while redirecting file '%ls'")
#define NOCLOB_ERROR _(L"The file '%ls' already exists")

/// Base open mode to pass to calls to open.
#define OPEN_MASK 0666

/// Provide the fd monitor used for background fillthread operations.
static fd_monitor_t &fd_monitor() {
    // Deliberately leaked to avoid shutdown dtors.
    static auto fdm = make_fd_monitor_t();
    return *fdm;
}
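
// Note: make_fd_monitor_t() and fd_monitor_t are provided by the Rust implementation (see the
// fd_monitor.rs.h include above), as part of the cpp-to-rust transition.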

io_data_t::~io_data_t() = default;
io_pipe_t::~io_pipe_t() = default;
io_fd_t::~io_fd_t() = default;
io_close_t::~io_close_t() = default;
io_file_t::~io_file_t() = default;
io_bufferfill_t::~io_bufferfill_t() = default;

void io_close_t::print() const { std::fwprintf(stderr, L"close %d\n", fd); }

void io_fd_t::print() const { std::fwprintf(stderr, L"FD map %d -> %d\n", source_fd, fd); }

void io_file_t::print() const { std::fwprintf(stderr, L"file %d -> %d\n", file_fd_.fd(), fd); }

void io_pipe_t::print() const {
    std::fwprintf(stderr, L"pipe {%d} (input: %s) -> %d\n", source_fd, is_input_ ? "yes" : "no",
                  fd);
}

void io_bufferfill_t::print() const {
    std::fwprintf(stderr, L"bufferfill %d -> %d\n", write_fd_.fd(), fd);
}

ssize_t io_buffer_t::read_once(int fd, acquired_lock<separated_buffer_t> &buffer) {
    assert(fd >= 0 && "Invalid fd");
    errno = 0;
    char bytes[4096 * 4];

    // We want to swallow EINTR only; in particular EAGAIN needs to be returned back to the caller.
    ssize_t amt;
    do {
        amt = read(fd, bytes, sizeof bytes);
    } while (amt < 0 && errno == EINTR);
    if (amt < 0 && errno != EAGAIN && errno != EWOULDBLOCK) {
        wperror(L"read");
    } else if (amt > 0) {
        buffer->append(bytes, static_cast<size_t>(amt));
    }
    return amt;
}

struct callback_args_t {
    io_buffer_t *instance;
    std::shared_ptr<std::promise<void>> promise;
};
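
// Lifetime note: begin_filling() heap-allocates one callback_args_t per monitored fd, and
// item_callback() deletes it once the fd has been closed and the promise fulfilled.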

extern "C" {
static void item_callback_trampoline(autoclose_fd_t2 &fd, item_wake_reason_t reason,
                                     callback_args_t *args) {
    (args->instance)->item_callback(fd, (uint8_t)reason, args);
}
}

void io_buffer_t::begin_filling(autoclose_fd_t fd) {
    assert(!fillthread_running() && "Already have a fillthread");

    // We want to fill buffer_ by reading from fd. fd is the read end of a pipe; the write end is
    // owned by another process, or something else writing in fish.
    // Pass fd to an fd_monitor. It will add fd to its select() loop, and give us a callback when
    // the fd is readable, or when our item is poked. The usual path is that we will get called
    // back, read a bit from the fd, and append it to the buffer. Eventually the write end of the
    // pipe will be closed - probably the other process exited - and fd will be widowed; read()
    // will then return 0 and we will stop reading.
    // In exotic circumstances the write end of the pipe will not be closed; this may happen in
    // e.g.:
    //   cmd ( background & ; echo hi )
    // Here the background process will inherit the write end of the pipe and hold onto it forever.
    // In this case, when complete_background_fillthread() is called, the callback will be invoked
    // with item_wake_reason_t::poke, and we will notice that the shutdown flag is set (this
    // indicates that the command substitution is done); in this case we will read until we get
    // EAGAIN and then give up.

    // Construct a promise. We will fulfill it in our fill thread, and wait for it in
    // complete_background_fillthread(). Note that TSan complains if the promise's dtor races with
    // the future's call to wait(), so we store the promise, not just its future (#7681).
    auto promise = std::make_shared<std::promise<void>>();
    this->fill_waiter_ = promise;

    // Run our callback to read until the receiver is closed.
    // It's OK to store a raw 'this' pointer in the callback args because 'this' waits for the
    // promise in its dtor.
    auto args = new callback_args_t;
    args->instance = this;
    args->promise = std::move(promise);

    item_id_ = fd_monitor().add_item(fd.acquire(), kNoTimeout, (uint8_t *)item_callback_trampoline,
                                     (uint8_t *)args);
}

/// This is a hack to work around the difficulties in passing a capturing lambda across FFI
/// boundaries. A static function that takes a generic/untyped callback parameter is easy to
/// marshall with the basic C ABI.
void io_buffer_t::item_callback(autoclose_fd_t2 &fd, uint8_t r, callback_args_t *args) {
    item_wake_reason_t reason = (item_wake_reason_t)r;
    auto &promise = *args->promise;

    // Only check the shutdown flag if we timed out or were poked.
    // It's important that, if select() indicated we were readable, we call select() again,
    // allowing it to time out. Note the typical case is that the fd will be closed, in which
    // case select will return immediately.
    bool done = false;
    if (reason == item_wake_reason_t::Readable) {
        // select() reported us as readable; read a bit.
        auto buffer = buffer_.acquire();
        ssize_t ret = read_once(fd.fd(), buffer);
        done = (ret == 0 || (ret < 0 && errno != EAGAIN && errno != EWOULDBLOCK));
    } else if (shutdown_fillthread_) {
        // Here our caller asked us to shut down; read while we keep getting data.
        // This will stop when the fd is closed or if we get EAGAIN.
        auto buffer = buffer_.acquire();
        ssize_t ret;
        do {
            ret = read_once(fd.fd(), buffer);
        } while (ret > 0);
        done = true;
    }
    if (done) {
        fd.close();
        promise.set_value();
        // When we close the fd, we signal to the caller that the fd should be removed from its set
        // and that this callback should never be called again.
        // Manual memory management is not nice but this is just during the cpp-to-rust transition.
        delete args;
    }
}

separated_buffer_t io_buffer_t::complete_background_fillthread_and_take_buffer() {
    // Mark that our fillthread is done, then wake it up.
    assert(fillthread_running() && "Should have a fillthread");
    assert(this->item_id_ > 0 && "Should have a valid item ID");
    shutdown_fillthread_ = true;
    fd_monitor().poke_item(this->item_id_);

    // Wait for the fillthread to fulfill its promise, and then clear the future so we know we no
    // longer have one.
    fill_waiter_->get_future().wait();
    fill_waiter_.reset();

    // Return our buffer, transferring ownership.
    auto locked_buff = buffer_.acquire();
    separated_buffer_t result = std::move(*locked_buff);
    locked_buff->clear();
    return result;
}

shared_ptr<io_bufferfill_t> io_bufferfill_t::create(size_t buffer_limit, int target) {
    assert(target >= 0 && "Invalid target fd");

    // Construct our pipes.
    auto pipes = make_autoclose_pipes();
    if (!pipes) {
        return nullptr;
    }
    // Our buffer will read from the read end of the pipe. This end must be non-blocking. This is
    // because our fillthread needs to poll to decide if it should shut down, and also accept input
    // from direct buffer transfers.
    if (make_fd_nonblocking(pipes->read.fd())) {
        FLOGF(warning, PIPE_ERROR);
        wperror(L"fcntl");
        return nullptr;
    }
    // Our fillthread gets the read end of the pipe; the bufferfill itself keeps the write end.
    auto buffer = std::make_shared<io_buffer_t>(buffer_limit);
    buffer->begin_filling(std::move(pipes->read));
    return std::make_shared<io_bufferfill_t>(target, std::move(pipes->write), buffer);
}

separated_buffer_t io_bufferfill_t::finish(std::shared_ptr<io_bufferfill_t> &&filler) {
    // The io filler is passed in. This typically holds the only instance of the write side of the
    // pipe used by the buffer's fillthread (except for that side held by other processes). Get the
    // buffer out of the bufferfill and clear the shared_ptr; this will typically widow the pipe.
    // Then allow the buffer to finish.
    assert(filler && "Null pointer in finish");
    auto buffer = filler->buffer();
    filler.reset();
    return buffer->complete_background_fillthread_and_take_buffer();
}
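
// Usage sketch (illustrative only; `io_chain`, `buffer_limit`, and the job-running step are
// hypothetical and live in the caller, typically the exec machinery):
//   auto fill = io_bufferfill_t::create(buffer_limit, STDOUT_FILENO);
//   io_chain.push_back(fill);  // processes redirected through `fill` write into its pipe
//   /* ... run the job ... */
//   separated_buffer_t out = io_bufferfill_t::finish(std::move(fill));  // widows the pipe, joins the fill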

io_buffer_t::~io_buffer_t() {
    assert(!fillthread_running() && "io_buffer_t destroyed with outstanding fillthread");
}

void io_chain_t::remove(const shared_ptr<const io_data_t> &element) {
    // See if you can guess why std::find doesn't work here.
    for (auto iter = this->begin(); iter != this->end(); ++iter) {
        if (*iter == element) {
            this->erase(iter);
            break;
        }
    }
}

void io_chain_t::push_back(io_data_ref_t element) {
    // Ensure we never push back NULL.
    assert(element.get() != nullptr);
    std::vector<io_data_ref_t>::push_back(std::move(element));
}

bool io_chain_t::append(const io_chain_t &chain) {
    assert(&chain != this && "Cannot append self to self");
    this->insert(this->end(), chain.begin(), chain.end());
    return true;
}

bool io_chain_t::append_from_specs(const redirection_spec_list_t &specs, const wcstring &pwd) {
    bool have_error = false;
    for (size_t i = 0; i < specs.size(); i++) {
        const redirection_spec_t *spec = specs.at(i);
        switch (spec->mode()) {
            case redirection_mode_t::fd: {
                if (spec->is_close()) {
                    this->push_back(make_unique<io_close_t>(spec->fd()));
                } else {
                    auto target_fd = spec->get_target_as_fd();
                    assert(target_fd && "fd redirection should have been validated already");
                    this->push_back(make_unique<io_fd_t>(spec->fd(), *target_fd));
                }
                break;
            }
            default: {
                // We have a path-based redirection. Resolve it to a file.
                // Mark it as CLOEXEC because we don't want it to be open in any child.
                wcstring path = path_apply_working_directory(*spec->target(), pwd);
                int oflags = spec->oflags();
                autoclose_fd_t file{wopen_cloexec(path, oflags, OPEN_MASK)};
                if (!file.valid()) {
                    if ((oflags & O_EXCL) && (errno == EEXIST)) {
                        FLOGF(warning, NOCLOB_ERROR, spec->target()->c_str());
                    } else {
                        if (should_flog(warning)) {
                            FLOGF(warning, FILE_ERROR, spec->target()->c_str());
                            auto err = errno;
                            // If the error is that the file doesn't exist
                            // or there's a non-directory component,
                            // find the first problematic component for a better message.
                            if (err == ENOENT || err == ENOTDIR) {
                                auto dname = *spec->target();
                                struct stat buf;

                                while (!dname.empty()) {
                                    auto next = wdirname(dname);
                                    if (!wstat(next, &buf)) {
                                        if (!S_ISDIR(buf.st_mode)) {
                                            FLOGF(warning, _(L"Path '%ls' is not a directory"),
                                                  next.c_str());
                                        } else {
                                            FLOGF(warning, _(L"Path '%ls' does not exist"),
                                                  dname.c_str());
                                        }
                                        break;
                                    }
                                    dname = next;
                                }
                            } else {
                                wperror(L"open");
                            }
                        }
                    }
                    // If opening the file fails, insert a closed FD instead of the file redirection
                    // and return false. This lets execution potentially recover and at least gives
                    // the shell a chance to gracefully regain control (see #7038).
                    this->push_back(make_unique<io_close_t>(spec->fd()));
                    have_error = true;
                    break;
                }
                this->push_back(std::make_shared<io_file_t>(spec->fd(), std::move(file)));
                break;
            }
        }
    }
    return !have_error;
}
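
// Example (illustrative): for a redirection like `echo hi > out.txt`, the parser produces a
// path-based redirection_spec_t on fd 1; this function then opens "out.txt" with the spec's
// oflags (something like O_WRONLY | O_CREAT | O_TRUNC for `>`) and appends an io_file_t that
// redirects fd 1 to the newly opened file.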

void io_chain_t::print() const {
    if (this->empty()) {
        std::fwprintf(stderr, L"Empty chain %p\n", this);
        return;
    }

    std::fwprintf(stderr, L"Chain %p (%ld items):\n", this, static_cast<long>(this->size()));
    for (size_t i = 0; i < this->size(); i++) {
        const auto &io = this->at(i);
        if (io == nullptr) {
            std::fwprintf(stderr, L"\t(null)\n");
        } else {
            std::fwprintf(stderr, L"\t%lu: fd:%d, ", static_cast<unsigned long>(i), io->fd);
            io->print();
        }
    }
}

shared_ptr<const io_data_t> io_chain_t::io_for_fd(int fd) const {
    for (auto iter = rbegin(); iter != rend(); ++iter) {
        const auto &data = *iter;
        if (data->fd == fd) {
            return data;
        }
    }
    return nullptr;
}

dup2_list_t dup2_list_resolve_chain_shim(const io_chain_t &io_chain) {
    ASSERT_IS_NOT_FORKED_CHILD();
    std::vector<dup2_action_t> chain;
    for (const auto &io_data : io_chain) {
        chain.push_back(dup2_action_t{io_data->source_fd, io_data->fd});
    }
    return dup2_list_resolve_chain(chain);
}
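
// Example (illustrative): a `2>&1` redirection becomes an io_fd_t with fd = 2 and source_fd = 1,
// which contributes dup2_action_t{1, 2} here, i.e. a dup2(1, 2) performed at exec time.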

bool output_stream_t::append_narrow_buffer(const separated_buffer_t &buffer) {
    for (const auto &rhs_elem : buffer.elements()) {
        if (!append_with_separation(str2wcstring(rhs_elem.contents), rhs_elem.separation, false)) {
            return false;
        }
    }
    return true;
}

bool output_stream_t::append_with_separation(const wchar_t *s, size_t len, separation_type_t type,
                                             bool want_newline) {
    if (type == separation_type_t::explicitly && want_newline) {
        // Try to call "append" only once, since it might write() to an fd.
        wcstring buf{s, len};
        buf.push_back(L'\n');
        return append(buf);
    } else {
        return append(s, len);
    }
}

const wcstring &output_stream_t::contents() const { return g_empty_string; }

int output_stream_t::flush_and_check_error() { return STATUS_CMD_OK; }

fd_output_stream_t::fd_output_stream_t(int fd) : fd_(fd), sigcheck_(topic_t::sighupint) {
    assert(fd_ >= 0 && "Invalid fd");
}

bool fd_output_stream_t::append(const wchar_t *s, size_t amt) {
    if (errored_) return false;
    int res = wwrite_to_fd(s, amt, this->fd_);
    if (res < 0) {
        // Some of our builtins emit multiple screens' worth of data to a pager (the primary
        // example being the `history` builtin), and receiving SIGINT there should be considered
        // normal and non-exceptional (a user request to abort via Ctrl-C), meaning we shouldn't
        // print an error.
        if (errno == EINTR && sigcheck_.check()) {
            // We have two options here: we can either return false without setting errored_ to
            // true (*this* write will be silently aborted but the onus is on the caller to check
            // the return value and skip future calls to `append()`) or we can flag the entire
            // output stream as errored, causing us to both return false and skip any future writes.
            // We're currently going with the latter, especially seeing as no callers currently
            // check the result of `append()` (since it was always a void function before).
        } else if (errno != EPIPE) {
            wperror(L"write");
        }
        errored_ = true;
    }
    return !errored_;
}

int fd_output_stream_t::flush_and_check_error() {
    // Return a generic 1 on any write failure.
    return errored_ ? STATUS_CMD_ERROR : STATUS_CMD_OK;
}

bool null_output_stream_t::append(const wchar_t *, size_t) { return true; }

std::unique_ptr<io_streams_t> make_null_io_streams_ffi() {
    // Temporary test helper.
    static null_output_stream_t *null = new null_output_stream_t();
    return std::make_unique<io_streams_t>(*null, *null);
}

bool string_output_stream_t::append(const wchar_t *s, size_t amt) {
    contents_.append(s, amt);
    return true;
}

const wcstring &string_output_stream_t::contents() const { return contents_; }

bool buffered_output_stream_t::append(const wchar_t *s, size_t amt) {
    return buffer_->append(wcs2string(s, amt));
}

bool buffered_output_stream_t::append_with_separation(const wchar_t *s, size_t len,
                                                      separation_type_t type, bool want_newline) {
    UNUSED(want_newline);
    return buffer_->append(wcs2string(s, len), type);
}

int buffered_output_stream_t::flush_and_check_error() {
    if (buffer_->discarded()) {
        return STATUS_READ_TOO_MUCH;
    }
    return 0;
}