2016-05-02 03:32:40 +00:00
|
|
|
// Utilities for io redirection.
|
2016-05-18 22:30:21 +00:00
|
|
|
#include "config.h" // IWYU pragma: keep
|
|
|
|
|
2019-10-13 22:50:48 +00:00
|
|
|
#include "io.h"
|
|
|
|
|
2016-05-02 03:32:40 +00:00
|
|
|
#include <errno.h>
|
2019-12-13 01:27:48 +00:00
|
|
|
#include <fcntl.h>
|
2016-04-21 06:00:54 +00:00
|
|
|
#include <stddef.h>
|
2016-05-02 03:32:40 +00:00
|
|
|
#include <stdio.h>
|
|
|
|
#include <unistd.h>
|
2019-10-13 22:50:48 +00:00
|
|
|
|
2019-05-05 10:09:25 +00:00
|
|
|
#include <cstring>
|
2019-03-12 21:06:01 +00:00
|
|
|
#include <cwchar>
|
2006-02-28 13:17:16 +00:00
|
|
|
|
2005-10-08 11:20:51 +00:00
|
|
|
#include "common.h"
|
2016-05-02 03:32:40 +00:00
|
|
|
#include "exec.h"
|
|
|
|
#include "fallback.h" // IWYU pragma: keep
|
2020-02-05 01:49:07 +00:00
|
|
|
#include "fd_monitor.h"
|
2019-02-01 09:58:06 +00:00
|
|
|
#include "iothread.h"
|
2019-12-13 01:27:48 +00:00
|
|
|
#include "path.h"
|
2019-02-13 23:17:07 +00:00
|
|
|
#include "redirection.h"
|
2016-05-02 03:32:40 +00:00
|
|
|
#include "wutil.h" // IWYU pragma: keep
|
2006-07-19 22:55:49 +00:00
|
|
|
|
2019-12-13 01:27:48 +00:00
|
|
|
/// File redirection error message.
|
|
|
|
#define FILE_ERROR _(L"An error occurred while redirecting file '%ls'")
|
|
|
|
#define NOCLOB_ERROR _(L"The file '%ls' already exists")
|
|
|
|
|
|
|
|
/// Base open mode to pass to calls to open.
|
|
|
|
#define OPEN_MASK 0666
|
|
|
|
|
2020-02-05 01:49:07 +00:00
|
|
|
/// Provide the fd monitor used for background fillthread operations.
|
|
|
|
/// Provide the fd monitor used for background fillthread operations.
static fd_monitor_t &fd_monitor() {
    // Deliberately leaked to avoid shutdown dtors.
    static fd_monitor_t *const monitor = new fd_monitor_t();
    return *monitor;
}
|
|
|
|
|
2018-02-19 02:44:58 +00:00
|
|
|
// Defaulted destructors, defined out of line so their definitions (and any
// implicitly generated machinery for these classes) are emitted in this
// translation unit rather than in every includer of io.h.
io_data_t::~io_data_t() = default;
io_pipe_t::~io_pipe_t() = default;
io_fd_t::~io_fd_t() = default;
io_close_t::~io_close_t() = default;
io_file_t::~io_file_t() = default;
io_bufferfill_t::~io_bufferfill_t() = default;
|
2019-12-13 01:27:48 +00:00
|
|
|
|
2019-03-12 21:06:01 +00:00
|
|
|
/// Dump a description of this close-redirection to stderr, for debugging.
void io_close_t::print() const {
    std::fwprintf(stderr, L"close %d\n", fd);
}
|
2013-01-09 08:02:04 +00:00
|
|
|
|
2019-12-29 23:14:08 +00:00
|
|
|
/// Dump a description of this fd-to-fd redirection to stderr, for debugging.
void io_fd_t::print() const {
    std::fwprintf(stderr, L"FD map %d -> %d\n", source_fd, fd);
}
|
2013-01-15 07:37:33 +00:00
|
|
|
|
2020-04-26 03:25:28 +00:00
|
|
|
/// Dump a description of this file redirection to stderr, for debugging.
void io_file_t::print() const {
    std::fwprintf(stderr, L"file %d -> %d\n", file_fd_.fd(), fd);
}
|
2013-01-15 08:18:03 +00:00
|
|
|
|
2016-05-02 03:32:40 +00:00
|
|
|
void io_pipe_t::print() const {
|
2020-04-26 03:25:28 +00:00
|
|
|
std::fwprintf(stderr, L"pipe {%d} (input: %s) -> %d\n", source_fd, is_input_ ? "yes" : "no",
|
|
|
|
fd);
|
2013-01-15 09:31:36 +00:00
|
|
|
}
|
|
|
|
|
2020-04-26 03:25:28 +00:00
|
|
|
/// Dump a description of this bufferfill redirection to stderr, for debugging.
void io_bufferfill_t::print() const {
    std::fwprintf(stderr, L"bufferfill %d -> %d\n", write_fd_.fd(), fd);
}
|
2013-01-15 08:44:31 +00:00
|
|
|
|
2018-05-28 06:56:20 +00:00
|
|
|
void io_buffer_t::append_from_stream(const output_stream_t &stream) {
|
2019-07-21 20:53:05 +00:00
|
|
|
const separated_buffer_t<wcstring> &input = stream.buffer();
|
2020-01-25 00:08:56 +00:00
|
|
|
if (input.elements().empty() && !input.discarded()) return;
|
2019-02-01 09:58:06 +00:00
|
|
|
scoped_lock locker(append_lock_);
|
2018-05-28 08:27:26 +00:00
|
|
|
if (buffer_.discarded()) return;
|
2019-07-21 20:53:05 +00:00
|
|
|
if (input.discarded()) {
|
2018-05-28 08:27:26 +00:00
|
|
|
buffer_.set_discard();
|
2018-05-28 06:56:20 +00:00
|
|
|
return;
|
|
|
|
}
|
2019-07-21 20:53:05 +00:00
|
|
|
buffer_.append_wide_buffer(input);
|
2018-05-28 06:56:20 +00:00
|
|
|
}
|
|
|
|
|
2020-02-05 01:49:07 +00:00
|
|
|
ssize_t io_buffer_t::read_once(int fd) {
|
|
|
|
assert(fd >= 0 && "Invalid fd");
|
|
|
|
ASSERT_IS_LOCKED(append_lock_);
|
|
|
|
errno = 0;
|
|
|
|
char buff[4096 * 4];
|
|
|
|
|
|
|
|
// We want to swallow EINTR only; in particular EAGAIN needs to be returned back to the caller.
|
|
|
|
ssize_t ret;
|
|
|
|
do {
|
|
|
|
ret = read(fd, buff, sizeof buff);
|
|
|
|
} while (ret < 0 && errno == EINTR);
|
|
|
|
if (ret < 0 && errno != EAGAIN) {
|
|
|
|
wperror(L"read");
|
|
|
|
} else if (ret > 0) {
|
|
|
|
buffer_.append(&buff[0], &buff[ret]);
|
2012-11-18 10:23:22 +00:00
|
|
|
}
|
2020-02-05 01:49:07 +00:00
|
|
|
return ret;
|
2005-10-08 11:20:51 +00:00
|
|
|
}
|
|
|
|
|
2020-02-05 01:49:07 +00:00
|
|
|
/// Begin the background fill operation: hand \p fd (the read end of a pipe) to the
/// shared fd monitor, which reads from it into buffer_ until the fd is widowed or
/// until a shutdown is requested via complete_background_fillthread().
void io_buffer_t::begin_filling(autoclose_fd_t fd) {
    ASSERT_IS_MAIN_THREAD();
    assert(!fillthread_running() && "Already have a fillthread");

    // We want to fill buffer_ by reading from fd. fd is the read end of a pipe; the write end is
    // owned by another process, or something else writing in fish.
    // Pass fd to an fd_monitor. It will add fd to its select() loop, and give us a callback when
    // the fd is readable, or when our timeout is hit. The usual path is that we will get called
    // back, read a bit from the fd, and append it to the buffer. Eventually the write end of the
    // pipe will be closed - probably the other process exited - and fd will be widowed; read() will
    // then return 0 and we will stop reading.
    // In exotic circumstances the write end of the pipe will not be closed; this may happen in
    // e.g.:
    //   cmd ( background & ; echo hi )
    // Here the background process will inherit the write end of the pipe and hold onto it forever.
    // In this case, we will hit the timeout on waiting for more data and notice that the shutdown
    // flag is set (this indicates that the command substitution is done); in this case we will read
    // until we get EAGAIN and then give up.

    // Construct a promise that can go into our background thread.
    auto promise = std::make_shared<std::promise<void>>();

    // Get the future associated with our promise.
    // Note this should only ever be called once.
    fillthread_waiter_ = promise->get_future();

    // 100 msec poll rate. Note that in most cases, the write end of the pipe will be closed so
    // select() will return; the polling is important only for weird cases like a background process
    // launched in a command substitution.
    constexpr uint64_t usec_per_msec = 1000;
    uint64_t poll_usec = 100 * usec_per_msec;

    // Run our function to read until the receiver is closed.
    // It's OK to capture 'this' by value because 'this' waits for the promise in its dtor.
    fd_monitor_item_t item;
    item.fd = std::move(fd);
    item.timeout_usec = poll_usec;
    item.callback = [this, promise](autoclose_fd_t &fd, bool timed_out) {
        ASSERT_IS_BACKGROUND_THREAD();
        // Only check the shutdown flag if we timed out.
        // It's important that if select() indicated we were readable, that we call select() again
        // allowing it to time out. Note the typical case is that the fd will be closed, in which
        // case select will return immediately.
        bool done = false;
        if (!timed_out) {
            // select() reported us as readable; read a bit.
            scoped_lock locker(append_lock_);
            ssize_t ret = read_once(fd.fd());
            // EOF, or a real error (anything other than "no data right now"), ends the fill.
            done = (ret == 0 || (ret < 0 && errno != EAGAIN));
        } else if (shutdown_fillthread_) {
            // Here our caller asked us to shut down; read while we keep getting data.
            // This will stop when the fd is closed or if we get EAGAIN.
            scoped_lock locker(append_lock_);
            ssize_t ret;
            do {
                ret = read_once(fd.fd());
            } while (ret > 0);
            done = true;
        }
        if (done) {
            // Close the fd and fulfill the promise so complete_background_fillthread()
            // (waiting on fillthread_waiter_) can return.
            fd.close();
            promise->set_value();
        }
    };
    fd_monitor().add(std::move(item));
}
|
2005-10-08 11:20:51 +00:00
|
|
|
|
2019-02-01 09:58:06 +00:00
|
|
|
/// Finish the background fill: ask the monitor callback to drain any remaining data
/// and stop, then block until it has done so. Main thread only.
void io_buffer_t::complete_background_fillthread() {
    ASSERT_IS_MAIN_THREAD();
    assert(fillthread_running() && "Should have a fillthread");
    // Set the flag first; the fill callback checks it when its poll times out.
    shutdown_fillthread_ = true;

    // Wait for the fillthread to fulfill its promise, and then clear the future so we know we no
    // longer have one.
    fillthread_waiter_.wait();
    fillthread_waiter_ = {};
}
|
2012-11-19 00:30:30 +00:00
|
|
|
|
2020-04-26 02:15:08 +00:00
|
|
|
/// Create a bufferfill redirection targeting \p target, backed by a fresh pipe whose
/// fds avoid \p conflicts. The read end feeds a background-filled io_buffer_t capped
/// at \p buffer_limit; the write end is held by the returned bufferfill.
/// Returns nullptr on failure (pipe creation or fcntl error).
shared_ptr<io_bufferfill_t> io_bufferfill_t::create(const fd_set_t &conflicts, size_t buffer_limit,
                                                    int target) {
    assert(target >= 0 && "Invalid target fd");

    // Construct our pipes.
    auto pipe_pair = make_autoclose_pipes(conflicts);
    if (!pipe_pair) return nullptr;

    // Our buffer will read from the read end of the pipe. This end must be non-blocking. This is
    // because our fillthread needs to poll to decide if it should shut down, and also accept input
    // from direct buffer transfers.
    if (make_fd_nonblocking(pipe_pair->read.fd())) {
        FLOGF(warning, PIPE_ERROR);
        wperror(L"fcntl");
        return nullptr;
    }

    // Our fillthread gets the read end of the pipe; out_pipe gets the write end.
    auto filled_buffer = std::make_shared<io_buffer_t>(buffer_limit);
    filled_buffer->begin_filling(std::move(pipe_pair->read));
    return std::make_shared<io_bufferfill_t>(target, std::move(pipe_pair->write), filled_buffer);
}
|
|
|
|
|
2019-02-01 09:58:06 +00:00
|
|
|
std::shared_ptr<io_buffer_t> io_bufferfill_t::finish(std::shared_ptr<io_bufferfill_t> &&filler) {
|
|
|
|
// The io filler is passed in. This typically holds the only instance of the write side of the
|
|
|
|
// pipe used by the buffer's fillthread (except for that side held by other processes). Get the
|
|
|
|
// buffer out of the bufferfill and clear the shared_ptr; this will typically widow the pipe.
|
|
|
|
// Then allow the buffer to finish.
|
|
|
|
assert(filler && "Null pointer in finish");
|
|
|
|
auto buffer = filler->buffer();
|
|
|
|
filler.reset();
|
|
|
|
buffer->complete_background_fillthread();
|
|
|
|
return buffer;
|
|
|
|
}
|
|
|
|
|
|
|
|
io_buffer_t::~io_buffer_t() {
    // The fill must have been completed (complete_background_fillthread) before we are
    // destroyed; the monitor callback captures 'this' and must not outlive us.
    assert(!fillthread_running() && "io_buffer_t destroyed with outstanding fillthread");
}
|
2005-10-08 11:20:51 +00:00
|
|
|
|
2016-05-02 03:32:40 +00:00
|
|
|
void io_chain_t::remove(const shared_ptr<const io_data_t> &element) {
|
|
|
|
// See if you can guess why std::find doesn't work here.
|
2020-04-02 23:04:04 +00:00
|
|
|
for (auto iter = this->begin(); iter != this->end(); ++iter) {
|
2016-05-02 03:32:40 +00:00
|
|
|
if (*iter == element) {
|
2012-08-15 07:57:56 +00:00
|
|
|
this->erase(iter);
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
2005-10-08 11:20:51 +00:00
|
|
|
|
2019-11-10 22:00:30 +00:00
|
|
|
/// Append \p element to the chain. Null elements are forbidden.
void io_chain_t::push_back(io_data_ref_t element) {
    assert(element.get() != nullptr);  // never store a null redirection
    std::vector<io_data_ref_t>::push_back(std::move(element));
}
|
|
|
|
|
2016-05-02 03:32:40 +00:00
|
|
|
void io_chain_t::append(const io_chain_t &chain) {
|
2019-11-10 21:31:47 +00:00
|
|
|
assert(&chain != this && "Cannot append self to self");
|
Big fat refactoring of how redirections work. In fish 1.x and 2.0.0, the redirections for a process were flattened into a big list associated with the job, so there was no way to tell which redirections applied to each process. Each process therefore got all the redirections associated with the job. See https://github.com/fish-shell/fish-shell/issues/877 for how this could manifest.
With this change, jobs only track their block-level redirections. Process level redirections are correctly associated with the process, and at exec time we stitch them together (block, pipe, and process redirects).
This fixes the weird issues where redirects bleed across pipelines (like #877), and also allows us to play with the order in which redirections are applied, since the final list is constructed right before it's needed. This lets us put pipes after block level redirections but before process level redirections, so that a 2>&1-type redirection gets picked up after the pipe, i.e. it should fix https://github.com/fish-shell/fish-shell/issues/110
This is a significant change. The tests all pass. Cross your fingers.
2013-08-19 23:16:41 +00:00
|
|
|
this->insert(this->end(), chain.begin(), chain.end());
|
|
|
|
}
|
|
|
|
|
2019-12-13 01:27:48 +00:00
|
|
|
/// Resolve a list of redirection specs into io_data_t entries appended to this chain,
/// resolving relative paths against \p pwd. Returns false if any file failed to open
/// (a close entry is substituted so execution can still proceed).
bool io_chain_t::append_from_specs(const redirection_spec_list_t &specs, const wcstring &pwd) {
    bool have_error = false;
    for (const auto &spec : specs) {
        switch (spec.mode) {
            case redirection_mode_t::fd: {
                if (spec.is_close()) {
                    this->push_back(make_unique<io_close_t>(spec.fd));
                } else {
                    auto target_fd = spec.get_target_as_fd();
                    assert(target_fd.has_value() &&
                           "fd redirection should have been validated already");
                    this->push_back(make_unique<io_fd_t>(spec.fd, *target_fd));
                }
                break;
            }
            default: {
                // We have a path-based redirection. Resolve it to a file.
                // Mark it as CLOEXEC because we don't want it to be open in any child.
                wcstring path = path_apply_working_directory(spec.target, pwd);
                int oflags = spec.oflags();
                autoclose_fd_t file{wopen_cloexec(path, oflags, OPEN_MASK)};
                if (!file.valid()) {
                    // Distinguish noclobber failures (O_EXCL + existing file) from other errors.
                    if ((oflags & O_EXCL) && (errno == EEXIST)) {
                        FLOGF(warning, NOCLOB_ERROR, spec.target.c_str());
                    } else {
                        FLOGF(warning, FILE_ERROR, spec.target.c_str());
                        if (should_flog(warning)) wperror(L"open");
                    }
                    // If opening a file fails, insert a closed FD instead of the file redirection
                    // and return false. This lets execution potentially recover and at least gives
                    // the shell a chance to gracefully regain control of the shell (see #7038).
                    this->push_back(make_unique<io_close_t>(spec.fd));
                    have_error = true;
                    break;
                }
                this->push_back(std::make_shared<io_file_t>(spec.fd, std::move(file)));
                break;
            }
        }
    }
    return !have_error;
}
|
|
|
|
|
2019-12-11 01:05:17 +00:00
|
|
|
void io_chain_t::print() const {
|
|
|
|
if (this->empty()) {
|
|
|
|
std::fwprintf(stderr, L"Empty chain %p\n", this);
|
2012-08-15 07:57:56 +00:00
|
|
|
return;
|
|
|
|
}
|
2012-11-18 10:23:22 +00:00
|
|
|
|
2020-04-08 23:56:59 +00:00
|
|
|
std::fwprintf(stderr, L"Chain %p (%ld items):\n", this, static_cast<long>(this->size()));
|
2019-12-11 01:05:17 +00:00
|
|
|
for (size_t i = 0; i < this->size(); i++) {
|
|
|
|
const auto &io = this->at(i);
|
2019-12-21 20:42:12 +00:00
|
|
|
if (io == nullptr) {
|
2019-03-12 21:06:01 +00:00
|
|
|
std::fwprintf(stderr, L"\t(null)\n");
|
2019-12-11 01:21:03 +00:00
|
|
|
} else {
|
2020-04-08 23:56:59 +00:00
|
|
|
std::fwprintf(stderr, L"\t%lu: fd:%d, ", static_cast<unsigned long>(i), io->fd);
|
2013-02-22 21:20:27 +00:00
|
|
|
io->print();
|
|
|
|
}
|
2012-08-15 07:57:56 +00:00
|
|
|
}
|
|
|
|
}
|
2005-10-08 11:20:51 +00:00
|
|
|
|
2019-12-12 22:42:03 +00:00
|
|
|
/// Return the set of fds that this chain redirects.
fd_set_t io_chain_t::fd_set() const {
    fd_set_t fds;
    for (const auto &io : *this) {
        fds.add(io->fd);
    }
    return fds;
}
|
|
|
|
|
2019-12-29 22:57:16 +00:00
|
|
|
/// Return an fd equivalent to \p fd but not contained in \p fdset, duplicating as
/// necessary. On failure (e.g. fd exhaustion) an invalid fd is returned.
autoclose_fd_t move_fd_to_unused(autoclose_fd_t fd, const fd_set_t &fdset) {
    // Invalid or non-conflicting fds are returned unchanged.
    if (!fd.valid() || !fdset.contains(fd.fd())) return fd;

    // We have fd >= 0, and it's a conflict. dup it and recurse. Note that we recurse before
    // anything is closed; this forces the kernel to give us a new one (or report fd exhaustion).
    int duped = -1;
    do {
        duped = dup(fd.fd());
    } while (duped < 0 && errno == EINTR);
    assert(duped != fd.fd());
    if (duped < 0) {
        // Likely fd exhaustion.
        return autoclose_fd_t{};
    }

    // Ok, we have a new candidate fd. Recurse.
    set_cloexec(duped);
    return move_fd_to_unused(autoclose_fd_t{duped}, fdset);
}
|
|
|
|
|
2019-12-12 22:42:03 +00:00
|
|
|
/// Create a pipe whose two ends avoid every fd in \p fdset. Both ends are marked
/// CLOEXEC. Returns none() if the pipe cannot be created or the fds cannot be moved.
maybe_t<autoclose_pipes_t> make_autoclose_pipes(const fd_set_t &fdset) {
    int raw[2] = {-1, -1};
    if (pipe(raw) < 0) {
        FLOGF(warning, PIPE_ERROR);
        wperror(L"pipe");
        return none();
    }
    for (int pipe_fd : raw) set_cloexec(pipe_fd);

    auto read_end = move_fd_to_unused(autoclose_fd_t{raw[0]}, fdset);
    if (!read_end.valid()) return none();

    auto write_end = move_fd_to_unused(autoclose_fd_t{raw[1]}, fdset);
    if (!write_end.valid()) return none();

    return autoclose_pipes_t(std::move(read_end), std::move(write_end));
}
|
|
|
|
|
2019-11-10 22:00:30 +00:00
|
|
|
/// Return the last redirection in the chain that applies to \p fd, or nullptr if none.
/// The scan is in reverse because later redirections supersede earlier ones.
shared_ptr<const io_data_t> io_chain_t::io_for_fd(int fd) const {
    for (auto it = rbegin(); it != rend(); ++it) {
        if ((*it)->fd == fd) {
            return *it;
        }
    }
    return nullptr;
}
|
2020-04-26 02:15:08 +00:00
|
|
|
|
|
|
|
void output_stream_t::append_narrow_buffer(const separated_buffer_t<std::string> &buffer) {
|
|
|
|
for (const auto &rhs_elem : buffer.elements()) {
|
|
|
|
buffer_.append(str2wcstring(rhs_elem.contents), rhs_elem.separation);
|
|
|
|
}
|
|
|
|
}
|