2016-05-02 03:32:40 +00:00
|
|
|
// Utilities for io redirection.
|
2016-05-18 22:30:21 +00:00
|
|
|
#include "config.h" // IWYU pragma: keep
|
|
|
|
|
2019-10-13 22:50:48 +00:00
|
|
|
#include "io.h"
|
|
|
|
|
2016-05-02 03:32:40 +00:00
|
|
|
#include <errno.h>
|
2019-12-13 01:27:48 +00:00
|
|
|
#include <fcntl.h>
|
2016-04-21 06:00:54 +00:00
|
|
|
#include <stddef.h>
|
2016-05-02 03:32:40 +00:00
|
|
|
#include <stdio.h>
|
|
|
|
#include <unistd.h>
|
2019-10-13 22:50:48 +00:00
|
|
|
|
2019-05-05 10:09:25 +00:00
|
|
|
#include <cstring>
|
2019-03-12 21:06:01 +00:00
|
|
|
#include <cwchar>
|
2006-02-28 13:17:16 +00:00
|
|
|
|
2005-10-08 11:20:51 +00:00
|
|
|
#include "common.h"
|
2016-05-02 03:32:40 +00:00
|
|
|
#include "exec.h"
|
|
|
|
#include "fallback.h" // IWYU pragma: keep
|
2019-02-01 09:58:06 +00:00
|
|
|
#include "iothread.h"
|
2019-12-13 01:27:48 +00:00
|
|
|
#include "path.h"
|
2019-02-13 23:17:07 +00:00
|
|
|
#include "redirection.h"
|
2016-05-02 03:32:40 +00:00
|
|
|
#include "wutil.h" // IWYU pragma: keep
|
2006-07-19 22:55:49 +00:00
|
|
|
|
2019-12-13 01:27:48 +00:00
|
|
|
/// File redirection error message.
|
|
|
|
#define FILE_ERROR _(L"An error occurred while redirecting file '%ls'")
|
|
|
|
#define NOCLOB_ERROR _(L"The file '%ls' already exists")
|
|
|
|
|
|
|
|
/// Base open mode to pass to calls to open.
|
|
|
|
#define OPEN_MASK 0666
|
|
|
|
|
2018-02-19 02:44:58 +00:00
|
|
|
// Out-of-line destructor anchors io_data_t's vtable in this translation unit.
io_data_t::~io_data_t() = default;
|
2013-01-09 07:49:12 +00:00
|
|
|
|
2019-12-20 22:47:54 +00:00
|
|
|
/// Construct a file redirection: fd `f` will be redirected to the already-open file `file`.
io_file_t::io_file_t(int f, autoclose_fd_t file)
    : io_data_t(io_mode_t::file, f), file_fd_(std::move(file)) {
    // Callers must open the file before constructing this; we never hold an invalid fd.
    assert(file_fd_.valid() && "File is not valid");
}
|
|
|
|
|
2019-03-12 21:06:01 +00:00
|
|
|
/// Debug dump: report which fd this redirection closes.
void io_close_t::print() const {
    std::fwprintf(stderr, L"close %d\n", fd);
}
|
2013-01-09 08:02:04 +00:00
|
|
|
|
2019-03-12 21:06:01 +00:00
|
|
|
/// Debug dump: report the dup2-style fd mapping (old_fd becomes fd).
void io_fd_t::print() const {
    std::fwprintf(stderr, L"FD map %d -> %d\n", old_fd, fd);
}
|
2013-01-15 07:37:33 +00:00
|
|
|
|
2019-12-13 01:27:48 +00:00
|
|
|
/// Debug dump: report the underlying file descriptor of this file redirection.
void io_file_t::print() const {
    std::fwprintf(stderr, L"file (%d)\n", file_fd_.fd());
}
|
2013-01-15 08:18:03 +00:00
|
|
|
|
2016-05-02 03:32:40 +00:00
|
|
|
void io_pipe_t::print() const {
|
2019-03-12 21:06:01 +00:00
|
|
|
std::fwprintf(stderr, L"pipe {%d} (input: %s)\n", pipe_fd(), is_input_ ? "yes" : "no");
|
2013-01-15 09:31:36 +00:00
|
|
|
}
|
|
|
|
|
2019-03-12 21:06:01 +00:00
|
|
|
/// Debug dump: report the write end of the pipe feeding the buffer.
void io_bufferfill_t::print() const {
    std::fwprintf(stderr, L"bufferfill {%d}\n", write_fd_.fd());
}
|
2013-01-15 08:44:31 +00:00
|
|
|
|
2018-05-28 06:56:20 +00:00
|
|
|
void io_buffer_t::append_from_stream(const output_stream_t &stream) {
|
2019-07-21 20:53:05 +00:00
|
|
|
const separated_buffer_t<wcstring> &input = stream.buffer();
|
|
|
|
if (input.elements().empty()) return;
|
2019-02-01 09:58:06 +00:00
|
|
|
scoped_lock locker(append_lock_);
|
2018-05-28 08:27:26 +00:00
|
|
|
if (buffer_.discarded()) return;
|
2019-07-21 20:53:05 +00:00
|
|
|
if (input.discarded()) {
|
2018-05-28 08:27:26 +00:00
|
|
|
buffer_.set_discard();
|
2018-05-28 06:56:20 +00:00
|
|
|
return;
|
|
|
|
}
|
2019-07-21 20:53:05 +00:00
|
|
|
buffer_.append_wide_buffer(input);
|
2018-05-28 06:56:20 +00:00
|
|
|
}
|
|
|
|
|
2019-02-01 09:58:06 +00:00
|
|
|
/// Body of the background fillthread: read everything from `readfd` into buffer_ until EOF or
/// until shutdown is requested and the fd has nothing left to offer.
void io_buffer_t::run_background_fillthread(autoclose_fd_t readfd) {
    // Here we are running the background fillthread, executing in a background thread.
    // Our plan is:
    // 1. poll via select() until the fd is readable.
    // 2. Acquire the append lock.
    // 3. read until EAGAIN (would block), appending
    // 4. release the lock
    // The purpose of holding the lock around the read calls is to ensure that data from background
    // processes isn't weirdly interspersed with data directly transferred (from a builtin to a
    // buffer).
    const int fd = readfd.fd();

    // 100 msec poll rate. Note that in most cases, the write end of the pipe will be closed so
    // select() will return; the polling is important only for weird cases like a background process
    // launched in a command substitution.
    const long poll_timeout_usec = 100000;
    struct timeval tv = {};
    tv.tv_usec = poll_timeout_usec;

    bool shutdown = false;
    while (!shutdown) {
        bool readable = false;

        // Poll if our fd is readable.
        // Do this even if the shutdown flag is set. It's important we wait for the fd at least
        // once. For short-lived processes, it's possible for the process to execute, produce output
        // (fits in the pipe buffer) and be reaped before we are even scheduled. So always wait at
        // least once on the fd. Note that doesn't mean we will wait for the full poll duration;
        // typically what will happen is our pipe will be widowed and so this will return quickly.
        // It's only for weird cases (e.g. a background process launched inside a command
        // substitution) that we'll wait out the entire poll time.
        fd_set fds;
        FD_ZERO(&fds);
        FD_SET(fd, &fds);
        int ret = select(fd + 1, &fds, nullptr, nullptr, &tv);
        // select(2) is allowed to (and does) update `tv` to indicate how much time was left, so we
        // need to restore the desired value each time.
        tv.tv_usec = poll_timeout_usec;
        readable = ret > 0;
        if (ret < 0 && errno != EINTR) {
            // Surprising error.
            wperror(L"select");
            return;
        }

        // Only check the shutdown flag if we timed out.
        // It's important that if select() indicated we were readable, that we call select() again
        // allowing it to time out. Note the typical case is that the fd will be closed, in which
        // case select will return immediately.
        if (!readable) {
            shutdown = this->shutdown_fillthread_;
        }

        if (readable || shutdown) {
            // Now either our fd is readable, or we have set the shutdown flag.
            // Either way acquire the lock and read until we reach EOF, or EAGAIN / EINTR.
            scoped_lock locker(append_lock_);
            ssize_t ret;
            do {
                // Clear errno so we can distinguish "read returned -1 with no errno" below.
                errno = 0;
                char buff[4096];
                ret = read(fd, buff, sizeof buff);
                if (ret > 0) {
                    buffer_.append(&buff[0], &buff[ret]);
                } else if (ret == 0) {
                    // EOF: the write side was closed; we are done.
                    shutdown = true;
                } else if (ret == -1 && errno == 0) {
                    // No specific error. We assume we just return,
                    // since that's what we do in read_blocked.
                    return;
                } else if (errno != EINTR && errno != EAGAIN) {
                    wperror(L"read");
                    return;
                }
            } while (ret > 0);
        }
    }
    assert(shutdown && "Should only exit loop if shutdown flag is set");
}
|
|
|
|
|
2019-02-01 09:58:06 +00:00
|
|
|
/// Launch the background fillthread which reads from `fd` into this buffer.
/// Must be called on the main thread, and at most once before complete_background_fillthread().
void io_buffer_t::begin_background_fillthread(autoclose_fd_t fd) {
    ASSERT_IS_MAIN_THREAD();
    assert(!fillthread_running() && "Already have a fillthread");

    // We want our background thread to own the fd but it's not easy to move into a std::function.
    // Use a shared_ptr.
    auto fdref = move_to_sharedptr(std::move(fd));

    // Construct a promise that can go into our background thread.
    auto promise = std::make_shared<std::promise<void>>();

    // Get the future associated with our promise.
    // Note this should only ever be called once.
    fillthread_waiter_ = promise->get_future();

    // Run our function to read until the receiver is closed.
    // It's OK to capture 'this' by value because 'this' owns the background thread and waits for it
    // before dtor.
    iothread_perform([this, promise, fdref]() {
        this->run_background_fillthread(std::move(*fdref));
        // Fulfill the promise so complete_background_fillthread() can observe that we're done.
        promise->set_value();
    });
}
|
2005-10-08 11:20:51 +00:00
|
|
|
|
2019-02-01 09:58:06 +00:00
|
|
|
/// Request that the background fillthread stop, then block until it has finished.
/// Must be called on the main thread, and only while a fillthread is running.
void io_buffer_t::complete_background_fillthread() {
    ASSERT_IS_MAIN_THREAD();
    assert(fillthread_running() && "Should have a fillthread");
    // Signal the fillthread; it checks this flag when its poll times out.
    shutdown_fillthread_ = true;

    // Wait for the fillthread to fulfill its promise, and then clear the future so we know we no
    // longer have one.
    fillthread_waiter_.wait();
    fillthread_waiter_ = {};
}
|
2012-11-19 00:30:30 +00:00
|
|
|
|
2019-12-12 22:42:03 +00:00
|
|
|
/// Create a bufferfill: a pipe whose write end is handed to the caller and whose read end feeds
/// an io_buffer_t serviced by a background fillthread. Returns nullptr on failure (e.g. pipe or
/// fd exhaustion). `conflicts` lists fds the pipe must avoid; `buffer_limit` caps the buffer size.
shared_ptr<io_bufferfill_t> io_bufferfill_t::create(const fd_set_t &conflicts,
                                                    size_t buffer_limit) {
    // Construct the pipe pair.
    auto pipe_pair = make_autoclose_pipes(conflicts);
    if (!pipe_pair) {
        return nullptr;
    }
    // The buffer reads from the read end, which must be non-blocking: the fillthread has to poll
    // to decide if it should shut down, and also accept input from direct buffer transfers.
    if (make_fd_nonblocking(pipe_pair->read.fd())) {
        debug(1, PIPE_ERROR);
        wperror(L"fcntl");
        return nullptr;
    }
    // The fillthread gets the read end of the pipe; the returned bufferfill gets the write end.
    auto buffer = std::make_shared<io_buffer_t>(buffer_limit);
    buffer->begin_background_fillthread(std::move(pipe_pair->read));
    return std::make_shared<io_bufferfill_t>(std::move(pipe_pair->write), buffer);
}
|
|
|
|
|
2019-02-01 09:58:06 +00:00
|
|
|
std::shared_ptr<io_buffer_t> io_bufferfill_t::finish(std::shared_ptr<io_bufferfill_t> &&filler) {
|
|
|
|
// The io filler is passed in. This typically holds the only instance of the write side of the
|
|
|
|
// pipe used by the buffer's fillthread (except for that side held by other processes). Get the
|
|
|
|
// buffer out of the bufferfill and clear the shared_ptr; this will typically widow the pipe.
|
|
|
|
// Then allow the buffer to finish.
|
|
|
|
assert(filler && "Null pointer in finish");
|
|
|
|
auto buffer = filler->buffer();
|
|
|
|
filler.reset();
|
|
|
|
buffer->complete_background_fillthread();
|
|
|
|
return buffer;
|
|
|
|
}
|
|
|
|
|
2019-02-01 00:05:42 +00:00
|
|
|
// Out-of-line destructors anchor each io type's vtable in this translation unit.
io_pipe_t::~io_pipe_t() = default;
io_fd_t::~io_fd_t() = default;
io_close_t::~io_close_t() = default;
io_file_t::~io_file_t() = default;
io_bufferfill_t::~io_bufferfill_t() = default;
|
|
|
|
|
2019-02-01 09:58:06 +00:00
|
|
|
io_buffer_t::~io_buffer_t() {
    // complete_background_fillthread() must have run before destruction: the fillthread
    // captures 'this' and would otherwise touch a destroyed object.
    assert(!fillthread_running() && "io_buffer_t destroyed with outstanding fillthread");
}
|
2005-10-08 11:20:51 +00:00
|
|
|
|
2016-05-02 03:32:40 +00:00
|
|
|
void io_chain_t::remove(const shared_ptr<const io_data_t> &element) {
|
|
|
|
// See if you can guess why std::find doesn't work here.
|
|
|
|
for (io_chain_t::iterator iter = this->begin(); iter != this->end(); ++iter) {
|
|
|
|
if (*iter == element) {
|
2012-08-15 07:57:56 +00:00
|
|
|
this->erase(iter);
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
2005-10-08 11:20:51 +00:00
|
|
|
|
2019-11-10 22:00:30 +00:00
|
|
|
/// Append a redirection to the chain. Null entries are never allowed.
void io_chain_t::push_back(io_data_ref_t element) {
    // Ensure we never push back NULL.
    assert(element.get() != nullptr);
    std::vector<io_data_ref_t>::push_back(std::move(element));
}
|
|
|
|
|
2016-05-02 03:32:40 +00:00
|
|
|
/// Append all redirections of `chain` to this chain, preserving order.
void io_chain_t::append(const io_chain_t &chain) {
    // Appending a chain to itself would read from the container being modified.
    assert(&chain != this && "Cannot append self to self");
    this->insert(this->end(), chain.begin(), chain.end());
}
|
|
|
|
|
2019-12-13 01:27:48 +00:00
|
|
|
/// Resolve a list of redirection specs into io objects, appending them to this chain.
/// `pwd` is used to resolve relative redirection paths. Returns false (after reporting the
/// error) if a file-based redirection fails to open; entries appended before the failure remain
/// in the chain.
bool io_chain_t::append_from_specs(const redirection_spec_list_t &specs, const wcstring &pwd) {
    for (const auto &spec : specs) {
        switch (spec.mode) {
            case redirection_mode_t::fd: {
                if (spec.is_close()) {
                    // e.g. `2>&-`: close the fd outright.
                    this->push_back(make_unique<io_close_t>(spec.fd));
                } else {
                    // e.g. `2>&1`: duplicate one fd onto another.
                    auto target_fd = spec.get_target_as_fd();
                    assert(target_fd.has_value() &&
                           "fd redirection should have been validated already");
                    this->push_back(make_unique<io_fd_t>(spec.fd, *target_fd));
                }
                break;
            }
            default: {
                // We have a path-based redirection. Resolve it to a file.
                // Mark it as CLO_EXEC because we don't want it to be open in any child.
                wcstring path = path_apply_working_directory(spec.target, pwd);
                int oflags = spec.oflags();
                autoclose_fd_t file{wopen_cloexec(path, oflags, OPEN_MASK)};
                if (!file.valid()) {
                    if ((oflags & O_EXCL) && (errno == EEXIST)) {
                        // noclobber (`>?`) refused to overwrite an existing file.
                        debug(1, NOCLOB_ERROR, spec.target.c_str());
                    } else {
                        debug(1, FILE_ERROR, spec.target.c_str());
                        if (should_debug(1)) wperror(L"open");
                    }
                    return false;
                }
                this->push_back(std::make_shared<io_file_t>(spec.fd, std::move(file)));
                break;
            }
        }
    }
    return true;
}
|
|
|
|
|
2019-12-11 01:05:17 +00:00
|
|
|
void io_chain_t::print() const {
|
|
|
|
if (this->empty()) {
|
|
|
|
std::fwprintf(stderr, L"Empty chain %p\n", this);
|
2012-08-15 07:57:56 +00:00
|
|
|
return;
|
|
|
|
}
|
2012-11-18 10:23:22 +00:00
|
|
|
|
2019-12-11 01:05:17 +00:00
|
|
|
std::fwprintf(stderr, L"Chain %p (%ld items):\n", this, (long)this->size());
|
|
|
|
for (size_t i = 0; i < this->size(); i++) {
|
|
|
|
const auto &io = this->at(i);
|
2019-12-11 01:21:03 +00:00
|
|
|
if (io.get() == nullptr) {
|
2019-03-12 21:06:01 +00:00
|
|
|
std::fwprintf(stderr, L"\t(null)\n");
|
2019-12-11 01:21:03 +00:00
|
|
|
} else {
|
2019-03-12 21:06:01 +00:00
|
|
|
std::fwprintf(stderr, L"\t%lu: fd:%d, ", (unsigned long)i, io->fd);
|
2013-02-22 21:20:27 +00:00
|
|
|
io->print();
|
|
|
|
}
|
2012-08-15 07:57:56 +00:00
|
|
|
}
|
|
|
|
}
|
2005-10-08 11:20:51 +00:00
|
|
|
|
2019-12-12 22:42:03 +00:00
|
|
|
/// Return the set of target fds used by every redirection in this chain.
fd_set_t io_chain_t::fd_set() const {
    fd_set_t fds;
    for (const auto &io : *this) {
        fds.add(io->fd);
    }
    return fds;
}
|
|
|
|
|
|
|
|
/// If `fd` collides with an fd in `fdset`, dup it (repeatedly, via recursion) until it no longer
/// collides, returning the relocated fd. If `cloexec` is set, mark the dup'd fd CLOEXEC.
/// Returns an invalid fd on dup failure (likely fd exhaustion).
autoclose_fd_t move_fd_to_unused(autoclose_fd_t fd, const fd_set_t &fdset, bool cloexec) {
    // No conflict (or nothing to do): hand the fd back unchanged.
    if (!fd.valid() || !fdset.contains(fd.fd())) {
        return fd;
    }

    // We have fd >= 0, and it's a conflict. dup it and recurse. Note that we recurse before
    // anything is closed; this forces the kernel to give us a new one (or report fd exhaustion).
    int tmp_fd;
    do {
        // Retry dup() on EINTR.
        tmp_fd = dup(fd.fd());
    } while (tmp_fd < 0 && errno == EINTR);

    assert(tmp_fd != fd.fd());
    if (tmp_fd < 0) {
        // Likely fd exhaustion.
        return autoclose_fd_t{};
    }
    // Ok, we have a new candidate fd. Recurse.
    // (The conflicting fd stays open until the recursion returns, so the kernel cannot hand the
    // same number back to us.)
    if (cloexec) set_cloexec(tmp_fd);
    return move_fd_to_unused(autoclose_fd_t{tmp_fd}, fdset, cloexec);
}
|
|
|
|
|
2019-12-12 22:42:03 +00:00
|
|
|
/// Create a pipe whose ends avoid every fd in `fdset`, marking both ends CLOEXEC.
/// Returns none() on failure (pipe creation or fd exhaustion).
maybe_t<autoclose_pipes_t> make_autoclose_pipes(const fd_set_t &fdset) {
    int raw_fds[2] = {-1, -1};
    if (pipe(raw_fds) < 0) {
        debug(1, PIPE_ERROR);
        wperror(L"pipe");
        return none();
    }
    set_cloexec(raw_fds[0]);
    set_cloexec(raw_fds[1]);

    // Relocate either end if it conflicts with an fd the caller needs.
    autoclose_fd_t read_end = move_fd_to_unused(autoclose_fd_t{raw_fds[0]}, fdset, true);
    if (!read_end.valid()) return none();

    autoclose_fd_t write_end = move_fd_to_unused(autoclose_fd_t{raw_fds[1]}, fdset, true);
    if (!write_end.valid()) return none();

    return autoclose_pipes_t(std::move(read_end), std::move(write_end));
}
|
|
|
|
|
2019-11-10 22:00:30 +00:00
|
|
|
/// Return the last redirection in the chain targeting `fd`, or nullptr if there is none.
/// We scan from the back because a later redirection overrides an earlier one.
shared_ptr<const io_data_t> io_chain_t::io_for_fd(int fd) const {
    for (size_t idx = this->size(); idx-- > 0;) {
        const auto &io = this->at(idx);
        if (io->fd == fd) {
            return io;
        }
    }
    return nullptr;
}
|