2016-05-02 03:32:40 +00:00
|
|
|
// Utilities for io redirection.
|
2016-05-18 22:30:21 +00:00
|
|
|
#include "config.h" // IWYU pragma: keep
|
|
|
|
|
2016-05-02 03:32:40 +00:00
|
|
|
#include <errno.h>
|
2016-04-21 06:00:54 +00:00
|
|
|
#include <stddef.h>
|
2016-05-02 03:32:40 +00:00
|
|
|
#include <stdio.h>
|
2019-03-12 22:07:07 +00:00
|
|
|
#include <cstring>
|
2016-05-02 03:32:40 +00:00
|
|
|
#include <unistd.h>
|
2019-03-12 21:06:01 +00:00
|
|
|
#include <cwchar>
|
2006-02-28 13:17:16 +00:00
|
|
|
|
2005-10-08 11:20:51 +00:00
|
|
|
#include "common.h"
|
2016-05-02 03:32:40 +00:00
|
|
|
#include "exec.h"
|
|
|
|
#include "fallback.h" // IWYU pragma: keep
|
2005-10-08 11:20:51 +00:00
|
|
|
#include "io.h"
|
2019-02-01 09:58:06 +00:00
|
|
|
#include "iothread.h"
|
2019-02-13 23:17:07 +00:00
|
|
|
#include "redirection.h"
|
2016-05-02 03:32:40 +00:00
|
|
|
#include "wutil.h" // IWYU pragma: keep
|
2006-07-19 22:55:49 +00:00
|
|
|
|
2018-02-19 02:44:58 +00:00
|
|
|
// Out-of-line defaulted destructor for the redirection base class.
io_data_t::~io_data_t() = default;
|
2013-01-09 07:49:12 +00:00
|
|
|
|
2019-03-12 21:06:01 +00:00
|
|
|
/// Debug dump: describe this close-redirection on stderr.
void io_close_t::print() const {
    std::fwprintf(stderr, L"close %d\n", fd);
}
|
2013-01-09 08:02:04 +00:00
|
|
|
|
2019-03-12 21:06:01 +00:00
|
|
|
/// Debug dump: describe this fd-to-fd mapping on stderr.
void io_fd_t::print() const {
    std::fwprintf(stderr, L"FD map %d -> %d\n", old_fd, fd);
}
|
2013-01-15 07:37:33 +00:00
|
|
|
|
2019-03-12 21:06:01 +00:00
|
|
|
/// Debug dump: describe this file redirection on stderr.
void io_file_t::print() const {
    std::fwprintf(stderr, L"file (%s)\n", filename_cstr);
}
|
2013-01-15 08:18:03 +00:00
|
|
|
|
2016-05-02 03:32:40 +00:00
|
|
|
void io_pipe_t::print() const {
|
2019-03-12 21:06:01 +00:00
|
|
|
std::fwprintf(stderr, L"pipe {%d} (input: %s)\n", pipe_fd(), is_input_ ? "yes" : "no");
|
2013-01-15 09:31:36 +00:00
|
|
|
}
|
|
|
|
|
2019-03-12 21:06:01 +00:00
|
|
|
/// Debug dump: describe this bufferfill redirection on stderr.
void io_bufferfill_t::print() const {
    std::fwprintf(stderr, L"bufferfill {%d}\n", write_fd_.fd());
}
|
2013-01-15 08:44:31 +00:00
|
|
|
|
2018-05-28 06:56:20 +00:00
|
|
|
void io_buffer_t::append_from_stream(const output_stream_t &stream) {
|
2019-02-17 22:16:47 +00:00
|
|
|
if (stream.empty()) return;
|
2019-02-01 09:58:06 +00:00
|
|
|
scoped_lock locker(append_lock_);
|
2018-05-28 08:27:26 +00:00
|
|
|
if (buffer_.discarded()) return;
|
2018-05-30 04:11:34 +00:00
|
|
|
if (stream.buffer().discarded()) {
|
2018-05-28 08:27:26 +00:00
|
|
|
buffer_.set_discard();
|
2018-05-28 06:56:20 +00:00
|
|
|
return;
|
|
|
|
}
|
2018-05-28 08:27:26 +00:00
|
|
|
buffer_.append_wide_buffer(stream.buffer());
|
2018-05-28 06:56:20 +00:00
|
|
|
}
|
|
|
|
|
2019-02-01 09:58:06 +00:00
|
|
|
/// Body of the background fillthread: drain `readfd` into this buffer until EOF or until
/// the shutdown flag is observed after a poll timeout.
void io_buffer_t::run_background_fillthread(autoclose_fd_t readfd) {
    // Here we are running the background fillthread, executing in a background thread.
    // Our plan is:
    // 1. poll via select() until the fd is readable.
    // 2. Acquire the append lock.
    // 3. read until EAGAIN (would block), appending
    // 4. release the lock
    // The purpose of holding the lock around the read calls is to ensure that data from background
    // processes isn't weirdly interspersed with data directly transferred (from a builtin to a
    // buffer).
    const int fd = readfd.fd();

    // 100 msec poll rate. Note that in most cases, the write end of the pipe will be closed so
    // select() will return; the polling is important only for weird cases like a background process
    // launched in a command substitution.
    const long poll_timeout_usec = 100000;

    bool shutdown = false;
    while (!shutdown) {
        bool readable = false;

        // Poll if our fd is readable.
        // Do this even if the shutdown flag is set. It's important we wait for the fd at least
        // once. For short-lived processes, it's possible for the process to execute, produce output
        // (fits in the pipe buffer) and be reaped before we are even scheduled. So always wait at
        // least once on the fd. Note that doesn't mean we will wait for the full poll duration;
        // typically what will happen is our pipe will be widowed and so this will return quickly.
        // It's only for weird cases (e.g. a background process launched inside a command
        // substitution) that we'll wait out the entire poll time.
        fd_set fds;
        FD_ZERO(&fds);
        FD_SET(fd, &fds);
        // Reinitialize the timeout on every iteration: POSIX allows select() to modify the
        // timeval (and Linux does, decrementing it by the time slept). Reusing a single struct
        // would decay into a zero-timeout busy poll after the first full timeout.
        struct timeval tv = {};
        tv.tv_usec = poll_timeout_usec;
        int ret = select(fd + 1, &fds, NULL, NULL, &tv);
        readable = ret > 0;
        if (ret < 0 && errno != EINTR) {
            // Surprising error.
            wperror(L"select");
            return;
        }

        // Only check the shutdown flag if we timed out.
        // It's important that if select() indicated we were readable, that we call select() again
        // allowing it to time out. Note the typical case is that the fd will be closed, in which
        // case select will return immediately.
        if (!readable) {
            shutdown = this->shutdown_fillthread_.load(std::memory_order_relaxed);
        }

        if (readable || shutdown) {
            // Now either our fd is readable, or we have set the shutdown flag.
            // Either way acquire the lock and read until we reach EOF, or EAGAIN / EINTR.
            scoped_lock locker(append_lock_);
            ssize_t amt;
            do {
                errno = 0;
                char buff[4096];
                amt = read(fd, buff, sizeof buff);
                if (amt > 0) {
                    buffer_.append(&buff[0], &buff[amt]);
                } else if (amt == 0) {
                    // EOF: the write end was closed; we are done.
                    shutdown = true;
                } else if (amt == -1 && errno == 0) {
                    // No specific error. We assume we just return,
                    // since that's what we do in read_blocked.
                    return;
                } else if (errno != EINTR && errno != EAGAIN) {
                    wperror(L"read");
                    return;
                }
            } while (amt > 0);
        }
    }
    assert(shutdown && "Should only exit loop if shutdown flag is set");
}
|
|
|
|
|
2019-02-01 09:58:06 +00:00
|
|
|
/// Launch the background thread that drains `fd` into this buffer.
/// Must be called on the main thread; at most one fillthread may exist at a time.
void io_buffer_t::begin_background_fillthread(autoclose_fd_t fd) {
    ASSERT_IS_MAIN_THREAD();
    assert(!fillthread_ && "Already have a fillthread");

    // We want our background thread to own the fd but it's not easy to move into a std::function.
    // Use a shared_ptr.
    auto fdref = move_to_sharedptr(std::move(fd));

    // Our function to read until the receiver is closed.
    // It's OK to capture 'this' by value because 'this' owns the background thread and joins it
    // before dtor.
    std::function<void(void)> func = [this, fdref]() {
        this->run_background_fillthread(std::move(*fdref));
    };

    pthread_t fillthread{};
    if (!make_pthread(&fillthread, std::move(func))) {
        wperror(L"make_pthread");
    }
    // NOTE(review): if make_pthread failed, we still record the value-initialized pthread_t and
    // complete_background_fillthread() will later attempt to join it — confirm make_pthread
    // cannot fail in practice, or consider skipping the assignment on failure.
    fillthread_ = fillthread;
}
|
2005-10-08 11:20:51 +00:00
|
|
|
|
2019-02-01 09:58:06 +00:00
|
|
|
/// Signal the background fillthread to stop, then join it.
/// Main thread only; requires that a fillthread was previously started.
void io_buffer_t::complete_background_fillthread() {
    ASSERT_IS_MAIN_THREAD();
    assert(fillthread_ && "Should have a fillthread");
    // Ask the reader thread to wind down, then wait for it to finish draining.
    shutdown_fillthread_.store(true, std::memory_order_relaxed);
    void *join_result = nullptr;
    const int join_err = pthread_join(*fillthread_, &join_result);
    DIE_ON_FAILURE(join_err);
    fillthread_.reset();
}
|
2012-11-19 00:30:30 +00:00
|
|
|
|
2019-02-01 00:05:42 +00:00
|
|
|
/// Create a bufferfill: a pipe whose write end is handed to a process and whose read end is
/// drained by a freshly started background fillthread into a new io_buffer_t.
/// Returns nullptr on pipe creation or fcntl failure.
shared_ptr<io_bufferfill_t> io_bufferfill_t::create(const io_chain_t &conflicts,
                                                    size_t buffer_limit) {
    // Construct our pipes.
    auto pipes = make_autoclose_pipes(conflicts);
    if (!pipes) return nullptr;

    // Our buffer will read from the read end of the pipe. This end must be non-blocking. This is
    // because our fillthread needs to poll to decide if it should shut down, and also accept input
    // from direct buffer transfers.
    if (make_fd_nonblocking(pipes->read.fd())) {
        debug(1, PIPE_ERROR);
        wperror(L"fcntl");
        return nullptr;
    }

    // Our fillthread gets the read end of the pipe; out_pipe gets the write end.
    auto buffer = std::make_shared<io_buffer_t>(buffer_limit);
    buffer->begin_background_fillthread(std::move(pipes->read));
    return std::make_shared<io_bufferfill_t>(std::move(pipes->write), buffer);
}
|
|
|
|
|
2019-02-01 09:58:06 +00:00
|
|
|
std::shared_ptr<io_buffer_t> io_bufferfill_t::finish(std::shared_ptr<io_bufferfill_t> &&filler) {
|
|
|
|
// The io filler is passed in. This typically holds the only instance of the write side of the
|
|
|
|
// pipe used by the buffer's fillthread (except for that side held by other processes). Get the
|
|
|
|
// buffer out of the bufferfill and clear the shared_ptr; this will typically widow the pipe.
|
|
|
|
// Then allow the buffer to finish.
|
|
|
|
assert(filler && "Null pointer in finish");
|
|
|
|
auto buffer = filler->buffer();
|
|
|
|
filler.reset();
|
|
|
|
buffer->complete_background_fillthread();
|
|
|
|
return buffer;
|
|
|
|
}
|
|
|
|
|
2019-02-01 00:05:42 +00:00
|
|
|
// Out-of-line defaulted destructor.
io_pipe_t::~io_pipe_t() = default;
|
|
|
|
|
|
|
|
// Out-of-line defaulted destructor.
io_bufferfill_t::~io_bufferfill_t() = default;
|
|
|
|
|
2019-02-01 09:58:06 +00:00
|
|
|
/// The owner must have called complete_background_fillthread() before destruction.
io_buffer_t::~io_buffer_t() {
    assert(!fillthread_ && "io_buffer_t destroyed with outstanding fillthread");
}
|
2005-10-08 11:20:51 +00:00
|
|
|
|
2016-05-02 03:32:40 +00:00
|
|
|
void io_chain_t::remove(const shared_ptr<const io_data_t> &element) {
|
|
|
|
// See if you can guess why std::find doesn't work here.
|
|
|
|
for (io_chain_t::iterator iter = this->begin(); iter != this->end(); ++iter) {
|
|
|
|
if (*iter == element) {
|
2012-08-15 07:57:56 +00:00
|
|
|
this->erase(iter);
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
2005-10-08 11:20:51 +00:00
|
|
|
|
2019-02-01 02:49:52 +00:00
|
|
|
/// Append a redirection to the chain. Null elements are forbidden.
void io_chain_t::push_back(shared_ptr<io_data_t> element) {
    assert(element.get() != nullptr);
    std::vector<shared_ptr<io_data_t> >::emplace_back(std::move(element));
}
|
|
|
|
|
2019-02-01 02:49:52 +00:00
|
|
|
/// Prepend a redirection to the chain. Null elements are forbidden.
void io_chain_t::push_front(shared_ptr<io_data_t> element) {
    assert(element.get() != nullptr);
    this->emplace(this->begin(), std::move(element));
}
|
|
|
|
|
2016-05-02 03:32:40 +00:00
|
|
|
void io_chain_t::append(const io_chain_t &chain) {
|
Big fat refactoring of how redirections work. In fish 1.x and 2.0.0, the redirections for a process were flattened into a big list associated with the job, so there was no way to tell which redirections applied to each process. Each process therefore got all the redirections associated with the job. See https://github.com/fish-shell/fish-shell/issues/877 for how this could manifest.
With this change, jobs only track their block-level redirections. Process level redirections are correctly associated with the process, and at exec time we stitch them together (block, pipe, and process redirects).
This fixes the weird issues where redirects bleed across pipelines (like #877), and also allows us to play with the order in which redirections are applied, since the final list is constructed right before it's needed. This lets us put pipes after block level redirections but before process level redirections, so that a 2>&1-type redirection gets picked up after the pipe, i.e. it should fix https://github.com/fish-shell/fish-shell/issues/110
This is a significant change. The tests all pass. Cross your fingers.
2013-08-19 23:16:41 +00:00
|
|
|
this->insert(this->end(), chain.begin(), chain.end());
|
|
|
|
}
|
|
|
|
|
2016-04-04 02:02:46 +00:00
|
|
|
#if 0
|
|
|
|
// This isn't used so the lint tools were complaining about its presence. I'm keeping it in the
|
|
|
|
// source because it could be useful for debugging.
|
2012-08-15 07:57:56 +00:00
|
|
|
void io_print(const io_chain_t &chain)
|
2005-10-08 11:20:51 +00:00
|
|
|
{
|
2012-08-15 07:57:56 +00:00
|
|
|
if (chain.empty())
|
|
|
|
{
|
2019-03-12 21:06:01 +00:00
|
|
|
std::fwprintf(stderr, L"Empty chain %p\n", &chain);
|
2012-08-15 07:57:56 +00:00
|
|
|
return;
|
|
|
|
}
|
2012-11-18 10:23:22 +00:00
|
|
|
|
2019-03-12 21:06:01 +00:00
|
|
|
std::fwprintf(stderr, L"Chain %p (%ld items):\n", &chain, (long)chain.size());
|
2012-11-19 00:30:30 +00:00
|
|
|
for (size_t i=0; i < chain.size(); i++)
|
|
|
|
{
|
2016-02-02 23:39:35 +00:00
|
|
|
const shared_ptr<io_data_t> &io = chain.at(i);
|
2013-02-22 21:20:27 +00:00
|
|
|
if (io.get() == NULL)
|
|
|
|
{
|
2019-03-12 21:06:01 +00:00
|
|
|
std::fwprintf(stderr, L"\t(null)\n");
|
2013-02-22 21:20:27 +00:00
|
|
|
}
|
|
|
|
else
|
|
|
|
{
|
2019-03-12 21:06:01 +00:00
|
|
|
std::fwprintf(stderr, L"\t%lu: fd:%d, ", (unsigned long)i, io->fd);
|
2013-02-22 21:20:27 +00:00
|
|
|
io->print();
|
|
|
|
}
|
2012-08-15 07:57:56 +00:00
|
|
|
}
|
|
|
|
}
|
2016-04-04 02:02:46 +00:00
|
|
|
#endif
|
2005-10-08 11:20:51 +00:00
|
|
|
|
2019-01-28 21:26:22 +00:00
|
|
|
|
|
|
|
int move_fd_to_unused(int fd, const io_chain_t &io_chain, bool cloexec) {
|
2016-10-31 04:18:59 +00:00
|
|
|
if (fd < 0 || io_chain.get_io_for_fd(fd).get() == NULL) {
|
|
|
|
return fd;
|
|
|
|
}
|
2016-05-02 03:32:40 +00:00
|
|
|
|
2016-10-31 04:18:59 +00:00
|
|
|
// We have fd >= 0, and it's a conflict. dup it and recurse. Note that we recurse before
|
|
|
|
// anything is closed; this forces the kernel to give us a new one (or report fd exhaustion).
|
|
|
|
int new_fd = fd;
|
|
|
|
int tmp_fd;
|
|
|
|
do {
|
|
|
|
tmp_fd = dup(fd);
|
|
|
|
} while (tmp_fd < 0 && errno == EINTR);
|
|
|
|
|
|
|
|
assert(tmp_fd != fd);
|
|
|
|
if (tmp_fd < 0) {
|
|
|
|
// Likely fd exhaustion.
|
|
|
|
new_fd = -1;
|
|
|
|
} else {
|
|
|
|
// Ok, we have a new candidate fd. Recurse. If we get a valid fd, either it's the same as
|
|
|
|
// what we gave it, or it's a new fd and what we gave it has been closed. If we get a
|
|
|
|
// negative value, the fd also has been closed.
|
2019-01-28 21:26:22 +00:00
|
|
|
if (cloexec) set_cloexec(tmp_fd);
|
2016-10-31 04:18:59 +00:00
|
|
|
new_fd = move_fd_to_unused(tmp_fd, io_chain);
|
2015-01-08 02:07:06 +00:00
|
|
|
}
|
2016-10-31 04:18:59 +00:00
|
|
|
|
|
|
|
// We're either returning a new fd or an error. In both cases, we promise to close the old one.
|
|
|
|
assert(new_fd != fd);
|
|
|
|
int saved_errno = errno;
|
|
|
|
exec_close(fd);
|
|
|
|
errno = saved_errno;
|
2015-01-08 02:07:06 +00:00
|
|
|
return new_fd;
|
|
|
|
}
|
|
|
|
|
2019-02-01 00:05:42 +00:00
|
|
|
static bool pipe_avoid_conflicts_with_io_chain(int fds[2], const io_chain_t &ios) {
|
2015-01-08 02:07:06 +00:00
|
|
|
bool success = true;
|
2016-05-02 03:32:40 +00:00
|
|
|
for (int i = 0; i < 2; i++) {
|
2015-01-08 02:07:06 +00:00
|
|
|
fds[i] = move_fd_to_unused(fds[i], ios);
|
2016-05-02 03:32:40 +00:00
|
|
|
if (fds[i] < 0) {
|
2015-01-08 02:07:06 +00:00
|
|
|
success = false;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
2016-05-02 03:32:40 +00:00
|
|
|
|
|
|
|
// If any fd failed, close all valid fds.
|
|
|
|
if (!success) {
|
2015-01-08 02:07:06 +00:00
|
|
|
int saved_errno = errno;
|
2016-05-02 03:32:40 +00:00
|
|
|
for (int i = 0; i < 2; i++) {
|
|
|
|
if (fds[i] >= 0) {
|
2015-01-08 02:07:06 +00:00
|
|
|
exec_close(fds[i]);
|
|
|
|
fds[i] = -1;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
errno = saved_errno;
|
|
|
|
}
|
|
|
|
return success;
|
|
|
|
}
|
|
|
|
|
2019-02-01 00:05:42 +00:00
|
|
|
/// Create a CLOEXEC pipe whose fds avoid every fd referenced by `ios`.
/// Returns none() on pipe() failure or fd relocation failure.
maybe_t<autoclose_pipes_t> make_autoclose_pipes(const io_chain_t &ios) {
    int raw[2] = {-1, -1};

    if (pipe(raw) < 0) {
        debug(1, PIPE_ERROR);
        wperror(L"pipe");
        return none();
    }
    for (int fd : raw) set_cloexec(fd);

    if (!pipe_avoid_conflicts_with_io_chain(raw, ios)) {
        // The pipes are closed on failure here.
        return none();
    }

    autoclose_pipes_t result;
    result.read = autoclose_fd_t(raw[0]);
    result.write = autoclose_fd_t(raw[1]);
    return {std::move(result)};
}
|
|
|
|
|
2016-05-02 03:32:40 +00:00
|
|
|
/// Return the last IO for the given fd.
|
|
|
|
shared_ptr<const io_data_t> io_chain_t::get_io_for_fd(int fd) const {
|
2012-08-15 07:57:56 +00:00
|
|
|
size_t idx = this->size();
|
2016-05-02 03:32:40 +00:00
|
|
|
while (idx--) {
|
2016-02-02 23:39:35 +00:00
|
|
|
const shared_ptr<io_data_t> &data = this->at(idx);
|
2016-05-02 03:32:40 +00:00
|
|
|
if (data->fd == fd) {
|
2012-08-15 07:57:56 +00:00
|
|
|
return data;
|
|
|
|
}
|
|
|
|
}
|
2013-01-07 15:04:55 +00:00
|
|
|
return shared_ptr<const io_data_t>();
|
2012-08-15 07:57:56 +00:00
|
|
|
}
|
2005-10-09 11:48:16 +00:00
|
|
|
|
2016-05-02 03:32:40 +00:00
|
|
|
/// Return the last IO for the given fd (mutable overload), or an empty pointer if none.
shared_ptr<io_data_t> io_chain_t::get_io_for_fd(int fd) {
    // Walk back-to-front so the most recently added redirection for this fd wins.
    for (auto riter = this->rbegin(); riter != this->rend(); ++riter) {
        const shared_ptr<io_data_t> &data = *riter;
        if (data->fd == fd) {
            return data;
        }
    }
    return shared_ptr<io_data_t>();
}
|
|
|
|
|
2016-05-02 03:32:40 +00:00
|
|
|
/// The old function returned the last match, so we mimic that.
|
|
|
|
shared_ptr<const io_data_t> io_chain_get(const io_chain_t &src, int fd) {
|
2012-08-15 07:57:56 +00:00
|
|
|
return src.get_io_for_fd(fd);
|
|
|
|
}
|
|
|
|
|
2016-05-02 03:32:40 +00:00
|
|
|
/// Mutable overload of io_chain_get: returns the last IO in `src` matching `fd`.
shared_ptr<io_data_t> io_chain_get(io_chain_t &src, int fd) { return src.get_io_for_fd(fd); }
|