Port make_autoclose_pipes, fd_event_signaller_t

This allows us to get rid of the C++ autoclose_fd_t.

This commit is contained in:
parent f2cd916f65
commit f3dd8d306f

19 changed files with 278 additions and 496 deletions
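For context: autoclose_fd_t is fish's RAII wrapper around a raw file descriptor, and the Rust side now provides the equivalent AutoCloseFd in fish-rust/src/fds.rs. The sketch below only illustrates what such a wrapper boils down to; the name FdGuard is made up for this note (the real type additionally exposes acquire()/reset() and a cxx bridge), and it assumes the libc crate.

// Minimal sketch of an RAII fd wrapper in the spirit of AutoCloseFd.
// Illustrative only; fish's real type lives in fish-rust/src/fds.rs.
struct FdGuard(i32); // hypothetical name, not fish's

impl FdGuard {
    fn new(fd: i32) -> Self {
        Self(fd)
    }
    fn fd(&self) -> i32 {
        self.0
    }
    fn is_valid(&self) -> bool {
        self.0 >= 0
    }
    fn close(&mut self) {
        if self.0 >= 0 {
            // Best-effort close; the real code retries on EINTR and reports errors.
            unsafe { libc::close(self.0) };
            self.0 = -1;
        }
    }
}

impl Drop for FdGuard {
    fn drop(&mut self) {
        // Closing on drop is what makes the wrapper "autoclose".
        self.close();
    }
}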
@@ -112,7 +112,6 @@ set(FISH_SRCS
src/event.cpp
src/expand.cpp
src/fallback.cpp
src/fds.cpp
src/fish_version.cpp
src/flog.cpp
src/highlight.cpp
@@ -152,8 +152,6 @@ SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Werror")
check_include_files("sys/types.h;sys/sysctl.h" HAVE_SYS_SYSCTL_H)
SET(CMAKE_C_FLAGS "${OLD_CMAKE_C_FLAGS}")

check_cxx_symbol_exists(eventfd sys/eventfd.h HAVE_EVENTFD)
check_cxx_symbol_exists(pipe2 unistd.h HAVE_PIPE2)
check_cxx_symbol_exists(wcscasecmp wchar.h HAVE_WCSCASECMP)
check_cxx_symbol_exists(wcsncasecmp wchar.h HAVE_WCSNCASECMP)
@@ -293,4 +291,3 @@ IF (APPLE)
SET(HAVE_BROKEN_MBRTOWC_UTF8 1)
ENDIF()
ENDIF()
@@ -43,12 +43,6 @@
/* Define to 1 if you have the <ncurses/term.h> header file. */
#cmakedefine HAVE_NCURSES_TERM_H 1

/* Define to 1 if you have the 'eventfd' function. */
#cmakedefine HAVE_EVENTFD 1

/* Define to 1 if you have the 'pipe2' function. */
#cmakedefine HAVE_PIPE2 1

/* Define to 1 if you have the <siginfo.h> header file. */
#cmakedefine HAVE_SIGINFO_H 1
@@ -23,7 +23,12 @@ fn main() {
    if compiles("fish-rust/src/cfg/w_exitcode.cpp") {
        println!("cargo:rustc-cfg=HAVE_WAITSTATUS_SIGNAL_RET");
    }

    if compiles("fish-rust/src/cfg/eventfd.c") {
        println!("cargo:rustc-cfg=HAVE_EVENTFD");
    }
    if compiles("fish-rust/src/cfg/pipe2.c") {
        println!("cargo:rustc-cfg=HAVE_PIPE2");
    }
    if compiles("fish-rust/src/cfg/spawn.c") {
        println!("cargo:rustc-cfg=FISH_USE_POSIX_SPAWN");
    }
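These probes mirror the CMake checks removed above: if the small C file compiles, build.rs emits a cargo:rustc-cfg flag, and the Rust sources then gate on it with #[cfg(...)]. A minimal sketch of how such a flag is consumed follows; it is illustrative only (the real gating is visible in fd_monitor.rs and fds.rs further down) and assumes the libc crate on a target where eventfd exists.

// Sketch of consuming a build.rs-emitted cfg flag; illustrative only.
#[cfg(HAVE_EVENTFD)]
fn make_signal_fd() -> i32 {
    // On Linux a single eventfd serves as both the read and write end.
    unsafe { libc::eventfd(0, libc::EFD_CLOEXEC | libc::EFD_NONBLOCK) }
}

#[cfg(not(HAVE_EVENTFD))]
fn make_signal_fd() -> i32 {
    // Elsewhere fall back to a pipe; this sketch returns only the read end,
    // whereas the real FdEventSignaller keeps both ends.
    let mut fds = [-1, -1];
    unsafe { libc::pipe(fds.as_mut_ptr()) };
    fds[0]
}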
fish-rust/src/cfg/eventfd.c (new file, 1 line)
@@ -0,0 +1 @@
#include <sys/eventfd.h>

fish-rust/src/cfg/pipe2.c (new file, 2 lines)
@@ -0,0 +1,2 @@
#include <unistd.h>
int ok = pipe2(0, 0);
@@ -1,17 +1,23 @@
use std::os::unix::prelude::*;
use std::sync::atomic::{AtomicU64, Ordering};
use std::sync::{Arc, Mutex};
use std::sync::{Arc, Mutex, Weak};
use std::time::{Duration, Instant};

pub use self::fd_monitor_ffi::ItemWakeReason;
pub use self::fd_monitor_ffi::{new_fd_event_signaller, FdEventSignaller};
use crate::common::exit_without_destructors;
use crate::fd_readable_set::FdReadableSet;
use crate::fds::AutoCloseFd;
use crate::ffi::void_ptr;
use crate::flog::FLOG;
use crate::threads::assert_is_background_thread;
use crate::wutil::perror;
use cxx::SharedPtr;
use errno::errno;
use libc::{self, c_void, EAGAIN, EINTR, EWOULDBLOCK};

#[cfg(not(HAVE_EVENTFD))]
use crate::fds::{make_autoclose_pipes, make_fd_nonblocking};
#[cfg(HAVE_EVENTFD)]
use libc::{EFD_CLOEXEC, EFD_NONBLOCK};

#[cxx::bridge]
mod fd_monitor_ffi {
@@ -28,22 +34,6 @@ mod fd_monitor_ffi {
        Poke,
    }

    unsafe extern "C++" {
        include!("fds.h");

        /// An event signaller implemented using a file descriptor, so it can plug into
        /// [`select()`](libc::select).
        ///
        /// This is like a binary semaphore. A call to [`post()`](FdEventSignaller::post) will
        /// signal an event, making the fd readable. Multiple calls to `post()` may be coalesced.
        /// On Linux this uses [`eventfd()`](libc::eventfd), on other systems this uses a pipe.
        /// [`try_consume()`](FdEventSignaller::try_consume) may be used to consume the event.
        /// Importantly this is async signal safe. Of course it is `CLO_EXEC` as well.
        #[rust_name = "FdEventSignaller"]
        type fd_event_signaller_t = crate::ffi::fd_event_signaller_t;
        #[rust_name = "new_fd_event_signaller"]
        fn ffi_new_fd_event_signaller_t() -> SharedPtr<FdEventSignaller>;
    }
    extern "Rust" {
        #[cxx_name = "fd_monitor_item_id_t"]
        type FdMonitorItemId;
@@ -86,9 +76,151 @@ mod fd_monitor_ffi {
    }
}

// TODO: Remove once we're no longer using the FFI variant of FdEventSignaller
unsafe impl Sync for FdEventSignaller {}
unsafe impl Send for FdEventSignaller {}
/// An event signaller implemented using a file descriptor, so it can plug into
/// [`select()`](libc::select).
///
/// This is like a binary semaphore. A call to [`post()`](FdEventSignaller::post) will
/// signal an event, making the fd readable. Multiple calls to `post()` may be coalesced.
/// On Linux this uses [`eventfd()`](libc::eventfd), on other systems this uses a pipe.
/// [`try_consume()`](FdEventSignaller::try_consume) may be used to consume the event.
/// Importantly this is async signal safe. Of course it is `CLO_EXEC` as well.
pub struct FdEventSignaller {
    // Always the read end of the fd; maybe the write end as well.
    fd: AutoCloseFd,
    #[cfg(not(HAVE_EVENTFD))]
    write: AutoCloseFd,
}

impl FdEventSignaller {
    /// The default constructor will abort on failure (fd exhaustion).
    /// This should only be used during startup.
    pub fn new() -> Self {
        #[cfg(HAVE_EVENTFD)]
        {
            // Note we do not want to use EFD_SEMAPHORE because we are binary (not counting) semaphore.
            let fd = unsafe { libc::eventfd(0, EFD_CLOEXEC | EFD_NONBLOCK) };
            if fd < 0 {
                perror("eventfd");
                exit_without_destructors(1);
            }
            Self {
                fd: AutoCloseFd::new(fd),
            }
        }
        #[cfg(not(HAVE_EVENTFD))]
        {
            // Implementation using pipes.
            let Some(pipes) = make_autoclose_pipes() else {
                perror("pipe");
                exit_without_destructors(1);
            };
            make_fd_nonblocking(pipes.read.fd()).unwrap();
            make_fd_nonblocking(pipes.write.fd()).unwrap();
            Self {
                fd: pipes.read,
                write: pipes.write,
            }
        }
    }

    /// \return the fd to read from, for notification.
    pub fn read_fd(&self) -> RawFd {
        self.fd.fd()
    }

    /// If an event is signalled, consume it; otherwise return.
    /// This does not block.
    /// This retries on EINTR.
    pub fn try_consume(&self) -> bool {
        // If we are using eventfd, we want to read a single uint64.
        // If we are using pipes, read a lot; note this may leave data on the pipe if post has been
        // called many more times. In no case do we care about the data which is read.
        #[cfg(HAVE_EVENTFD)]
        let mut buff = [0_u64; 1];
        #[cfg(not(HAVE_EVENTFD))]
        let mut buff = [0_u8; 1024];
        let mut ret;
        loop {
            ret = unsafe {
                libc::read(
                    self.read_fd(),
                    &mut buff as *mut _ as *mut c_void,
                    std::mem::size_of_val(&buff),
                )
            };
            if ret >= 0 || errno().0 != EINTR {
                break;
            }
        }
        if ret < 0 && ![EAGAIN, EWOULDBLOCK].contains(&errno().0) {
            perror("read");
        }
        ret > 0
    }

    /// Mark that an event has been received. This may be coalesced.
    /// This retries on EINTR.
    pub fn post(&self) {
        // eventfd writes uint64; pipes write 1 byte.
        #[cfg(HAVE_EVENTFD)]
        let c = 1_u64;
        #[cfg(not(HAVE_EVENTFD))]
        let c = 1_u8;
        let mut ret;
        loop {
            ret = unsafe {
                libc::write(
                    self.write_fd(),
                    &c as *const _ as *const c_void,
                    std::mem::size_of_val(&c),
                )
            };
            if ret >= 0 || errno().0 != EINTR {
                break;
            }
        }
        // EAGAIN occurs if either the pipe buffer is full or the eventfd overflows (very unlikely).
        if ret < 0 && ![EAGAIN, EWOULDBLOCK].contains(&errno().0) {
            perror("write");
        }
    }

    /// Perform a poll to see if an event is received.
    /// If \p wait is set, wait until it is readable; this does not consume the event
    /// but guarantees that the next call to wait() will not block.
    /// \return true if readable, false if not readable, or not interrupted by a signal.
    pub fn poll(&self, wait: bool /* = false */) -> bool {
        let mut timeout = libc::timeval {
            tv_sec: 0,
            tv_usec: 0,
        };
        let mut fds: libc::fd_set = unsafe { std::mem::zeroed() };
        unsafe { libc::FD_ZERO(&mut fds) };
        unsafe { libc::FD_SET(self.read_fd(), &mut fds) };
        let res = unsafe {
            libc::select(
                self.read_fd() + 1,
                &mut fds,
                std::ptr::null_mut(),
                std::ptr::null_mut(),
                if wait {
                    std::ptr::null_mut()
                } else {
                    &mut timeout
                },
            )
        };
        res > 0
    }

    /// \return the fd to write to.
    fn write_fd(&self) -> RawFd {
        #[cfg(HAVE_EVENTFD)]
        return self.fd.fd();
        #[cfg(not(HAVE_EVENTFD))]
        return self.write.fd();
    }
}

/// Each item added to fd_monitor_t is assigned a unique ID, which is not recycled. Items may have
/// their callback triggered immediately by passing the ID. Zero is a sentinel.
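Taken together, post(), try_consume() and poll() implement the usual eventfd/self-pipe wake-up pattern: a writer makes the fd readable, a reader consumes the (possibly coalesced) event. A minimal usage sketch, assuming the FdEventSignaller above and that it is Send + Sync as the unsafe impls declare; fish itself drives this through FdMonitor rather than directly.

// Sketch: waking a worker thread through an FdEventSignaller; illustrative only.
fn wake_example() {
    use std::sync::Arc;

    let signaller = Arc::new(FdEventSignaller::new());
    let consumer = Arc::clone(&signaller);
    let worker = std::thread::spawn(move || {
        // Block until the fd becomes readable...
        consumer.poll(true);
        // ...then clear the event so the next wait would block again.
        consumer.try_consume();
    });

    // Multiple posts may be coalesced into a single readable event.
    signaller.post();
    signaller.post();
    worker.join().unwrap();
}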
@@ -301,7 +433,7 @@ fn new_fd_monitor_item_ffi(
pub struct FdMonitor {
    /// Our self-signaller. When this is written to, it means there are new items pending, new items
    /// in the poke list, or terminate has been set.
    change_signaller: SharedPtr<FdEventSignaller>,
    change_signaller: Arc<FdEventSignaller>,
    /// The data shared between the background thread and the `FdMonitor` instance.
    data: Arc<Mutex<SharedData>>,
    /// The last ID assigned or `0` if none.
@@ -337,7 +469,7 @@ struct BackgroundFdMonitor {
    items: Vec<FdMonitorItem>,
    /// Our self-signaller. When this is written to, it means there are new items pending, new items
    /// in the poke list, or terminate has been set.
    change_signaller: SharedPtr<FdEventSignaller>,
    change_signaller: Weak<FdEventSignaller>,
    /// The data shared between the background thread and the `FdMonitor` instance.
    data: Arc<Mutex<SharedData>>,
}
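The owning FdMonitor now keeps an Arc to the signaller, while the background thread holds only a Weak and must upgrade it before use (visible in the hunks below). A minimal sketch of that Arc/Weak ownership split, using a plain integer instead of the signaller; the names here are not fish's.

// Sketch of the Arc/Weak split between owner and background thread; illustrative only.
use std::sync::{Arc, Weak};

struct Owner {
    shared: Arc<u64>,
}

struct Background {
    shared: Weak<u64>,
}

fn arc_weak_demo() {
    let owner = Owner { shared: Arc::new(42) };
    let background = Background { shared: Arc::downgrade(&owner.shared) };

    // The background side must upgrade before each use.
    if let Some(value) = background.shared.upgrade() {
        assert_eq!(*value, 42);
    }

    drop(owner);
    // After the owner drops its Arc, upgrade() returns None.
    assert!(background.shared.upgrade().is_none());
}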
@@ -377,7 +509,7 @@ impl FdMonitor {
        FLOG!(fd_monitor, "Thread starting");
        let background_monitor = BackgroundFdMonitor {
            data: Arc::clone(&self.data),
            change_signaller: SharedPtr::clone(&self.change_signaller),
            change_signaller: Arc::downgrade(&self.change_signaller),
            items: Vec::new(),
        };
        crate::threads::spawn(move || {
@@ -441,7 +573,7 @@ impl FdMonitor {
                running: false,
                terminate: false,
            })),
            change_signaller: new_fd_event_signaller(),
            change_signaller: Arc::new(FdEventSignaller::new()),
            last_id: AtomicU64::new(0),
        }
    }
@@ -465,7 +597,7 @@ impl BackgroundFdMonitor {
        fds.clear();

        // Our change_signaller is special-cased
        let change_signal_fd = self.change_signaller.read_fd().into();
        let change_signal_fd = self.change_signaller.upgrade().unwrap().read_fd();
        fds.add(change_signal_fd);

        let mut now = Instant::now();
@@ -535,7 +667,7 @@ impl BackgroundFdMonitor {
        let change_signalled = fds.test(change_signal_fd);
        if change_signalled || is_wait_lap {
            // Clear the change signaller before processing incoming changes
            self.change_signaller.try_consume();
            self.change_signaller.upgrade().unwrap().try_consume();
            let mut data = self.data.lock().expect("Mutex poisoned!");

            // Move from `pending` to the end of `items`
@@ -1,8 +1,11 @@
use crate::common::wcs2zstring;
use crate::ffi;
use crate::flog::FLOG;
use crate::wchar::prelude::*;
use crate::wutil::perror;
use libc::{c_int, EINTR, FD_CLOEXEC, F_GETFD, F_GETFL, F_SETFD, F_SETFL, O_CLOEXEC, O_NONBLOCK};
use libc::{
    c_int, EINTR, FD_CLOEXEC, F_DUPFD_CLOEXEC, F_GETFD, F_GETFL, F_SETFD, F_SETFL, O_CLOEXEC,
    O_NONBLOCK,
};
use nix::unistd;
use std::ffi::CStr;
use std::io::{self, Read, Write};
@@ -59,12 +62,16 @@ mod autoclose_fd_t {
        #[cxx_name = "autoclose_fd_t2"]
        type AutoCloseFd;

        fn new_autoclose_fd(fd: i32) -> Box<AutoCloseFd>;
        #[cxx_name = "valid"]
        fn is_valid(&self) -> bool;
        fn close(&mut self);
        fn fd(&self) -> i32;
    }
}
fn new_autoclose_fd(fd: i32) -> Box<AutoCloseFd> {
    Box::new(AutoCloseFd::new(fd))
}

impl AutoCloseFd {
    // Closes the fd if not already closed.
@@ -149,18 +156,69 @@ pub struct AutoClosePipes {
/// Construct a pair of connected pipes, set to close-on-exec.
/// \return None on fd exhaustion.
pub fn make_autoclose_pipes() -> Option<AutoClosePipes> {
    let pipes = ffi::make_pipes_ffi();
    let mut pipes: [c_int; 2] = [-1, -1];

    let mut already_cloexec = false;
    #[cfg(HAVE_PIPE2)]
    {
        if unsafe { libc::pipe2(&mut pipes[0], O_CLOEXEC) } < 0 {
            FLOG!(warning, PIPE_ERROR);
            perror("pipe2");
            return None;
        }
        already_cloexec = true;
    }
    #[cfg(not(HAVE_PIPE2))]
    if unsafe { libc::pipe(&mut pipes[0]) } < 0 {
        FLOG!(warning, PIPE_ERROR);
        perror("pipe2");
        return None;
    }

    let readp = AutoCloseFd::new(pipes[0]);
    let writep = AutoCloseFd::new(pipes[1]);

    // Ensure our fds are out of the user range.
    let readp = heightenize_fd(readp, already_cloexec);
    if !readp.is_valid() {
        return None;
    }

    let writep = heightenize_fd(writep, already_cloexec);
    if !writep.is_valid() {
        return None;
    }

    let readp = AutoCloseFd::new(pipes.read);
    let writep = AutoCloseFd::new(pipes.write);
    if !readp.is_valid() || !writep.is_valid() {
        None
    } else {
        Some(AutoClosePipes {
            read: readp,
            write: writep,
        })
    }

/// If the given fd is in the "user range", move it to a new fd in the "high range".
/// zsh calls this movefd().
/// \p input_has_cloexec describes whether the input has CLOEXEC already set, so we can avoid
/// setting it again.
/// \return the fd, which always has CLOEXEC set; or an invalid fd on failure, in
/// which case an error will have been printed, and the input fd closed.
fn heightenize_fd(fd: AutoCloseFd, input_has_cloexec: bool) -> AutoCloseFd {
    // Check if the fd is invalid or already in our high range.
    if !fd.is_valid() {
        return fd;
    }
    if fd.fd() >= FIRST_HIGH_FD {
        if !input_has_cloexec {
            set_cloexec(fd.fd(), true);
        }
        return fd;
    }
    // Here we are asking the kernel to give us a cloexec fd.
    let newfd = unsafe { libc::fcntl(fd.fd(), F_DUPFD_CLOEXEC, FIRST_HIGH_FD) };
    if newfd < 0 {
        perror("fcntl");
        return AutoCloseFd::default();
    }
    return AutoCloseFd::new(newfd);
}

/// Sets CLO_EXEC on a given fd according to the value of \p should_set.
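A small usage sketch of the ported API: create a CLOEXEC pipe pair in the high-fd range and pass one byte through it. Illustrative only; it assumes the make_autoclose_pipes and AutoCloseFd shown above plus the libc crate, and error handling is reduced to an early return.

// Sketch: round-trip one byte through an autoclose pipe pair; illustrative only.
fn pipe_roundtrip() -> Option<()> {
    let pipes = make_autoclose_pipes()?; // None means fd exhaustion

    let payload = [b'x'];
    let mut received = [0u8; 1];
    unsafe {
        // Write into the write end, read it back from the read end.
        libc::write(pipes.write.fd(), payload.as_ptr().cast(), payload.len());
        libc::read(pipes.read.fd(), received.as_mut_ptr().cast(), received.len());
    }
    assert_eq!(received, payload);
    // Both ends are closed automatically when `pipes` is dropped.
    Some(())
}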
@@ -66,24 +66,18 @@ include_cpp! {
    generate!("activate_flog_categories_by_pattern")
    generate!("save_term_foreground_process_group")
    generate!("restore_term_foreground_process_group_for_exit")
    generate!("set_cloexec")

    generate!("builtin_bind")
    generate!("builtin_commandline")

    generate!("init_input")

    generate_pod!("pipes_ffi_t")

    generate!("shell_modes_ffi")
    generate!("make_pipes_ffi")

    generate!("log_extra_to_flog_file")

    generate!("wgettext_ptr")

    generate!("fd_event_signaller_t")

    generate!("highlight_role_t")
    generate!("highlight_spec_t")
@@ -34,6 +34,7 @@ use crate::{
        ConfigPaths, EnvMode,
    },
    event::{self, Event},
    fds::set_cloexec,
    ffi::{self},
    flog::{self, activate_flog_categories_by_pattern, set_flog_file_fd, FLOG, FLOGF},
    function, future_feature_flags as features, history,
@@ -550,7 +551,7 @@ fn main() -> i32 {
        std::process::exit(-1);
    }

    unsafe { ffi::set_cloexec(c_int(libc::fileno(debug_file)), true) };
    set_cloexec(unsafe { libc::fileno(debug_file) }, true);
    ffi::flog_setlinebuf_ffi(debug_file as *mut _);
    ffi::set_flog_output_file_ffi(debug_file as *mut _);
    set_flog_file_fd(unsafe { libc::fileno(debug_file) });
@@ -4,7 +4,9 @@ use std::sync::atomic::{AtomicBool, AtomicU64, AtomicUsize, Ordering};
use std::sync::{Arc, Mutex};
use std::time::Duration;

use crate::fd_monitor::{FdMonitor, FdMonitorItem, FdMonitorItemId, ItemWakeReason};
use crate::fd_monitor::{
    FdEventSignaller, FdMonitor, FdMonitorItem, FdMonitorItemId, ItemWakeReason,
};
use crate::fds::{make_autoclose_pipes, AutoCloseFd};
use crate::ffi_tests::add_test;
@@ -188,3 +190,28 @@ add_test!("fd_monitor_items", || {
    assert_eq!(item_pokee.total_calls.load(Ordering::Relaxed), 1);
    assert_eq!(item_pokee.pokes.load(Ordering::Relaxed), 1);
});

#[test]
fn test_fd_event_signaller() {
    let sema = FdEventSignaller::new();
    assert!(!sema.try_consume());
    assert!(!sema.poll(false));

    // Post once.
    sema.post();
    assert!(sema.poll(false));
    assert!(sema.poll(false));
    assert!(sema.try_consume());
    assert!(!sema.poll(false));
    assert!(!sema.try_consume());

    // Posts are coalesced.
    sema.post();
    sema.post();
    sema.post();
    assert!(sema.poll(false));
    assert!(sema.poll(false));
    assert!(sema.try_consume());
    assert!(!sema.poll(false));
    assert!(!sema.try_consume());
}
@@ -38,16 +38,8 @@ const IO_WAIT_FOR_WORK_DURATION: Duration = Duration::from_millis(500);
static IO_THREAD_POOL: OnceBox<Mutex<ThreadPool>> = OnceBox::new();

/// The event signaller singleton used for completions and queued main thread requests.
static NOTIFY_SIGNALLER: once_cell::sync::Lazy<&'static crate::fd_monitor::FdEventSignaller> =
    once_cell::sync::Lazy::new(|| unsafe {
        // This is leaked to avoid C++-side destructors. When ported fully to rust, we won't need to
        // leak anything.
        let signaller = crate::fd_monitor::new_fd_event_signaller();
        let signaller_ref: &crate::fd_monitor::FdEventSignaller = signaller.as_ref().unwrap();
        let result = std::mem::transmute(signaller_ref);
        std::mem::forget(signaller);
        result
    });
static NOTIFY_SIGNALLER: once_cell::sync::Lazy<crate::fd_monitor::FdEventSignaller> =
    once_cell::sync::Lazy::new(crate::fd_monitor::FdEventSignaller::new);

#[cxx::bridge]
mod ffi {

@@ -557,7 +549,7 @@ pub fn iothread_perform_cant_wait(f: impl FnOnce() + 'static + Send) {
}

pub fn iothread_port() -> i32 {
    i32::from(NOTIFY_SIGNALLER.read_fd())
    NOTIFY_SIGNALLER.read_fd()
}

pub fn iothread_service_main_with_timeout(timeout: Duration) {
src/fds.cpp (deleted, 249 lines)
@@ -1,249 +0,0 @@
/** Facilities for working with file descriptors. */

#include "config.h"  // IWYU pragma: keep

#include "fds.h"

#include <errno.h>
#include <fcntl.h>
#include <unistd.h>

#include <algorithm>

#include "flog.h"
#include "wutil.h"

#ifdef HAVE_EVENTFD
#include <sys/eventfd.h>
#endif

// The first fd in the "high range." fds below this are allowed to be used directly by users in
// redirections, e.g. >&3
const int k_first_high_fd = 10;
static constexpr uint64_t kUsecPerMsec = 1000;
static constexpr uint64_t kUsecPerSec [[gnu::unused]] = 1000 * kUsecPerMsec;

void autoclose_fd_t::close() {
    if (fd_ < 0) return;
    exec_close(fd_);
    fd_ = -1;
}

std::shared_ptr<fd_event_signaller_t> ffi_new_fd_event_signaller_t() {
    return std::make_shared<fd_event_signaller_t>();
}

#ifdef HAVE_EVENTFD
// Note we do not want to use EFD_SEMAPHORE because we are binary (not counting) semaphore.
fd_event_signaller_t::fd_event_signaller_t() {
    int fd = eventfd(0, EFD_CLOEXEC | EFD_NONBLOCK);
    if (fd < 0) {
        wperror(L"eventfd");
        exit_without_destructors(1);
    }
    fd_.reset(fd);
};

int fd_event_signaller_t::write_fd() const { return fd_.fd(); }

#else
// Implementation using pipes.
fd_event_signaller_t::fd_event_signaller_t() {
    auto pipes = make_autoclose_pipes();
    if (!pipes) {
        wperror(L"pipe");
        exit_without_destructors(1);
    }
    DIE_ON_FAILURE(make_fd_nonblocking(pipes->read.fd()));
    DIE_ON_FAILURE(make_fd_nonblocking(pipes->write.fd()));
    fd_ = std::move(pipes->read);
    write_ = std::move(pipes->write);
}

int fd_event_signaller_t::write_fd() const { return write_.fd(); }
#endif

bool fd_event_signaller_t::try_consume() const {
    // If we are using eventfd, we want to read a single uint64.
    // If we are using pipes, read a lot; note this may leave data on the pipe if post has been
    // called many more times. In no case do we care about the data which is read.
#ifdef HAVE_EVENTFD
    uint64_t buff[1];
#else
    uint8_t buff[1024];
#endif
    ssize_t ret;
    do {
        ret = read(read_fd(), buff, sizeof buff);
    } while (ret < 0 && errno == EINTR);
    if (ret < 0 && errno != EAGAIN && errno != EWOULDBLOCK) {
        wperror(L"read");
    }
    return ret > 0;
}

void fd_event_signaller_t::post() const {
    // eventfd writes uint64; pipes write 1 byte.
#ifdef HAVE_EVENTFD
    const uint64_t c = 1;
#else
    const uint8_t c = 1;
#endif
    ssize_t ret;
    do {
        ret = write(write_fd(), &c, sizeof c);
    } while (ret < 0 && errno == EINTR);
    // EAGAIN occurs if either the pipe buffer is full or the eventfd overflows (very unlikely).
    if (ret < 0 && errno != EAGAIN && errno != EWOULDBLOCK) {
        wperror(L"write");
    }
}

bool fd_event_signaller_t::poll(bool wait) const {
    struct timeval timeout = {0, 0};
    fd_set fds;
    FD_ZERO(&fds);
    FD_SET(read_fd(), &fds);
    int res = select(read_fd() + 1, &fds, nullptr, nullptr, wait ? nullptr : &timeout);
    return res > 0;
}

fd_event_signaller_t::~fd_event_signaller_t() = default;

/// If the given fd is in the "user range", move it to a new fd in the "high range".
/// zsh calls this movefd().
/// \p input_has_cloexec describes whether the input has CLOEXEC already set, so we can avoid
/// setting it again.
/// \return the fd, which always has CLOEXEC set; or an invalid fd on failure, in
/// which case an error will have been printed, and the input fd closed.
static autoclose_fd_t heightenize_fd(autoclose_fd_t fd, bool input_has_cloexec) {
    // Check if the fd is invalid or already in our high range.
    if (!fd.valid()) {
        return fd;
    }
    if (fd.fd() >= k_first_high_fd) {
        if (!input_has_cloexec) set_cloexec(fd.fd());
        return fd;
    }
#if defined(F_DUPFD_CLOEXEC)
    // Here we are asking the kernel to give us a cloexec fd.
    int newfd = fcntl(fd.fd(), F_DUPFD_CLOEXEC, k_first_high_fd);
    if (newfd < 0) {
        wperror(L"fcntl");
        return autoclose_fd_t{};
    }
    return autoclose_fd_t(newfd);
#elif defined(F_DUPFD)
    int newfd = fcntl(fd.fd(), F_DUPFD, k_first_high_fd);
    if (newfd < 0) {
        wperror(L"fcntl");
        return autoclose_fd_t{};
    }
    set_cloexec(newfd);
    return autoclose_fd_t(newfd);
#else
    // We have fd >= 0, and it's in the user range. dup it and recurse. Note that we recurse before
    // anything is closed; this forces the kernel to give us a new one (or report fd exhaustion).
    int tmp_fd;
    do {
        tmp_fd = dup(fd.fd());
    } while (tmp_fd < 0 && errno == EINTR);
    // Ok, we have a new candidate fd. Recurse.
    return heightenize_fd(autoclose_fd_t{tmp_fd}, false);
#endif
}

maybe_t<autoclose_pipes_t> make_autoclose_pipes() {
    int pipes[2] = {-1, -1};

    bool already_cloexec = false;
#ifdef HAVE_PIPE2
    if (pipe2(pipes, O_CLOEXEC) < 0) {
        FLOGF(warning, PIPE_ERROR);
        wperror(L"pipe2");
        return none();
    }
    already_cloexec = true;
#else
    if (pipe(pipes) < 0) {
        FLOGF(warning, PIPE_ERROR);
        wperror(L"pipe");
        return none();
    }
#endif

    autoclose_fd_t read_end{pipes[0]};
    autoclose_fd_t write_end{pipes[1]};

    // Ensure our fds are out of the user range.
    read_end = heightenize_fd(std::move(read_end), already_cloexec);
    if (!read_end.valid()) return none();

    write_end = heightenize_fd(std::move(write_end), already_cloexec);
    if (!write_end.valid()) return none();

    return autoclose_pipes_t(std::move(read_end), std::move(write_end));
}

pipes_ffi_t make_pipes_ffi() {
    pipes_ffi_t res = {-1, -1};
    if (auto pipes = make_autoclose_pipes()) {
        res.read = pipes->read.acquire();
        res.write = pipes->write.acquire();
    }
    return res;
}

int set_cloexec(int fd, bool should_set) {
    // Note we don't want to overwrite existing flags like O_NONBLOCK which may be set. So fetch the
    // existing flags and modify them.
    int flags = fcntl(fd, F_GETFD, 0);
    if (flags < 0) {
        return -1;
    }
    int new_flags = flags;
    if (should_set) {
        new_flags |= FD_CLOEXEC;
    } else {
        new_flags &= ~FD_CLOEXEC;
    }
    if (flags == new_flags) {
        return 0;
    } else {
        return fcntl(fd, F_SETFD, new_flags);
    }
}

int open_cloexec(const std::string &path, int flags, mode_t mode) {
    return open_cloexec(path.c_str(), flags, mode);
}

int open_cloexec(const char *path, int flags, mode_t mode) {
    int fd;

    // Prefer to use O_CLOEXEC.
#ifdef O_CLOEXEC
    fd = open(path, flags | O_CLOEXEC, mode);
#else
    fd = open(path, flags, mode);
    if (fd >= 0 && set_cloexec(fd)) {
        exec_close(fd);
        fd = -1;
    }
#endif
    return fd;
}

int wopen_cloexec(const wcstring &pathname, int flags, mode_t mode) {
    return open_cloexec(wcs2zstring(pathname), flags, mode);
}

void exec_close(int fd) {
    assert(fd >= 0 && "Invalid fd");
    while (close(fd) == -1) {
        if (errno != EINTR) {
            wperror(L"close");
            break;
        }
    }
}
src/fds.h (136 changed lines)
@@ -17,144 +17,14 @@
#include "common.h"
#include "maybe.h"

/// Pipe redirection error message.
#define PIPE_ERROR _(L"An error occurred while setting up pipe")

/// The first "high fd", which is considered outside the range of valid user-specified redirections
/// (like >&5).
extern const int k_first_high_fd;
#if INCLUDE_RUST_HEADERS
#include "fds.rs.h"
#endif

/// A sentinel value indicating no timeout.
#define kNoTimeout (std::numeric_limits<uint64_t>::max())

/// A helper class for managing and automatically closing a file descriptor.
class autoclose_fd_t : noncopyable_t {
    int fd_;

   public:
    // Closes the fd if not already closed.
    void close();

    // Returns the fd.
    int fd() const { return fd_; }

    // Returns the fd, transferring ownership to the caller.
    int acquire() {
        int temp = fd_;
        fd_ = -1;
        return temp;
    }

    // Resets to a new fd, taking ownership.
    void reset(int fd) {
        if (fd == fd_) return;
        close();
        fd_ = fd;
    }

    // \return if this has a valid fd.
    bool valid() const { return fd_ >= 0; }

    autoclose_fd_t(autoclose_fd_t &&rhs) : fd_(rhs.fd_) { rhs.fd_ = -1; }

    void operator=(autoclose_fd_t &&rhs) {
        close();
        std::swap(this->fd_, rhs.fd_);
    }

    explicit autoclose_fd_t(int fd = -1) : fd_(fd) {}
    ~autoclose_fd_t() { close(); }
};

/// Helper type returned from making autoclose pipes.
struct autoclose_pipes_t {
    /// Read end of the pipe.
    autoclose_fd_t read;

    /// Write end of the pipe.
    autoclose_fd_t write;

    autoclose_pipes_t() = default;
    autoclose_pipes_t(autoclose_fd_t r, autoclose_fd_t w)
        : read(std::move(r)), write(std::move(w)) {}
};

/// Call pipe(), populating autoclose fds.
/// The pipes are marked CLO_EXEC and are placed in the high fd range.
/// \return pipes on success, none() on error.
maybe_t<autoclose_pipes_t> make_autoclose_pipes();

/// Create pipes.
/// Upon failure both values will be negative.
struct pipes_ffi_t {
    int read;
    int write;
};
pipes_ffi_t make_pipes_ffi();

/// An event signaller implemented using a file descriptor, so it can plug into select().
/// This is like a binary semaphore. A call to post() will signal an event, making the fd readable.
/// Multiple calls to post() may be coalesced. On Linux this uses eventfd(); on other systems this
/// uses a pipe. try_consume() may be used to consume the event. Importantly this is async signal
/// safe. Of course it is CLO_EXEC as well.
class fd_event_signaller_t {
   public:
    /// \return the fd to read from, for notification.
    int read_fd() const { return fd_.fd(); }

    /// If an event is signalled, consume it; otherwise return.
    /// This does not block.
    /// This retries on EINTR.
    bool try_consume() const;

    /// Mark that an event has been received. This may be coalesced.
    /// This retries on EINTR.
    void post() const;

    /// Perform a poll to see if an event is received.
    /// If \p wait is set, wait until it is readable; this does not consume the event
    /// but guarantees that the next call to wait() will not block.
    /// \return true if readable, false if not readable, or not interrupted by a signal.
    bool poll(bool wait = false) const;

    // The default constructor will abort on failure (fd exhaustion).
    // This should only be used during startup.
    fd_event_signaller_t();
    ~fd_event_signaller_t();

   private:
    // \return the fd to write to.
    int write_fd() const;

    // Always the read end of the fd; maybe the write end as well.
    autoclose_fd_t fd_;

#ifndef HAVE_EVENTFD
    // If using a pipe, then this is its write end.
    autoclose_fd_t write_;
#endif
};

std::shared_ptr<fd_event_signaller_t> ffi_new_fd_event_signaller_t();

/// Sets CLO_EXEC on a given fd according to the value of \p should_set.
int set_cloexec(int fd, bool should_set = true);

/// Wide character version of open() that also sets the close-on-exec flag (atomically when
/// possible).
int wopen_cloexec(const wcstring &pathname, int flags, mode_t mode = 0);

/// Narrow versions of wopen_cloexec.
int open_cloexec(const std::string &path, int flags, mode_t mode = 0);
int open_cloexec(const char *path, int flags, mode_t mode = 0);

/// Mark an fd as nonblocking; returns errno or 0 on success.
int make_fd_nonblocking(int fd);

/// Mark an fd as blocking; returns errno or 0 on success.
int make_fd_blocking(int fd);

/// Close a file descriptor \p fd, retrying on EINTR.
void exec_close(int fd);

#endif
@@ -21,7 +21,6 @@ void mark_as_used(const parser_t& parser, env_stack_t& env_stack) {
    get_history_variable_text_ffi({});
    highlight_spec_t{};
    init_input();
    make_pipes_ffi();
    reader_change_cursor_selection_mode(cursor_selection_mode_t::exclusive);
    reader_change_history({});
    reader_read_ffi({}, {}, {});
@@ -1167,32 +1167,6 @@ void test_dirname_basename() {
    do_test(wbasename(longpath) == L"overlong");
}

// todo!("port this")
static void test_fd_event_signaller() {
    say(L"Testing fd event signaller");
    fd_event_signaller_t sema;
    do_test(!sema.try_consume());
    do_test(!sema.poll());

    // Post once.
    sema.post();
    do_test(sema.poll());
    do_test(sema.poll());
    do_test(sema.try_consume());
    do_test(!sema.poll());
    do_test(!sema.try_consume());

    // Posts are coalesced.
    sema.post();
    sema.post();
    sema.post();
    do_test(sema.poll());
    do_test(sema.poll());
    do_test(sema.try_consume());
    do_test(!sema.poll());
    do_test(!sema.try_consume());
}

void test_rust_smoke() {
    size_t x = rust::add(37, 5);
    do_test(x == 42);
@@ -1240,7 +1214,6 @@ static const test_t s_tests[]{
    {TEST_GROUP("maybe"), test_maybe},
    {TEST_GROUP("normalize"), test_normalize_path},
    {TEST_GROUP("dirname"), test_dirname_basename},
    {TEST_GROUP("fd_event"), test_fd_event_signaller},
    {TEST_GROUP("rust_smoke"), test_rust_smoke},
    {TEST_GROUP("rust_ffi"), test_rust_ffi},
};
@@ -2323,14 +2323,14 @@ static bool check_for_orphaned_process(unsigned long loop_count, pid_t shell_pgi
    }

    // Open the tty. Presumably this is stdin, but maybe not?
    autoclose_fd_t tty_fd{open(tty, O_RDONLY | O_NONBLOCK)};
    if (!tty_fd.valid()) {
    rust::Box<autoclose_fd_t2> tty_fd = new_autoclose_fd(open(tty, O_RDONLY | O_NONBLOCK));
    if (!tty_fd->valid()) {
        wperror(L"open");
        exit_without_destructors(1);
    }

    char tmp;
    if (read(tty_fd.fd(), &tmp, 1) < 0 && errno == EIO) {
    if (read(tty_fd->fd(), &tmp, 1) < 0 && errno == EIO) {
        we_think_we_are_orphaned = true;
    }
}
@@ -258,16 +258,6 @@ void wperror(wcharz_t s) {
    std::fwprintf(stderr, L"%s\n", std::strerror(e));
}

int make_fd_nonblocking(int fd) {
    int flags = fcntl(fd, F_GETFL, 0);
    int err = 0;
    bool nonblocking = flags & O_NONBLOCK;
    if (!nonblocking) {
        err = fcntl(fd, F_SETFL, flags | O_NONBLOCK);
    }
    return err == -1 ? errno : 0;
}

int make_fd_blocking(int fd) {
    int flags = fcntl(fd, F_GETFL, 0);
    int err = 0;

@@ -663,8 +653,6 @@ file_id_t file_id_for_fd(int fd) {
    return result;
}

file_id_t file_id_for_fd(const autoclose_fd_t &fd) { return file_id_for_fd(fd.fd()); }

bool file_id_t::operator==(const file_id_t &rhs) const { return this->compare_file_id(rhs) == 0; }

bool file_id_t::operator!=(const file_id_t &rhs) const { return !(*this == rhs); }
@@ -303,7 +303,6 @@ struct hash<file_id_t> {
#endif

file_id_t file_id_for_fd(int fd);
file_id_t file_id_for_fd(const autoclose_fd_t &fd);

extern const file_id_t kInvalidFileID;