diff --git a/fish-rust/src/tests/debounce.rs b/fish-rust/src/tests/debounce.rs
new file mode 100644
index 000000000..ef03971ba
--- /dev/null
+++ b/fish-rust/src/tests/debounce.rs
@@ -0,0 +1,123 @@
+use std::sync::{
+    atomic::{AtomicU32, Ordering},
+    Arc, Condvar, Mutex,
+};
+use std::time::Duration;
+
+use crate::ffi_tests::add_test;
+use crate::global_safety::RelaxedAtomicBool;
+use crate::threads::{iothread_drain_all, iothread_service_main, Debounce};
+use crate::wchar::prelude::*;
+
+add_test!("test_debounce", || {
+    // Run 8 functions using a condition variable.
+    // Only the first and last should run.
+    let db = Debounce::new(Duration::from_secs(0));
+    const count: usize = 8;
+
+    struct Context {
+        handler_ran: [RelaxedAtomicBool; count],
+        completion_ran: [RelaxedAtomicBool; count],
+        ready_to_go: Mutex<bool>,
+        cv: Condvar,
+    }
+
+    let ctx = Arc::new(Context {
+        handler_ran: std::array::from_fn(|_i| RelaxedAtomicBool::new(false)),
+        completion_ran: std::array::from_fn(|_i| RelaxedAtomicBool::new(false)),
+        ready_to_go: Mutex::new(false),
+        cv: Condvar::new(),
+    });
+
+    // "Enqueue" all functions. Each one waits until ready_to_go.
+    for idx in 0..count {
+        assert!(!ctx.handler_ran[idx].load());
+        let performer = {
+            let ctx = ctx.clone();
+            move || {
+                let guard = ctx.ready_to_go.lock().unwrap();
+                let _guard = ctx.cv.wait_while(guard, |ready| !*ready).unwrap();
+                ctx.handler_ran[idx].store(true);
+                idx
+            }
+        };
+        let completer = {
+            let ctx = ctx.clone();
+            move |idx: usize| {
+                ctx.completion_ran[idx].store(true);
+            }
+        };
+        db.perform_with_completion(performer, completer);
+    }
+
+    // We're ready to go.
+    *ctx.ready_to_go.lock().unwrap() = true;
+    ctx.cv.notify_all();
+
+    // Wait until the last completion is done.
+    while !ctx.completion_ran.last().unwrap().load() {
+        iothread_service_main();
+    }
+    unsafe { iothread_drain_all() };
+
+    // Each perform() call may displace an existing queued operation.
+    // Each operation waits until all are queued.
+    // Therefore we expect the last perform() to have run, and at most one more.
+    assert!(ctx.handler_ran.last().unwrap().load());
+    assert!(ctx.completion_ran.last().unwrap().load());
+
+    let mut total_ran = 0;
+    for idx in 0..count {
+        if ctx.handler_ran[idx].load() {
+            total_ran += 1;
+        }
+        assert_eq!(ctx.handler_ran[idx].load(), ctx.completion_ran[idx].load());
+    }
+    assert!(total_ran <= 2);
+});
+
+add_test!("test_debounce_timeout", || {
+    // Verify that debounce doesn't wait forever.
+    // Use an Arc so we don't have to join our threads.
+    let timeout = Duration::from_millis(500);
+
+    struct Data {
+        db: Debounce,
+        exit_ok: Mutex<bool>,
+        cv: Condvar,
+        running: AtomicU32,
+    }
+
+    let data = Arc::new(Data {
+        db: Debounce::new(timeout),
+        exit_ok: Mutex::new(false),
+        cv: Condvar::new(),
+        running: AtomicU32::new(0),
+    });
+
+    // Our background handler. Note this just blocks until exit_ok is set.
+    let handler = {
+        let data = data.clone();
+        move || {
+            data.running.fetch_add(1, Ordering::Relaxed);
+            let guard = data.exit_ok.lock().unwrap();
+            let _guard = data.cv.wait_while(guard, |exit_ok| !*exit_ok);
+        }
+    };
+
+    // Spawn the handler twice. This should not modify the thread token.
+    let token1 = data.db.perform(handler.clone());
+    let token2 = data.db.perform(handler.clone());
+    assert_eq!(token1, token2);
+
+    // Wait 750 msec (1.5x the timeout), then enqueue something else; this should spawn a new thread.
+    std::thread::sleep(timeout + timeout / 2);
+    assert!(data.running.load(Ordering::Relaxed) == 1);
+    let token3 = data.db.perform(handler.clone());
+    assert!(token3 > token2);
+
+    // Release all the threads.
+    let mut exit_ok = data.exit_ok.lock().unwrap();
+    *exit_ok = true;
+    data.cv.notify_all();
+});
diff --git a/fish-rust/src/tests/mod.rs b/fish-rust/src/tests/mod.rs
index dc33f2df5..69ad761b1 100644
--- a/fish-rust/src/tests/mod.rs
+++ b/fish-rust/src/tests/mod.rs
@@ -3,6 +3,7 @@ use crate::wchar::prelude::*;
 #[cfg(test)]
 mod common;
 mod complete;
+mod debounce;
 #[cfg(test)]
 mod editable_line;
 mod env;
diff --git a/src/fish_tests.cpp b/src/fish_tests.cpp
index 6a8e35d7d..91ea37725 100644
--- a/src/fish_tests.cpp
+++ b/src/fish_tests.cpp
@@ -524,102 +524,6 @@ static void test_pthread() {
     do_test(val == 5);
 }
 
-// todo!("port this");
-static void test_debounce() {
-    say(L"Testing debounce");
-    // Run 8 functions using a condition variable.
-    // Only the first and last should run.
-    auto db = new_debounce_t(0);
-    constexpr size_t count = 8;
-    std::array<bool, count> handler_ran = {};
-    std::array<bool, count> completion_ran = {};
-
-    bool ready_to_go = false;
-    std::mutex m;
-    std::condition_variable cv;
-
-    // "Enqueue" all functions. Each one waits until ready_to_go.
-    for (size_t idx = 0; idx < count; idx++) {
-        do_test(handler_ran[idx] == false);
-        std::function<size_t()> performer = [&, idx] {
-            std::unique_lock<std::mutex> lock(m);
-            cv.wait(lock, [&] { return ready_to_go; });
-            handler_ran[idx] = true;
-            return idx;
-        };
-        std::function<void(size_t)> completer = [&](size_t idx) { completion_ran[idx] = true; };
-        debounce_perform_with_completion(*db, std::move(performer), std::move(completer));
-    }
-
-    // We're ready to go.
-    {
-        std::unique_lock<std::mutex> lock(m);
-        ready_to_go = true;
-    }
-    cv.notify_all();
-
-    // Wait until the last completion is done.
-    while (!completion_ran.back()) {
-        iothread_service_main();
-    }
-    iothread_drain_all();
-
-    // Each perform() call may displace an existing queued operation.
-    // Each operation waits until all are queued.
-    // Therefore we expect the last perform() to have run, and at most one more.
-
-    do_test(handler_ran.back());
-    do_test(completion_ran.back());
-
-    size_t total_ran = 0;
-    for (size_t idx = 0; idx < count; idx++) {
-        total_ran += (handler_ran[idx] ? 1 : 0);
-        do_test(handler_ran[idx] == completion_ran[idx]);
-    }
-    do_test(total_ran <= 2);
-}
-
-// todo!("port this");
-static void test_debounce_timeout() {
-    using namespace std::chrono;
-    say(L"Testing debounce timeout");
-
-    // Verify that debounce doesn't wait forever.
-    // Use a shared_ptr so we don't have to join our threads.
-    const long timeout_ms = 500;
-    struct data_t {
-        rust::box<debounce_t> db = new_debounce_t(timeout_ms);
-        bool exit_ok = false;
-        std::mutex m;
-        std::condition_variable cv;
-        relaxed_atomic_t<uint32_t> running{0};
-    };
-    auto data = std::make_shared<data_t>();
-
-    // Our background handler. Note this just blocks until exit_ok is set.
-    std::function<void()> handler = [data] {
-        data->running++;
-        std::unique_lock<std::mutex> lock(data->m);
-        data->cv.wait(lock, [&] { return data->exit_ok; });
-    };
-
-    // Spawn the handler twice. This should not modify the thread token.
-    uint64_t token1 = debounce_perform(*data->db, handler);
-    uint64_t token2 = debounce_perform(*data->db, handler);
-    do_test(token1 == token2);
-
-    // Wait 75 msec, then enqueue something else; this should spawn a new thread.
-    std::this_thread::sleep_for(std::chrono::milliseconds(timeout_ms + timeout_ms / 2));
-    do_test(data->running == 1);
-    uint64_t token3 = debounce_perform(*data->db, handler);
-    do_test(token3 > token2);
-
-    // Release all the threads.
-    std::unique_lock<std::mutex> lock(data->m);
-    data->exit_ok = true;
-    data->cv.notify_all();
-}
-
 static parser_test_error_bits_t detect_argument_errors(const wcstring &src) {
     using namespace ast;
     auto ast = ast_parse_argument_list(src, parse_flag_none);
@@ -2271,8 +2175,6 @@ static const test_t s_tests[]{
     {TEST_GROUP("convert_nulls"), test_convert_nulls},
     {TEST_GROUP("iothread"), test_iothread},
    {TEST_GROUP("pthread"), test_pthread},
-    {TEST_GROUP("debounce"), test_debounce},
-    {TEST_GROUP("debounce"), test_debounce_timeout},
     {TEST_GROUP("parser"), test_parser},
     {TEST_GROUP("lru"), test_lru},
     {TEST_GROUP("wcstod"), test_wcstod},
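
Note on the behavior the first test asserts: fish's Debounce keeps at most one queued operation, so each perform() displaces whatever is still waiting, and only the in-flight operation plus the newest request end up running. The following is a minimal standalone sketch of that "latest wins" idea, not fish's implementation; the names MiniDebounce, Inner, and Job are invented for illustration, and the real Debounce additionally carries a timeout and returns thread tokens, which this sketch omits.

// Standalone sketch of a "latest wins" debouncer (illustration only).
use std::sync::{Arc, Mutex};
use std::thread;
use std::time::Duration;

type Job = Box<dyn FnOnce() + Send>;

struct Inner {
    pending: Option<Job>, // at most one queued job; newer jobs displace it
    worker_active: bool,  // whether a background worker is draining `pending`
}

struct MiniDebounce {
    inner: Arc<Mutex<Inner>>,
}

impl MiniDebounce {
    fn new() -> Self {
        Self {
            inner: Arc::new(Mutex::new(Inner {
                pending: None,
                worker_active: false,
            })),
        }
    }

    // Queue `job`, displacing any previously queued job that has not started yet.
    fn perform(&self, job: impl FnOnce() + Send + 'static) {
        let mut inner = self.inner.lock().unwrap();
        inner.pending = Some(Box::new(job));
        if !inner.worker_active {
            inner.worker_active = true;
            let shared = Arc::clone(&self.inner);
            thread::spawn(move || loop {
                // Take the most recent job, or mark the worker idle and exit.
                let job = {
                    let mut inner = shared.lock().unwrap();
                    match inner.pending.take() {
                        Some(job) => job,
                        None => {
                            inner.worker_active = false;
                            break;
                        }
                    }
                };
                job();
            });
        }
    }
}

fn main() {
    let db = MiniDebounce::new();
    for idx in 0..8 {
        db.perform(move || println!("ran {idx}"));
    }
    // The last job always runs; earlier jobs run only if the worker dequeued
    // them before a later perform() displaced them.
    thread::sleep(Duration::from_millis(100));
}

The ported test pins this down by gating every handler on a condition variable: because no handler can finish until all eight are queued, only the first (already claimed by the worker) and the last (the sole survivor in the queue) can run, which is why it asserts total_ran <= 2.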