automatically wait for worker threads

closes #817
Aleksey Kladov 2019-02-14 20:43:45 +03:00
parent 10bf61b83b
commit bf352cd251
10 changed files with 136 additions and 155 deletions

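The change in a nutshell: instead of explicit `shutdown()` calls guarded by a `DropBomb`, worker threads are now joined automatically when their handles are dropped. A minimal sketch of the join-on-drop idea (illustrative names only, not this crate's API):

    use std::thread;

    // Joins the wrapped thread when the handle goes out of scope,
    // so callers can no longer forget to wait for it.
    struct JoinOnDrop(Option<thread::JoinHandle<()>>);

    impl Drop for JoinOnDrop {
        fn drop(&mut self) {
            // The real ScopedThread below also re-raises worker panics.
            let _ = self.0.take().unwrap().join();
        }
    }

    fn main() {
        let _worker = JoinOnDrop(Some(thread::spawn(|| println!("working"))));
    } // `_worker` is dropped here; the thread is joined before main returns
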
Cargo.lock (generated)

@@ -1651,7 +1651,6 @@ name = "thread_worker"
 version = "0.1.0"
 dependencies = [
  "crossbeam-channel 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)",
- "drop_bomb 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)",
  "log 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)",
 ]

crates/ra_batch/src/lib.rs

@@ -121,7 +121,6 @@ impl BatchDatabase {
             .collect();
 
         let db = BatchDatabase::load(crate_graph, &mut vfs);
-        let _ = vfs.shutdown();
         Ok((db, local_roots))
     }
 }

crates/ra_lsp_server/src/main_loop.rs

@@ -54,19 +54,20 @@ pub fn main_loop(
 ) -> Result<()> {
     let pool = ThreadPool::new(THREADPOOL_SIZE);
     let (task_sender, task_receiver) = unbounded::<Task>();
-    let (ws_worker, ws_watcher) = workspace_loader();
-    ws_worker.send(ws_root.clone()).unwrap();
     // FIXME: support dynamic workspace loading.
-    let workspaces = match ws_worker.recv().unwrap() {
-        Ok(ws) => vec![ws],
-        Err(e) => {
-            log::error!("loading workspace failed: {}", e);
-            Vec::new()
+    let workspaces = {
+        let ws_worker = workspace_loader();
+        ws_worker.sender().send(ws_root.clone()).unwrap();
+        match ws_worker.receiver().recv().unwrap() {
+            Ok(ws) => vec![ws],
+            Err(e) => {
+                log::error!("loading workspace failed: {}", e);
+                Vec::new()
+            }
         }
     };
-    ws_worker.shutdown();
-    ws_watcher.shutdown().map_err(|_| format_err!("ws watcher died"))?;
 
     let mut state = ServerWorldState::new(ws_root.clone(), workspaces);
     log::info!("server initialized, serving requests");

@@ -94,12 +95,9 @@ pub fn main_loop(
     log::info!("...threadpool has finished");
 
     let vfs = Arc::try_unwrap(state.vfs).expect("all snapshots should be dead");
-    let vfs_res = vfs.into_inner().shutdown();
-
-    main_res?;
-    vfs_res.map_err(|_| format_err!("fs watcher died"))?;
-
-    Ok(())
+    drop(vfs);
+
+    main_res
 }
 
 enum Event {

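The workspace loader is now confined to a block, so the Worker is dropped, and therefore joined, as soon as the workspaces are loaded. A reduced sketch of that scoping pattern using plain std channels (the diff itself uses crossbeam and the thread_worker crate):

    use std::{sync::mpsc, thread};

    fn main() {
        let workspaces = {
            // The worker lives only inside this block.
            let (tx, rx) = mpsc::channel::<String>();
            let handle = thread::spawn(move || {
                rx.into_iter().map(|root| format!("loaded {}", root)).collect::<Vec<_>>()
            });
            tx.send("ws_root".to_string()).unwrap();
            drop(tx); // close the channel so the worker's loop terminates
            handle.join().unwrap()
        }; // with a join-on-drop Worker, both steps happen implicitly here
        println!("{:?}", workspaces);
    }
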
crates/ra_lsp_server/src/project_model.rs

@@ -1,6 +1,6 @@
 use std::path::PathBuf;
 
-use thread_worker::{WorkerHandle, Worker};
+use thread_worker::Worker;
 
 use crate::Result;

@@ -8,8 +8,8 @@ pub use ra_project_model::{
     ProjectWorkspace, CargoWorkspace, Package, Target, TargetKind, Sysroot,
 };
 
-pub fn workspace_loader() -> (Worker<PathBuf, Result<ProjectWorkspace>>, WorkerHandle) {
-    thread_worker::spawn::<PathBuf, Result<ProjectWorkspace>, _>(
+pub fn workspace_loader() -> Worker<PathBuf, Result<ProjectWorkspace>> {
+    Worker::<PathBuf, Result<ProjectWorkspace>>::spawn(
         "workspace loader",
         1,
         |input_receiver, output_sender| {

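`workspace_loader` now returns a single `Worker` value that owns both channel endpoints and the thread. A sketch of driving the new API, assuming the `thread_worker` crate from this commit is a dependency and with a stub in place of the real workspace discovery:

    use thread_worker::Worker;

    fn main() {
        let loader = Worker::<String, Result<String, String>>::spawn(
            "workspace loader",
            1,
            |input_receiver, output_sender| {
                // Stub standing in for the real workspace loading.
                for path in input_receiver {
                    output_sender.send(Ok(format!("workspace at {}", path))).unwrap();
                }
            },
        );
        loader.sender().send("./my-project".to_string()).unwrap();
        match loader.receiver().recv().unwrap() {
            Ok(ws) => println!("{}", ws),
            Err(e) => eprintln!("loading workspace failed: {}", e),
        }
    } // dropping `loader` closes its sender and joins the thread
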
crates/ra_lsp_server/tests/heavy_tests/support.rs

@@ -17,7 +17,7 @@ use lsp_types::{
 use serde::Serialize;
 use serde_json::{to_string_pretty, Value};
 use tempfile::TempDir;
-use thread_worker::{WorkerHandle, Worker};
+use thread_worker::Worker;
 
 use test_utils::{parse_fixture, find_mismatch};
 use ra_lsp_server::{

@@ -45,13 +45,12 @@ pub struct Server {
     messages: RefCell<Vec<RawMessage>>,
     dir: TempDir,
     worker: Option<Worker<RawMessage, RawMessage>>,
-    watcher: Option<WorkerHandle>,
 }
 
 impl Server {
     fn new(dir: TempDir, files: Vec<(PathBuf, String)>) -> Server {
         let path = dir.path().to_path_buf();
-        let (worker, watcher) = thread_worker::spawn::<RawMessage, RawMessage, _>(
+        let worker = Worker::<RawMessage, RawMessage>::spawn(
             "test server",
             128,
             move |mut msg_receiver, mut msg_sender| {

@@ -63,7 +62,6 @@ impl Server {
             dir,
             messages: Default::default(),
             worker: Some(worker),
-            watcher: Some(watcher),
         };
 
         for (path, text) in files {

@@ -117,7 +115,7 @@ impl Server {
     fn send_request_(&self, r: RawRequest) -> Value {
         let id = r.id;
-        self.worker.as_ref().unwrap().send(RawMessage::Request(r)).unwrap();
+        self.worker.as_ref().unwrap().sender().send(RawMessage::Request(r)).unwrap();
         while let Some(msg) = self.recv() {
             match msg {
                 RawMessage::Request(req) => panic!("unexpected request: {:?}", req),

@@ -157,24 +155,19 @@ impl Server {
         }
     }
     fn recv(&self) -> Option<RawMessage> {
-        recv_timeout(&self.worker.as_ref().unwrap().out).map(|msg| {
+        recv_timeout(&self.worker.as_ref().unwrap().receiver()).map(|msg| {
             self.messages.borrow_mut().push(msg.clone());
             msg
         })
     }
     fn send_notification(&self, not: RawNotification) {
-        self.worker.as_ref().unwrap().send(RawMessage::Notification(not)).unwrap();
+        self.worker.as_ref().unwrap().sender().send(RawMessage::Notification(not)).unwrap();
     }
 }
 
 impl Drop for Server {
     fn drop(&mut self) {
         self.send_request::<Shutdown>(());
-        let receiver = self.worker.take().unwrap().shutdown();
-        while let Some(msg) = recv_timeout(&receiver) {
-            drop(msg);
-        }
-        self.watcher.take().unwrap().shutdown().unwrap();
     }
 }

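`Server::drop` now only sends the LSP Shutdown request; the join happens when the `worker` field is dropped, because a struct's fields are dropped after its own `Drop::drop` body returns. A minimal demonstration of that ordering guarantee:

    struct Noisy(&'static str);

    impl Drop for Noisy {
        fn drop(&mut self) {
            println!("dropping {}", self.0);
        }
    }

    struct Server {
        worker: Noisy,
    }

    impl Drop for Server {
        fn drop(&mut self) {
            // The shutdown request would be sent here.
            println!("Server::drop body");
            // `self.worker` is dropped automatically after this body returns.
        }
    }

    fn main() {
        let _s = Server { worker: Noisy("worker") };
        // Prints "Server::drop body", then "dropping worker".
    }
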
crates/ra_vfs/src/io.rs

@@ -1,13 +1,11 @@
 use std::{
     fs,
-    thread,
     path::{Path, PathBuf},
     sync::{mpsc, Arc},
     time::Duration,
 };
 use crossbeam_channel::{Receiver, Sender, unbounded, RecvError, select};
 use relative_path::RelativePathBuf;
-use thread_worker::WorkerHandle;
 use walkdir::WalkDir;
 use notify::{DebouncedEvent, RecommendedWatcher, RecursiveMode, Watcher as _Watcher};

@@ -49,8 +47,7 @@ enum ChangeKind {
 const WATCHER_DELAY: Duration = Duration::from_millis(250);
 
 pub(crate) struct Worker {
-    worker: thread_worker::Worker<Task, TaskResult>,
-    worker_handle: WorkerHandle,
+    thread_worker: thread_worker::Worker<Task, TaskResult>,
 }
 
 impl Worker {

@@ -62,82 +59,79 @@ impl Worker {
         // * we want to read all files from a single thread, to guarantee that
         //   we always get fresher versions and never go back in time.
         // * we want to tear down everything neatly during shutdown.
-        let (worker, worker_handle) = thread_worker::spawn(
+        let thread_worker = thread_worker::Worker::spawn(
             "vfs",
             128,
             // These are the channels we use to communicate with the outside world.
             // If `input_receiver` is closed we need to tear ourselves down.
             // `output_sender` should not be closed unless the parent died.
             move |input_receiver, output_sender| {
-                // These are `std` channels notify will send events to
-                let (notify_sender, notify_receiver) = mpsc::channel();
+                // Make sure that the destruction order is
+                //
+                // * notify_sender
+                // * _thread
+                // * watcher_sender
+                //
+                // this is required to avoid deadlocks.
+
                 // These are the corresponding crossbeam channels
                 let (watcher_sender, watcher_receiver) = unbounded();
+                let _thread;
+                {
+                    // These are `std` channels notify will send events to
+                    let (notify_sender, notify_receiver) = mpsc::channel();
 
-                let mut watcher = notify::watcher(notify_sender, WATCHER_DELAY)
-                    .map_err(|e| log::error!("failed to spawn notify {}", e))
-                    .ok();
+                    let mut watcher = notify::watcher(notify_sender, WATCHER_DELAY)
+                        .map_err(|e| log::error!("failed to spawn notify {}", e))
+                        .ok();
 
-                // Start a silly thread to transform between two channels
-                let thread = thread::spawn(move || {
-                    notify_receiver
-                        .into_iter()
-                        .for_each(|event| convert_notify_event(event, &watcher_sender))
-                });
+                    // Start a silly thread to transform between two channels
+                    _thread = thread_worker::ScopedThread::spawn("notify-convertor", move || {
+                        notify_receiver
+                            .into_iter()
+                            .for_each(|event| convert_notify_event(event, &watcher_sender))
+                    });
 
-                // Process requests from the called or notifications from
-                // watcher until the caller says stop.
-                loop {
-                    select! {
-                        // Received request from the caller. If this channel is
-                        // closed, we should shutdown everything.
-                        recv(input_receiver) -> t => match t {
-                            Err(RecvError) => {
-                                drop(input_receiver);
-                                break
-                            },
-                            Ok(Task::AddRoot { root, config }) => {
-                                watch_root(watcher.as_mut(), &output_sender, root, Arc::clone(&config));
-                            }
-                        },
-                        // Watcher send us changes. If **this** channel is
-                        // closed, the watcher has died, which indicates a bug
-                        // -- escalate!
-                        recv(watcher_receiver) -> event => match event {
-                            Err(RecvError) => panic!("watcher is dead"),
-                            Ok((path, change)) => {
-                                handle_change(watcher.as_mut(), &output_sender, &*roots, path, change);
-                            }
-                        },
+                    // Process requests from the caller or notifications from the
+                    // watcher until the caller says stop.
+                    loop {
+                        select! {
+                            // Received request from the caller. If this channel is
+                            // closed, we should shutdown everything.
+                            recv(input_receiver) -> t => match t {
+                                Err(RecvError) => {
+                                    drop(input_receiver);
+                                    break
+                                },
+                                Ok(Task::AddRoot { root, config }) => {
+                                    watch_root(watcher.as_mut(), &output_sender, root, Arc::clone(&config));
+                                }
+                            },
+                            // The watcher sends us changes. If **this** channel is
+                            // closed, the watcher has died, which indicates a bug
+                            // -- escalate!
+                            recv(watcher_receiver) -> event => match event {
+                                Err(RecvError) => panic!("watcher is dead"),
+                                Ok((path, change)) => {
+                                    handle_change(watcher.as_mut(), &output_sender, &*roots, path, change);
+                                }
+                            },
+                        }
                     }
                 }
-                // Stopped the watcher
-                drop(watcher.take());
                 // Drain pending events: we are not interested in them anyways!
                 watcher_receiver.into_iter().for_each(|_| ());
-
-                let res = thread.join();
-                match &res {
-                    Ok(()) => log::info!("... Watcher terminated with ok"),
-                    Err(_) => log::error!("... Watcher terminated with err"),
-                }
-                res.unwrap();
             },
         );
-        Worker { worker, worker_handle }
+        Worker { thread_worker }
     }
 
     pub(crate) fn sender(&self) -> &Sender<Task> {
-        &self.worker.inp
+        &self.thread_worker.sender()
     }
 
     pub(crate) fn receiver(&self) -> &Receiver<TaskResult> {
-        &self.worker.out
+        &self.thread_worker.receiver()
     }
-
-    pub(crate) fn shutdown(self) -> thread::Result<()> {
-        let _ = self.worker.shutdown();
-        self.worker_handle.shutdown()
-    }
 }

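The destruction-order comment above is the heart of this hunk: a thread blocked reading from a channel can only be joined after the matching sender is gone, which is why the inner scope drops the notify machinery before `_thread` is joined. The same hazard in miniature, with std channels:

    use std::{sync::mpsc, thread};

    fn main() {
        let (tx, rx) = mpsc::channel::<u32>();
        let worker = thread::spawn(move || {
            for n in rx {
                println!("got {}", n); // runs until every sender is dropped
            }
        });
        tx.send(1).unwrap();
        // Drop the sender *before* joining. Joining while `tx` is still
        // alive would block forever -- the deadlock the inner scope in
        // the diff is arranged to prevent.
        drop(tx);
        worker.join().unwrap();
    }
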
crates/ra_vfs/src/lib.rs

@@ -22,7 +22,6 @@ use std::{
     fmt, fs, mem,
     path::{Path, PathBuf},
     sync::Arc,
-    thread,
 };
 
 use crossbeam_channel::Receiver;

@@ -337,11 +336,6 @@ impl Vfs {
         mem::replace(&mut self.pending_changes, Vec::new())
     }
 
-    /// Shutdown the VFS and terminate the background watching thread.
-    pub fn shutdown(self) -> thread::Result<()> {
-        self.worker.shutdown()
-    }
-
     fn add_file(
         &mut self,
         root: VfsRoot,

crates/ra_vfs/tests/vfs.rs

@@ -158,6 +158,5 @@ fn test_vfs_works() -> std::io::Result<()> {
         Err(RecvTimeoutError::Timeout)
     );
 
-    vfs.shutdown().unwrap();
     Ok(())
 }

crates/thread_worker/Cargo.toml

@@ -5,7 +5,6 @@ version = "0.1.0"
 authors = ["rust-analyzer developers"]
 
 [dependencies]
-drop_bomb = "0.1.0"
 crossbeam-channel = "0.3.5"
 log = "0.4.3"

crates/thread_worker/src/lib.rs

@@ -2,74 +2,80 @@
 use std::thread;
 
-use crossbeam_channel::{bounded, unbounded, Receiver, Sender, RecvError, SendError};
-use drop_bomb::DropBomb;
+use crossbeam_channel::{bounded, unbounded, Receiver, Sender};
 
+/// Like `std::thread::JoinHandle<()>`, but joins the thread in drop automatically.
+pub struct ScopedThread {
+    // Option for drop
+    inner: Option<thread::JoinHandle<()>>,
+}
+
+impl Drop for ScopedThread {
+    fn drop(&mut self) {
+        let inner = self.inner.take().unwrap();
+        let name = inner.thread().name().unwrap().to_string();
+        log::info!("waiting for {} to finish...", name);
+        let res = inner.join();
+        log::info!(".. {} terminated with {}", name, if res.is_ok() { "ok" } else { "err" });
+
+        // escalate panic, but avoid aborting the process
+        match res {
+            Err(e) => {
+                if !thread::panicking() {
+                    panic!(e)
+                }
+            }
+            _ => (),
+        }
+    }
+}
+
+impl ScopedThread {
+    pub fn spawn(name: &'static str, f: impl FnOnce() + Send + 'static) -> ScopedThread {
+        let inner = thread::Builder::new().name(name.into()).spawn(f).unwrap();
+        ScopedThread { inner: Some(inner) }
+    }
+}
+
+/// A wrapper around an event-processing thread with automatic shutdown semantics.
 pub struct Worker<I, O> {
-    pub inp: Sender<I>,
-    pub out: Receiver<O>,
-}
-
-pub struct WorkerHandle {
-    name: &'static str,
-    thread: thread::JoinHandle<()>,
-    bomb: DropBomb,
-}
-
-pub fn spawn<I, O, F>(name: &'static str, buf: usize, f: F) -> (Worker<I, O>, WorkerHandle)
-where
-    F: FnOnce(Receiver<I>, Sender<O>) + Send + 'static,
-    I: Send + 'static,
-    O: Send + 'static,
-{
-    let (worker, inp_r, out_s) = worker_chan(buf);
-    let watcher = WorkerHandle::spawn(name, move || f(inp_r, out_s));
-    (worker, watcher)
+    // XXX: field order is significant here.
+    //
+    // In Rust, fields are dropped in declaration order, and we rely on this
+    // here. We must close the input first, so that the `_thread` (which holds
+    // the opposite side of the channel) notices shutdown. Then we must join
+    // the thread, but keep the receiver alive so that the thread does not
+    // panic.
+    //
+    // Note that a potential problem here is that we might drop some messages
+    // from the receiver on the floor. This is ok for rust-analyzer: we have
+    // only a single client, so, if we are shutting down, nobody is interested
+    // in the unfinished work anyway!
+    sender: Sender<I>,
+    _thread: ScopedThread,
+    receiver: Receiver<O>,
 }
 
 impl<I, O> Worker<I, O> {
-    /// Stops the worker. Returns the message receiver to fetch results which
-    /// have become ready before the worker is stopped.
-    pub fn shutdown(self) -> Receiver<O> {
-        self.out
-    }
-
-    pub fn send(&self, item: I) -> Result<(), SendError<I>> {
-        self.inp.send(item)
-    }
-    pub fn recv(&self) -> Result<O, RecvError> {
-        self.out.recv()
+    pub fn spawn<F>(name: &'static str, buf: usize, f: F) -> Worker<I, O>
+    where
+        F: FnOnce(Receiver<I>, Sender<O>) + Send + 'static,
+        I: Send + 'static,
+        O: Send + 'static,
+    {
+        // Set up worker channels in a deadlock-avoiding way. If one sets both
+        // input and output buffers to a fixed size, a worker might get stuck.
+        let (sender, input_receiver) = bounded::<I>(buf);
+        let (output_sender, receiver) = unbounded::<O>();
+        let _thread = ScopedThread::spawn(name, move || f(input_receiver, output_sender));
+        Worker { sender, _thread, receiver }
     }
 }
 
-impl WorkerHandle {
-    fn spawn(name: &'static str, f: impl FnOnce() + Send + 'static) -> WorkerHandle {
-        let thread = thread::spawn(f);
-        WorkerHandle {
-            name,
-            thread,
-            bomb: DropBomb::new(format!("WorkerHandle {} was not shutdown", name)),
-        }
-    }
-
-    pub fn shutdown(mut self) -> thread::Result<()> {
-        log::info!("waiting for {} to finish ...", self.name);
-        let name = self.name;
-        self.bomb.defuse();
-        let res = self.thread.join();
-        match &res {
-            Ok(()) => log::info!("... {} terminated with ok", name),
-            Err(_) => log::error!("... {} terminated with err", name),
-        }
-        res
+impl<I, O> Worker<I, O> {
+    pub fn sender(&self) -> &Sender<I> {
+        &self.sender
     }
-}
-
-/// Sets up worker channels in a deadlock-avoiding way.
-/// If one sets both input and output buffers to a fixed size,
-/// a worker might get stuck.
-fn worker_chan<I, O>(buf: usize) -> (Worker<I, O>, Receiver<I>, Sender<O>) {
-    let (input_sender, input_receiver) = bounded::<I>(buf);
-    let (output_sender, output_receiver) = unbounded::<O>();
-    (Worker { inp: input_sender, out: output_receiver }, input_receiver, output_sender)
+    pub fn receiver(&self) -> &Receiver<O> {
+        &self.receiver
+    }
 }
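
The XXX comment relies on a firm language guarantee: struct fields are dropped in declaration order. A quick check of that guarantee, mirroring the sender/_thread/receiver layout:

    struct Loud(&'static str);

    impl Drop for Loud {
        fn drop(&mut self) {
            println!("dropping {}", self.0);
        }
    }

    struct Worker {
        sender: Loud,
        _thread: Loud,
        receiver: Loud,
    }

    fn main() {
        let _w = Worker {
            sender: Loud("sender"),
            _thread: Loud("_thread"),
            receiver: Loud("receiver"),
        };
        // Prints: dropping sender, dropping _thread, dropping receiver --
        // the input channel closes first, then the thread is joined, and
        // the receiver outlives the join, exactly as the comment requires.
    }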