mirror of
https://github.com/fish-shell/fish-shell
Remove custom lock types in favor of native C++11 mutexes
Drop the custom RAII wrappers around pthread_mutex_t and pthread_cond_t in favor of the C++11 std::mutex, std::recursive_mutex, and std::condition_variable types.
This commit is contained in:
parent b1ac07a178
commit e76c1fd139

18 changed files with 74 additions and 250 deletions
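A minimal sketch of the pattern applied throughout the hunks below, using a hypothetical foo_t class (illustrative only; the real classes and members are the ones shown in the diff):

// Sketch only: the general before/after shape of this change.
#include <mutex>

class foo_t {
    std::mutex lock;  // replaces pthread_mutex_t + pthread_mutex_init/destroy
    int value = 0;

   public:
    // No constructor/destructor needed just to manage the mutex.
    void set(int v) {
        std::lock_guard<std::mutex> locker(lock);  // was: scoped_lock locker(lock);
        value = v;
    }  // unlocked automatically when locker is destroyed
};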
@@ -48,11 +48,7 @@ file_access_attempt_t access_file(const wcstring &path, int mode) {
 autoload_t::autoload_t(const wcstring &env_var_name_var,
                        command_removed_function_t cmd_removed_callback)
-    : lock(), env_var_name(env_var_name_var), command_removed(cmd_removed_callback) {
-    pthread_mutex_init(&lock, NULL);
-}
-
-autoload_t::~autoload_t() { pthread_mutex_destroy(&lock); }
+    : lock(), env_var_name(env_var_name_var), command_removed(cmd_removed_callback) {}

 void autoload_t::entry_was_evicted(wcstring key, autoload_function_t node) {
     // This should only ever happen on the main thread.
@@ -47,7 +47,7 @@ class env_vars_snapshot_t;
 class autoload_t : public lru_cache_t<autoload_t, autoload_function_t> {
    private:
     /// Lock for thread safety.
-    pthread_mutex_t lock;
+    std::mutex lock;
     /// The environment variable name.
     const wcstring env_var_name;
     /// The path from which we most recently autoloaded.
@@ -76,8 +76,6 @@ class autoload_t : public lru_cache_t<autoload_t, autoload_function_t> {
     // Create an autoload_t for the given environment variable name.
     autoload_t(const wcstring &env_var_name_var, command_removed_function_t callback);

-    ~autoload_t();
-
     /// Autoload the specified file, if it exists in the specified path. Do not load it multiple
     /// times unless its timestamp changes or parse_util_unload is called.
     /// Autoloading one file may unload another.
@@ -55,7 +55,7 @@ static owning_lock<wcstring_list_t> &get_transient_stack() {
 }

 static bool get_top_transient(wcstring *out_result) {
-    auto locked = get_transient_stack().acquire();
+    auto &&locked = get_transient_stack().acquire();
     wcstring_list_t &stack = locked.value;
     if (stack.empty()) {
         return false;
@@ -67,7 +67,7 @@ static bool get_top_transient(wcstring *out_result) {
 builtin_commandline_scoped_transient_t::builtin_commandline_scoped_transient_t(
     const wcstring &cmd) {
     ASSERT_IS_MAIN_THREAD();
-    auto locked = get_transient_stack().acquire();
+    auto &&locked = get_transient_stack().acquire();
     wcstring_list_t &stack = locked.value;
     stack.push_back(cmd);
     this->token = stack.size();
@@ -75,7 +75,7 @@ builtin_commandline_scoped_transient_t::builtin_commandline_scoped_transient_t(

 builtin_commandline_scoped_transient_t::~builtin_commandline_scoped_transient_t() {
     ASSERT_IS_MAIN_THREAD();
-    auto locked = get_transient_stack().acquire();
+    auto &&locked = get_transient_stack().acquire();
     wcstring_list_t &stack = locked.value;
     assert(this->token == stack.size());
     stack.pop_back();
101 src/common.cpp
@@ -66,7 +66,7 @@ static pid_t initial_fg_process_group = -1;
 /// This struct maintains the current state of the terminal size. It is updated on demand after
 /// receiving a SIGWINCH. Do not touch this struct directly, it's managed with a rwlock. Use
 /// common_get_width()/common_get_height().
-static pthread_mutex_t termsize_lock = PTHREAD_MUTEX_INITIALIZER;
+static std::mutex termsize_lock;
 static struct winsize termsize = {USHRT_MAX, USHRT_MAX, USHRT_MAX, USHRT_MAX};
 static volatile bool termsize_valid = false;

@@ -1975,102 +1975,15 @@ void assert_is_background_thread(const char *who) {
 }

 void assert_is_locked(void *vmutex, const char *who, const char *caller) {
-    pthread_mutex_t *mutex = static_cast<pthread_mutex_t *>(vmutex);
-    if (0 == pthread_mutex_trylock(mutex)) {
+    std::mutex *mutex = static_cast<std::mutex *>(vmutex);
+
+    // Note that std::mutex.try_lock() is allowed to return false when the mutex isn't
+    // actually locked; fortunately we are checking the opposite so we're safe.
+    if (mutex->try_lock()) {
         debug(0, "%s is not locked when it should be in '%s'", who, caller);
         debug(0, "Break on debug_thread_error to debug.");
         debug_thread_error();
-        pthread_mutex_unlock(mutex);
-    }
-}
-
-void scoped_lock::lock(void) {
-    assert(!locked);  //!OCLINT(multiple unary operator)
-    ASSERT_IS_NOT_FORKED_CHILD();
-    DIE_ON_FAILURE(pthread_mutex_lock(lock_obj));
-    locked = true;
-}
-
-void scoped_lock::unlock(void) {
-    assert(locked);
-    ASSERT_IS_NOT_FORKED_CHILD();
-    DIE_ON_FAILURE(pthread_mutex_unlock(lock_obj));
-    locked = false;
-}
-
-scoped_lock::scoped_lock(pthread_mutex_t &mutex) : lock_obj(&mutex), locked(false) { this->lock(); }
-
-scoped_lock::scoped_lock(mutex_lock_t &lock) : lock_obj(&lock.mutex), locked(false) {
-    this->lock();
-}
-
-scoped_lock::~scoped_lock() {
-    if (locked) this->unlock();
-}
-
-void scoped_rwlock::lock(void) {
-    assert(!(locked || locked_shared));  //!OCLINT(multiple unary operator)
-    ASSERT_IS_NOT_FORKED_CHILD();
-    DIE_ON_FAILURE(pthread_rwlock_rdlock(rwlock_obj));
-    locked = true;
-}
-
-void scoped_rwlock::unlock(void) {
-    assert(locked);
-    ASSERT_IS_NOT_FORKED_CHILD();
-    DIE_ON_FAILURE(pthread_rwlock_unlock(rwlock_obj));
-    locked = false;
-}
-
-void scoped_rwlock::lock_shared(void) {
-    assert(!(locked || locked_shared));  //!OCLINT(multiple unary operator)
-    ASSERT_IS_NOT_FORKED_CHILD();
-    DIE_ON_FAILURE(pthread_rwlock_wrlock(rwlock_obj));
-    locked_shared = true;
-}
-
-void scoped_rwlock::unlock_shared(void) {
-    assert(locked_shared);
-    ASSERT_IS_NOT_FORKED_CHILD();
-    DIE_ON_FAILURE(pthread_rwlock_unlock(rwlock_obj));
-    locked_shared = false;
-}
-
-#if 0
-// This is not currently used.
-void scoped_rwlock::upgrade(void) {
-    assert(locked_shared);
-    ASSERT_IS_NOT_FORKED_CHILD();
-    DIE_ON_FAILURE(pthread_rwlock_unlock(rwlock_obj));
-    locked = false;
-    DIE_ON_FAILURE(pthread_rwlock_wrlock(rwlock_obj));
-    locked_shared = true;
-}
-#endif
-
-scoped_rwlock::scoped_rwlock(pthread_rwlock_t &rwlock, bool shared)
-    : rwlock_obj(&rwlock), locked(false), locked_shared(false) {
-    if (shared) {
-        this->lock_shared();
-    } else {
-        this->lock();
-    }
-}
-
-scoped_rwlock::scoped_rwlock(rwlock_t &rwlock, bool shared)
-    : rwlock_obj(&rwlock.rwlock), locked(false), locked_shared(false) {
-    if (shared) {
-        this->lock_shared();
-    } else {
-        this->lock();
-    }
-}
-
-scoped_rwlock::~scoped_rwlock() {
-    if (locked) {
-        this->unlock();
-    } else if (locked_shared) {
-        this->unlock_shared();
+        mutex->unlock();
     }
 }

90 src/common.h
@@ -17,6 +17,7 @@
 #endif

 #include <memory>
+#include <mutex>
 #include <sstream>
 #include <string>
 #include <type_traits>
@@ -199,12 +200,12 @@ extern bool has_working_tty_timestamps;
 // from within a `switch` block. As of the time I'm writing this oclint doesn't recognize the
 // `__attribute__((noreturn))` on the exit_without_destructors() function.
 // TODO: we use C++11 [[noreturn]] now, does that change things?
-#define FATAL_EXIT()                                \
-    {                                               \
-        char exit_read_buff;                        \
-        show_stackframe(L'E');                      \
+#define FATAL_EXIT()                                    \
+    {                                                   \
+        char exit_read_buff;                            \
+        show_stackframe(L'E');                          \
         ignore_result(read(0, &exit_read_buff, 1));     \
-        exit_without_destructors(1);                \
+        exit_without_destructors(1);                    \
     }

 /// Exit the program at once after emitting an error message and stack trace if possible.
@@ -497,70 +498,8 @@ class null_terminated_array_t {
 // null_terminated_array_t<char_t>.
 void convert_wide_array_to_narrow(const null_terminated_array_t<wchar_t> &arr,
                                   null_terminated_array_t<char> *output);
-
-class mutex_lock_t {
-   public:
-    pthread_mutex_t mutex;
-    mutex_lock_t() { DIE_ON_FAILURE(pthread_mutex_init(&mutex, NULL)); }
-
-    ~mutex_lock_t() { DIE_ON_FAILURE(pthread_mutex_destroy(&mutex)); }
-};
-
-// Basic scoped lock class.
-class scoped_lock {
-    pthread_mutex_t *lock_obj;
-    bool locked;
-
-    // No copying.
-    scoped_lock &operator=(const scoped_lock &) = delete;
-    scoped_lock(const scoped_lock &) = delete;
-
-   public:
-    scoped_lock(scoped_lock &&rhs) : lock_obj(rhs.lock_obj), locked(rhs.locked) {
-        // we acquire any locked state
-        // ensure the rhs doesn't try to unlock
-        rhs.locked = false;
-    }
-
-    void lock(void);
-    void unlock(void);
-    explicit scoped_lock(pthread_mutex_t &mutex);
-    explicit scoped_lock(mutex_lock_t &lock);
-    ~scoped_lock();
-};
-
-class rwlock_t {
-   public:
-    pthread_rwlock_t rwlock;
-    rwlock_t() { DIE_ON_FAILURE(pthread_rwlock_init(&rwlock, NULL)); }
-
-    ~rwlock_t() { DIE_ON_FAILURE(pthread_rwlock_destroy(&rwlock)); }
-
-    rwlock_t &operator=(const rwlock_t &) = delete;
-    rwlock_t(const rwlock_t &) = delete;
-};
-
-// Scoped lock class for rwlocks.
-class scoped_rwlock {
-    pthread_rwlock_t *rwlock_obj;
-    bool locked;
-    bool locked_shared;
-
-    // No copying.
-    scoped_rwlock &operator=(const scoped_lock &) = delete;
-    explicit scoped_rwlock(const scoped_lock &) = delete;
-
-   public:
-    void lock(void);
-    void unlock(void);
-    void lock_shared(void);
-    void unlock_shared(void);
-    // Upgrade shared lock to exclusive. Equivalent to `lock.unlock_shared(); lock.lock();`.
-    void upgrade(void);
-    explicit scoped_rwlock(pthread_rwlock_t &rwlock, bool shared = false);
-    explicit scoped_rwlock(rwlock_t &rwlock, bool shared = false);
-    ~scoped_rwlock();
-};
+typedef std::lock_guard<std::mutex> scoped_lock;
+typedef std::lock_guard<std::recursive_mutex> scoped_rlock;

 // An object wrapping a scoped lock and a value
 // This is returned from owning_lock.acquire()
@@ -575,7 +514,7 @@ class scoped_rwlock {
 template <typename DATA>
 class acquired_lock {
     scoped_lock lock;
-    acquired_lock(mutex_lock_t &lk, DATA *v) : lock(lk), value(*v) {}
+    acquired_lock(std::mutex &lk, DATA *v) : lock(lk), value(*v) {}

     template <typename T>
     friend class owning_lock;
@@ -600,7 +539,7 @@ class owning_lock {
     owning_lock(owning_lock &&) = default;
     owning_lock &operator=(owning_lock &&) = default;

-    mutex_lock_t lock;
+    std::mutex lock;
     DATA data;

    public:
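A hedged usage sketch of the owning_lock / acquired_lock pair after this hunk: acquire() locks the internal std::mutex through the scoped_lock typedef and returns an object whose .value refers to the protected data, which is the `auto &&locker = ...acquire();` idiom the call-site hunks switch to. The guarded type and function below are made up; only owning_lock, acquire(), and .value come from this diff:

// Sketch (assumed usage, mirroring the acquire()/.value call sites in this commit).
#include <vector>

#include "common.h"  // owning_lock, acquired_lock, scoped_lock

static owning_lock<std::vector<int>> s_pending_ids;  // hypothetical guarded data

void push_pending_id(int id) {
    auto &&locker = s_pending_ids.acquire();  // locks the internal std::mutex
    locker.value.push_back(id);               // access the data while the lock is held
}                                             // lock released when locker goes out of scope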
@@ -883,10 +822,13 @@ enum {
    and __extension__ to work around the problem, if the workaround is
    known to be needed. */
 #if 3 < __GNUC__ + (4 <= __GNUC_MINOR__)
-# define ignore_result(x) \
-    (__extension__ ({ __typeof__ (x) __x = (x); (void) __x; }))
+#define ignore_result(x)         \
+    (__extension__({             \
+        __typeof__(x) __x = (x); \
+        (void)__x;               \
+    }))
 #else
-# define ignore_result(x) ((void) (x))
+#define ignore_result(x) ((void)(x))
 #endif

 #endif
@@ -178,7 +178,7 @@ static bool compare_completions_by_order(const completion_entry_t *p1,
 }

 /// The lock that guards the list of completion entries.
-static pthread_mutex_t completion_lock = PTHREAD_MUTEX_INITIALIZER;
+static std::mutex completion_lock;

 void completion_entry_t::add_option(const complete_entry_opt_t &opt) {
     ASSERT_IS_LOCKED(completion_lock);
@@ -1552,7 +1552,7 @@ wcstring complete_print() {
 }

 /// Completion "wrapper" support. The map goes from wrapping-command to wrapped-command-list.
-static pthread_mutex_t wrapper_lock = PTHREAD_MUTEX_INITIALIZER;
+static std::mutex wrapper_lock;
 typedef std::map<wcstring, wcstring_list_t> wrapper_map_t;
 static wrapper_map_t &wrap_map() {
     ASSERT_IS_LOCKED(wrapper_lock);
@@ -168,7 +168,11 @@ class env_node_t {
     const env_var_t find_entry(const wcstring &key);
 };

-static pthread_mutex_t env_lock = PTHREAD_MUTEX_INITIALIZER;
+class variable_entry_t {
+    wcstring value; /**< Value of the variable */
+};
+
+static std::mutex env_lock;

 static bool local_scope_exports(const env_node_t *n);

@@ -263,11 +263,7 @@ static bool append_file_entry(fish_message_type_t type, const wcstring &key_in,
 }

 env_universal_t::env_universal_t(const wcstring &path)
-    : explicit_vars_path(path), tried_renaming(false), last_read_file(kInvalidFileID) {
-    DIE_ON_FAILURE(pthread_mutex_init(&lock, NULL));
-}
-
-env_universal_t::~env_universal_t() { pthread_mutex_destroy(&lock); }
+    : explicit_vars_path(path), tried_renaming(false), last_read_file(kInvalidFileID) {}

 env_var_t env_universal_t::get(const wcstring &name) const {
     var_table_t::const_iterator where = vars.find(name);
@@ -39,7 +39,7 @@ class env_universal_t {
     // Path that we save to. If empty, use the default.
     const wcstring explicit_vars_path;

-    mutable pthread_mutex_t lock;
+    mutable std::mutex lock;
     bool tried_renaming;
     bool load_from_path(const wcstring &path, callback_data_list_t &callbacks);
     void load_from_fd(int fd, callback_data_list_t &callbacks);
@@ -70,7 +70,6 @@ class env_universal_t {

    public:
     explicit env_universal_t(const wcstring &path);
-    ~env_universal_t();

     // Get the value of the variable with the specified name.
     env_var_t get(const wcstring &name) const;
@@ -378,7 +378,6 @@ int main(int argc, char **argv) {
     proc_init();
     event_init();
     builtin_init();
-    function_init();
     misc_init();
     reader_init();
     history_init();
@@ -4335,7 +4335,6 @@ int main(int argc, char **argv) {
     setup_fork_guards();
     proc_init();
     event_init();
-    function_init();
     builtin_init();
     env_init();
     misc_init();
@@ -35,7 +35,7 @@ static function_map_t loaded_functions;
 static std::set<wcstring> function_tombstones;

 /// Lock for functions.
-static pthread_mutex_t functions_lock;
+static std::recursive_mutex functions_lock;

 static bool function_remove_ignore_autoload(const wcstring &name, bool tombstone = true);

@@ -55,7 +55,7 @@ static bool is_autoload = false;
 /// loaded.
 static int load(const wcstring &name) {
     ASSERT_IS_MAIN_THREAD();
-    scoped_lock locker(functions_lock);
+    scoped_rlock locker(functions_lock);
     bool was_autoload = is_autoload;
     int res;

@@ -106,17 +106,6 @@ static void autoload_names(std::set<wcstring> &names, int get_hidden) {
     }
 }

-void function_init() {
-    // PCA: This recursive lock was introduced early in my work. I would like to make this a
-    // non-recursive lock but I haven't fully investigated all the call paths (for autoloading
-    // functions, etc.).
-    pthread_mutexattr_t a;
-    DIE_ON_FAILURE(pthread_mutexattr_init(&a));
-    DIE_ON_FAILURE(pthread_mutexattr_settype(&a, PTHREAD_MUTEX_RECURSIVE));
-    DIE_ON_FAILURE(pthread_mutex_init(&functions_lock, &a));
-    DIE_ON_FAILURE(pthread_mutexattr_destroy(&a));
-}
-
 static std::map<wcstring, env_var_t> snapshot_vars(const wcstring_list_t &vars) {
     std::map<wcstring, env_var_t> result;
     for (wcstring_list_t::const_iterator it = vars.begin(), end = vars.end(); it != end; ++it) {
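The deleted function_init() existed only to build a recursive pthread mutex at runtime; std::recursive_mutex needs no such setup, which is why the call sites in the fish and fish_tests main() hunks above drop the call entirely. A small self-contained sketch of the re-entrant behaviour the functions_lock call paths rely on (names here are illustrative, not from this commit):

// Sketch: a recursive mutex can be re-locked by the thread that already holds it,
// which is what the functions_lock call paths depend on (e.g. function_remove
// running while function_add holds the lock).
#include <mutex>

static std::recursive_mutex s_lock;                          // no init function required
typedef std::lock_guard<std::recursive_mutex> scoped_rlock;  // as typedef'd in common.h

static void remove_item() {
    scoped_rlock locker(s_lock);  // re-entrant: fine even if the caller holds s_lock
    // ...
}

void add_item() {
    scoped_rlock locker(s_lock);
    remove_item();  // would deadlock with a plain std::mutex
}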
@@ -153,7 +142,7 @@ void function_add(const function_data_t &data, const parser_t &parser, int defin

     CHECK(!data.name.empty(), );  //!OCLINT(multiple unary operator)
     CHECK(data.definition, );
-    scoped_lock locker(functions_lock);
+    scoped_rlock locker(functions_lock);

     // Remove the old function.
     function_remove(data.name);
@@ -174,28 +163,28 @@ void function_add(const function_data_t &data, const parser_t &parser, int defin

 int function_exists(const wcstring &cmd) {
     if (parser_keywords_is_reserved(cmd)) return 0;
-    scoped_lock locker(functions_lock);
+    scoped_rlock locker(functions_lock);
     load(cmd);
     return loaded_functions.find(cmd) != loaded_functions.end();
 }

 void function_load(const wcstring &cmd) {
     if (!parser_keywords_is_reserved(cmd)) {
-        scoped_lock locker(functions_lock);
+        scoped_rlock locker(functions_lock);
         load(cmd);
     }
 }

 int function_exists_no_autoload(const wcstring &cmd, const env_vars_snapshot_t &vars) {
     if (parser_keywords_is_reserved(cmd)) return 0;
-    scoped_lock locker(functions_lock);
+    scoped_rlock locker(functions_lock);
     return loaded_functions.find(cmd) != loaded_functions.end() ||
            function_autoloader.can_load(cmd, vars);
 }

 static bool function_remove_ignore_autoload(const wcstring &name, bool tombstone) {
     // Note: the lock may be held at this point, but is recursive.
-    scoped_lock locker(functions_lock);
+    scoped_rlock locker(functions_lock);

     function_map_t::iterator iter = loaded_functions.find(name);

@@ -229,7 +218,7 @@ static const function_info_t *function_get(const wcstring &name) {
 }

 bool function_get_definition(const wcstring &name, wcstring *out_definition) {
-    scoped_lock locker(functions_lock);
+    scoped_rlock locker(functions_lock);
     const function_info_t *func = function_get(name);
     if (func && out_definition) {
         out_definition->assign(func->definition);
@@ -238,26 +227,26 @@ bool function_get_definition(const wcstring &name, wcstring *out_definition) {
 }

 wcstring_list_t function_get_named_arguments(const wcstring &name) {
-    scoped_lock locker(functions_lock);
+    scoped_rlock locker(functions_lock);
     const function_info_t *func = function_get(name);
     return func ? func->named_arguments : wcstring_list_t();
 }

 std::map<wcstring, env_var_t> function_get_inherit_vars(const wcstring &name) {
-    scoped_lock locker(functions_lock);
+    scoped_rlock locker(functions_lock);
     const function_info_t *func = function_get(name);
     return func ? func->inherit_vars : std::map<wcstring, env_var_t>();
 }

 bool function_get_shadow_scope(const wcstring &name) {
-    scoped_lock locker(functions_lock);
+    scoped_rlock locker(functions_lock);
     const function_info_t *func = function_get(name);
     return func ? func->shadow_scope : false;
 }

 bool function_get_desc(const wcstring &name, wcstring *out_desc) {
     // Empty length string goes to NULL.
-    scoped_lock locker(functions_lock);
+    scoped_rlock locker(functions_lock);
     const function_info_t *func = function_get(name);
     if (out_desc && func && !func->description.empty()) {
         out_desc->assign(_(func->description.c_str()));
@@ -269,7 +258,7 @@ bool function_get_desc(const wcstring &name, wcstring *out_desc) {

 void function_set_desc(const wcstring &name, const wcstring &desc) {
     load(name);
-    scoped_lock locker(functions_lock);
+    scoped_rlock locker(functions_lock);
     function_map_t::iterator iter = loaded_functions.find(name);
     if (iter != loaded_functions.end()) {
         iter->second.description = desc;
@@ -278,7 +267,7 @@ void function_set_desc(const wcstring &name, const wcstring &desc) {

 bool function_copy(const wcstring &name, const wcstring &new_name) {
     bool result = false;
-    scoped_lock locker(functions_lock);
+    scoped_rlock locker(functions_lock);
     function_map_t::const_iterator iter = loaded_functions.find(name);
     if (iter != loaded_functions.end()) {
         // This new instance of the function shouldn't be tied to the definition file of the
@@ -293,7 +282,7 @@ bool function_copy(const wcstring &name, const wcstring &new_name) {

 wcstring_list_t function_get_names(int get_hidden) {
     std::set<wcstring> names;
-    scoped_lock locker(functions_lock);
+    scoped_rlock locker(functions_lock);
     autoload_names(names, get_hidden);

     function_map_t::const_iterator iter;
@@ -310,19 +299,19 @@ wcstring_list_t function_get_names(int get_hidden) {
 }

 const wchar_t *function_get_definition_file(const wcstring &name) {
-    scoped_lock locker(functions_lock);
+    scoped_rlock locker(functions_lock);
     const function_info_t *func = function_get(name);
     return func ? func->definition_file : NULL;
 }

 bool function_is_autoloaded(const wcstring &name) {
-    scoped_lock locker(functions_lock);
+    scoped_rlock locker(functions_lock);
     const function_info_t *func = function_get(name);
     return func->is_autoload;
 }

 int function_get_definition_offset(const wcstring &name) {
-    scoped_lock locker(functions_lock);
+    scoped_rlock locker(functions_lock);
     const function_info_t *func = function_get(name);
     return func ? func->definition_offset : -1;
 }
@@ -708,7 +708,7 @@ history_t &history_collection_t::get_creating(const wcstring &name) {
     // Return a history for the given name, creating it if necessary
     // Note that histories are currently never deleted, so we can return a reference to them without
     // using something like shared_ptr
-    auto hs = histories.acquire();
+    auto &&hs = histories.acquire();
     std::unique_ptr<history_t> &hist = hs.value[name];
     if (!hist) {
         hist = make_unique<history_t>(name);
@@ -732,11 +732,7 @@ history_t::history_t(const wcstring &pname)
       boundary_timestamp(time(NULL)),
       countdown_to_vacuum(-1),
       loaded_old(false),
-      chaos_mode(false) {
-    pthread_mutex_init(&lock, NULL);
-}
-
-history_t::~history_t() { pthread_mutex_destroy(&lock); }
+      chaos_mode(false) {}

 void history_t::add(const history_item_t &item, bool pending) {
     scoped_lock locker(lock);
@@ -1780,7 +1776,7 @@ void history_init() {}

 void history_collection_t::save() {
     // Save all histories
-    auto h = histories.acquire();
+    auto &&h = histories.acquire();
     for (auto &p : h.value) {
         p.second->save();
     }
@@ -1806,9 +1802,8 @@ wcstring history_session_id() {
     } else if (valid_var_name(session_id)) {
         result = session_id;
     } else {
-        debug(0,
-              _(L"History session ID '%ls' is not a valid variable name. "
-                L"Falling back to `%ls`."),
+        debug(0, _(L"History session ID '%ls' is not a valid variable name. "
+                   L"Falling back to `%ls`."),
               session_id.c_str(), result.c_str());
     }
 }
@@ -115,7 +115,7 @@ class history_t {
     void add(const history_item_t &item, bool pending = false);

     // Lock for thread safety.
-    pthread_mutex_t lock;
+    std::mutex lock;

     // Internal function.
     void clear_file_state();
@@ -209,7 +209,6 @@ class history_t {

    public:
     explicit history_t(const wcstring &);  // constructor
-    ~history_t();                          // destructor

     // Returns history with the given name, creating it if necessary.
     static history_t &history_with_name(const wcstring &name);
@@ -21,7 +21,7 @@ static const wchar_t *intern_with_dup(const wchar_t *in, bool dup) {
     if (!in) return NULL;

     debug(5, L"intern %ls", in);
-    auto lock_string_table = string_table.acquire();
+    auto &&lock_string_table = string_table.acquire();
     std::vector<const wchar_t *> &string_table = lock_string_table.value;

     const wchar_t *result;
@@ -8,6 +8,7 @@
 #include <sys/types.h>
 #include <unistd.h>

+#include <condition_variable>
 #include <queue>

 #include "common.h"
@@ -70,11 +71,9 @@ static owning_lock<thread_data_t> s_spawn_requests;
 static owning_lock<std::queue<spawn_request_t>> s_result_queue;

 // "Do on main thread" support.
-static pthread_mutex_t s_main_thread_performer_lock =
-    PTHREAD_MUTEX_INITIALIZER;  // protects the main thread requests
-static pthread_cond_t s_main_thread_performer_cond;  // protects the main thread requests
-static pthread_mutex_t s_main_thread_request_q_lock =
-    PTHREAD_MUTEX_INITIALIZER;  // protects the queue
+static std::mutex s_main_thread_performer_lock;               // protects the main thread requests
+static std::condition_variable s_main_thread_performer_cond;  // protects the main thread requests
+static std::mutex s_main_thread_request_q_lock;               // protects the queue
 static std::queue<main_thread_request_t *> s_main_thread_request_queue;

 // Notifying pipes.
@@ -85,9 +84,6 @@ static void iothread_init(void) {
     if (!inited) {
         inited = true;

-        // Initialize some locks.
-        DIE_ON_FAILURE(pthread_cond_init(&s_main_thread_performer_cond, NULL));
-
         // Initialize the completion pipes.
         int pipes[2] = {0, 0};
         assert_with_errno(pipe(pipes) != -1);
@@ -100,7 +96,7 @@ static void iothread_init(void) {
 }

 static bool dequeue_spawn_request(spawn_request_t *result) {
-    auto locker = s_spawn_requests.acquire();
+    auto &&locker = s_spawn_requests.acquire();
     thread_data_t &td = locker.value;
     if (!td.request_queue.empty()) {
         *result = std::move(td.request_queue.front());
@@ -183,7 +179,7 @@ int iothread_perform_impl(void_function_t &&func, void_function_t &&completion)
     bool spawn_new_thread = false;
     {
         // Lock around a local region.
-        auto locker = s_spawn_requests.acquire();
+        auto &&locker = s_spawn_requests.acquire();
         thread_data_t &td = locker.value;
         td.request_queue.push(std::move(req));
         if (td.thread_count < IO_MAX_THREADS) {
@@ -295,7 +291,7 @@ static void iothread_service_main_thread_requests(void) {
         // Because the waiting thread performs step 1 under the lock, if we take the lock, we avoid
         // posting before the waiting thread is waiting.
         scoped_lock broadcast_lock(s_main_thread_performer_lock);
-        DIE_ON_FAILURE(pthread_cond_broadcast(&s_main_thread_performer_cond));
+        s_main_thread_performer_cond.notify_all();
     }
 }

@@ -337,12 +333,11 @@ void iothread_perform_on_main(void_function_t &&func) {
     assert_with_errno(write_loop(s_write_pipe, &wakeup_byte, sizeof wakeup_byte) != -1);

     // Wait on the condition, until we're done.
-    scoped_lock perform_lock(s_main_thread_performer_lock);
+    std::unique_lock<std::mutex> perform_lock(s_main_thread_performer_lock);
     while (!req.done) {
         // It would be nice to support checking for cancellation here, but the clients need a
         // deterministic way to clean up to avoid leaks
-        DIE_ON_FAILURE(
-            pthread_cond_wait(&s_main_thread_performer_cond, &s_main_thread_performer_lock));
+        s_main_thread_performer_cond.wait(perform_lock);
     }

     // Ok, the request must now be done.
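This hunk is also why the wait site uses std::unique_lock rather than the scoped_lock (std::lock_guard) typedef: std::condition_variable::wait() has to unlock and relock the mutex, which a lock_guard cannot do. A condensed sketch of the same wait/notify shape, with simplified stand-in names rather than the actual fish globals:

// Sketch of the condition-variable pattern used by iothread_perform_on_main /
// iothread_service_main_thread_requests after this change.
#include <condition_variable>
#include <mutex>

static std::mutex s_lock;
static std::condition_variable s_cond;
static bool s_done = false;  // stands in for req.done

void waiter() {
    std::unique_lock<std::mutex> lock(s_lock);  // unique_lock: wait() must be able to unlock it
    while (!s_done) {
        s_cond.wait(lock);  // atomically unlocks, sleeps, relocks on wakeup
    }
}

void notifier() {
    std::lock_guard<std::mutex> lock(s_lock);  // take the lock before notifying, as the
    s_done = true;                             // comment in the hunk above explains
    s_cond.notify_all();                       // replaces pthread_cond_broadcast()
}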
@@ -161,7 +161,7 @@ int proc_get_last_status() { return last_status; }
 static owning_lock<std::vector<bool>> locked_consumed_job_ids;

 job_id_t acquire_job_id(void) {
-    auto locker = locked_consumed_job_ids.acquire();
+    auto &&locker = locked_consumed_job_ids.acquire();
     std::vector<bool> &consumed_job_ids = locker.value;

     // Find the index of the first 0 slot.
@@ -181,7 +181,7 @@ job_id_t acquire_job_id(void) {

 void release_job_id(job_id_t jid) {
     assert(jid > 0);
-    auto locker = locked_consumed_job_ids.acquire();
+    auto &&locker = locked_consumed_job_ids.acquire();
     std::vector<bool> &consumed_job_ids = locker.value;
     size_t slot = (size_t)(jid - 1), count = consumed_job_ids.size();

@@ -432,7 +432,7 @@ const wcstring &wgettext(const wchar_t *in) {
     wcstring key = in;

     wgettext_init_if_necessary();
-    auto wmap = wgettext_map.acquire();
+    auto &&wmap = wgettext_map.acquire();
     wcstring &val = wmap.value[key];
     if (val.empty()) {
         cstring mbs_in = wcs2string(key);