2019-04-28 20:16:40 +00:00
|
|
|
// Support for enforcing correct access to globals.
|
|
|
|
#ifndef FISH_GLOBAL_SAFETY_H
|
|
|
|
#define FISH_GLOBAL_SAFETY_H
|
|
|
|
|
|
|
|
#include "config.h" // IWYU pragma: keep
|
|
|
|
|
2019-04-28 22:00:36 +00:00
|
|
|
#include <atomic>
|
2019-04-28 20:16:40 +00:00
|
|
|
#include <cassert>
|
|
|
|
|
2019-10-13 22:50:48 +00:00
|
|
|
#include "common.h"
|
|
|
|
|
2019-04-28 20:16:40 +00:00
|
|
|
// fish is multithreaded. Global (which includes function and file-level statics) when used naively
|
|
|
|
// may therefore lead to data races. Use the following types to characterize and enforce correct
|
|
|
|
// access patterns.
|
|
|
|
|
|
|
|
namespace detail {
/// A non-copyable, non-movable empty value type.
/// Embed an instance of this in a type to pin it in place: the containing
/// type then cannot be copied or moved either, which is what we want for globals.
struct fixed_t {
    fixed_t() = default;
    fixed_t(const fixed_t &) = delete;
    fixed_t &operator=(const fixed_t &) = delete;
    fixed_t(fixed_t &&) = delete;
    fixed_t &operator=(fixed_t &&) = delete;
};
}  // namespace detail
|
|
|
|
|
|
|
|
/// A mainthread_t variable may only be accessed on the main thread.
|
|
|
|
template <typename T>
|
|
|
|
class mainthread_t : detail::fixed_t {
|
|
|
|
T value_{};
|
|
|
|
|
|
|
|
public:
|
|
|
|
mainthread_t(T value) : value_(std::move(value)) {}
|
|
|
|
mainthread_t() = default;
|
|
|
|
|
2019-05-05 00:32:40 +00:00
|
|
|
T *operator->() {
|
|
|
|
ASSERT_IS_MAIN_THREAD();
|
|
|
|
return &value_;
|
|
|
|
}
|
|
|
|
|
2019-04-28 20:16:40 +00:00
|
|
|
operator T &() {
|
|
|
|
ASSERT_IS_MAIN_THREAD();
|
|
|
|
return value_;
|
|
|
|
}
|
|
|
|
|
|
|
|
operator const T &() const {
|
|
|
|
ASSERT_IS_MAIN_THREAD();
|
|
|
|
return value_;
|
|
|
|
}
|
|
|
|
|
|
|
|
void operator=(T value) {
|
|
|
|
ASSERT_IS_MAIN_THREAD();
|
|
|
|
value_ = std::move(value);
|
|
|
|
}
|
|
|
|
};
|
|
|
|
|
|
|
|
/// A latch variable may only be set once, on the main thread.
|
|
|
|
/// The value is a immortal.
|
|
|
|
template <typename T>
|
|
|
|
class latch_t : detail::fixed_t {
|
|
|
|
T *value_{};
|
|
|
|
|
|
|
|
public:
|
|
|
|
operator T *() { return value_; }
|
|
|
|
operator const T *() const { return value_; }
|
|
|
|
|
|
|
|
T *operator->() { return value_; }
|
|
|
|
const T *operator->() const { return value_; }
|
|
|
|
|
2019-04-29 02:16:55 +00:00
|
|
|
void operator=(std::unique_ptr<T> value) {
|
2019-04-28 20:16:40 +00:00
|
|
|
ASSERT_IS_MAIN_THREAD();
|
|
|
|
assert(value_ == nullptr && "Latch variable initialized multiple times");
|
2019-04-29 01:13:55 +00:00
|
|
|
assert(value != nullptr && "Latch variable initialized with null");
|
2019-04-29 02:16:55 +00:00
|
|
|
// Note: deliberate leak.
|
|
|
|
value_ = value.release();
|
2019-04-29 01:13:55 +00:00
|
|
|
}
|
|
|
|
|
2019-04-29 02:16:55 +00:00
|
|
|
void operator=(T &&value) { *this = make_unique<T>(std::move(value)); }
|
|
|
|
|
2019-04-29 01:13:55 +00:00
|
|
|
template <typename... Args>
|
2020-11-22 13:39:48 +00:00
|
|
|
void emplace(Args &&...args) {
|
2019-04-29 02:16:55 +00:00
|
|
|
*this = make_unique<T>(std::forward<Args>(args)...);
|
2019-04-28 20:16:40 +00:00
|
|
|
}
|
|
|
|
};
|
|
|
|
|
2019-04-28 22:00:36 +00:00
|
|
|
/// An atomic wrapper whose loads and stores all use relaxed memory ordering.
/// Suitable for flags and counters that need atomicity but no cross-variable
/// ordering guarantees.
template <typename T>
class relaxed_atomic_t {
    std::atomic<T> contents_{};

   public:
    relaxed_atomic_t() = default;
    relaxed_atomic_t(T value) : contents_(value) {}

    // Relaxed read, also usable through a volatile reference.
    operator T() const volatile {
        return contents_.load(std::memory_order_relaxed);
    }

    // Relaxed writes.
    void operator=(T v) {
        contents_.store(v, std::memory_order_relaxed);
    }
    void operator=(T v) volatile {
        contents_.store(v, std::memory_order_relaxed);
    }

    // Perform a CAS operation, returning whether it succeeded.
    bool compare_exchange(T expected, T desired) {
        return contents_.compare_exchange_strong(expected, desired, std::memory_order_relaxed);
    }

    // Postincrement: yields the value before the increment.
    T operator++(int) {
        return contents_.fetch_add(1, std::memory_order_relaxed);
    }

    // Preincrement: yields the value after the increment.
    T operator++() {
        return contents_.fetch_add(1, std::memory_order_relaxed) + 1;
    }
};
|
|
|
|
|
|
|
|
/// Convenience alias: a bool accessed with relaxed atomic loads and stores.
using relaxed_atomic_bool_t = relaxed_atomic_t<bool>;
|
|
|
|
|
2019-04-28 20:16:40 +00:00
|
|
|
#endif
|