Mirror of https://github.com/fish-shell/fish-shell (synced 2025-01-15 14:34:05 +00:00)

parent d73ee4d54b
commit 87971e1f2e

8 changed files with 20 additions and 20 deletions

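Every hunk below makes the same kind of change: a narrow "..." string literal passed to FLOG, FLOGF, or debug() becomes a wide L"..." literal (and a narrow '.' char literal becomes L'.'), keeping the log arguments wide throughout. A minimal sketch of why the L prefix matters, assuming a hypothetical wchar_t-based logger (log_wide below is an illustration, not fish's actual FLOG/FLOGF implementation):

    #include <cstdio>
    #include <cwchar>

    // Minimal sketch, not fish's actual FLOG/FLOGF: a logger whose format
    // string is wchar_t-based, so callers must pass wide L"..." literals.
    template <typename... Args>
    static void log_wide(const wchar_t *fmt, Args... args) {
        wchar_t buf[512];
        std::swprintf(buf, sizeof buf / sizeof *buf, fmt, args...);
        std::fwprintf(stderr, L"%ls\n", buf);
    }

    int main() {
        // A narrow "..." literal would not convert to const wchar_t * here,
        // so the call would not compile; the L prefix keeps the literal and
        // the parameter type in agreement.
        log_wide(L"%ls called off of main thread.", L"example_function");
        return 0;
    }
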
@@ -2124,7 +2124,7 @@ int create_directory(const wcstring &d) {
 [[gnu::noinline]] void bugreport() {
     FLOG(error, _(L"This is a bug. Break on 'bugreport' to debug."));
-    FLOG(error, _(L"If you can reproduce it, please report: "), PACKAGE_BUGREPORT, '.');
+    FLOG(error, _(L"If you can reproduce it, please report: "), PACKAGE_BUGREPORT, L'.');
 }

 wcstring format_size(long long sz) {

@@ -2313,24 +2313,24 @@ bool is_main_thread() { return thread_id() == 1; }
 void assert_is_main_thread(const char *who) {
     if (!is_main_thread() && !thread_asserts_cfg_for_testing) {
-        FLOGF(error, "%s called off of main thread.", who);
-        FLOGF(error, "Break on debug_thread_error to debug.");
+        FLOGF(error, L"%s called off of main thread.", who);
+        FLOGF(error, L"Break on debug_thread_error to debug.");
         debug_thread_error();
     }
 }

 void assert_is_not_forked_child(const char *who) {
     if (is_forked_child()) {
-        FLOGF(error, "%s called in a forked child.", who);
-        FLOGF(error, "Break on debug_thread_error to debug.");
+        FLOGF(error, L"%s called in a forked child.", who);
+        FLOG(error, L"Break on debug_thread_error to debug.");
         debug_thread_error();
     }
 }

 void assert_is_background_thread(const char *who) {
     if (is_main_thread() && !thread_asserts_cfg_for_testing) {
-        FLOGF(error, "%s called on the main thread (may block!).", who);
-        FLOGF(error, "Break on debug_thread_error to debug.");
+        FLOGF(error, L"%s called on the main thread (may block!).", who);
+        FLOG(error, L"Break on debug_thread_error to debug.");
         debug_thread_error();
     }
 }

@@ -2341,8 +2341,8 @@ void assert_is_locked(void *vmutex, const char *who, const char *caller) {
     // Note that std::mutex.try_lock() is allowed to return false when the mutex isn't
     // actually locked; fortunately we are checking the opposite so we're safe.
     if (mutex->try_lock()) {
-        FLOGF(error, "%s is not locked when it should be in '%s'", who, caller);
-        FLOGF(error, "Break on debug_thread_error to debug.");
+        FLOGF(error, L"%s is not locked when it should be in '%s'", who, caller);
+        FLOG(error, L"Break on debug_thread_error to debug.");
         debug_thread_error();
         mutex->unlock();
     }

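The comment in the assert_is_locked hunk above is worth unpacking: std::mutex::try_lock() may fail spuriously, but because the assertion only reports an error when try_lock() succeeds (i.e. when the mutex was demonstrably free), a spurious failure merely keeps the check silent and never produces a false alarm. A self-contained sketch of that pattern, using a hypothetical assert_locked() rather than fish's actual assert_is_locked():

    #include <cstdio>
    #include <mutex>

    // Hypothetical stand-in for fish's assert_is_locked(): complain only when
    // we can prove the mutex is NOT held. try_lock() may fail spuriously, but
    // a spurious failure only silences the check; it can never misfire.
    static void assert_locked(std::mutex &m, const char *who) {
        if (m.try_lock()) {
            // We acquired the mutex, so the caller did not hold it as required.
            std::fprintf(stderr, "%s is not locked when it should be\n", who);
            m.unlock();
        }
    }

    int main() {
        std::mutex m;
        assert_locked(m, "demo");  // mutex is free: the check fires and reports.
        // When the mutex is genuinely held, try_lock() returns false (or fails
        // spuriously) and assert_locked() stays quiet, which is exactly what a
        // debug-only assertion wants.
        return 0;
    }
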
@@ -369,7 +369,7 @@ static bool run_internal_process(process_t *p, std::string outdata, std::string
     p->internal_proc_ = std::make_shared<internal_proc_t>();
     f->internal_proc = p->internal_proc_;

-    FLOGF(proc_internal_proc, "Created internal proc %llu to write output for proc '%ls'",
+    FLOGF(proc_internal_proc, L"Created internal proc %llu to write output for proc '%ls'",
           p->internal_proc_->get_id(), p->argv0());

     // Resolve the IO chain.

@@ -202,7 +202,7 @@ void iothread_service_completion() {
     } else if (wakeup_byte == IO_SERVICE_RESULT_QUEUE) {
         iothread_service_result_queue();
     } else {
-        FLOGF(error, "Unknown wakeup byte %02x in %s", wakeup_byte, __FUNCTION__);
+        FLOGF(error, L"Unknown wakeup byte %02x in %s", wakeup_byte, __FUNCTION__);
     }
 }

@@ -8,7 +8,7 @@
 #define PARSE_ASSERT(a) assert(a)
 #define PARSER_DIE()                   \
     do {                               \
-        FLOG(error, "Parser dying!");  \
+        FLOG(error, L"Parser dying!"); \
         exit_without_destructors(-1); \
     } while (0)

@@ -1382,7 +1382,7 @@ parse_execution_result_t parse_execution_context_t::eval_node(tnode_t<g::stateme
     } else if (auto switchstat = statement.try_get_child<g::switch_statement, 0>()) {
         status = this->run_switch_statement(switchstat);
     } else {
-        FLOGF(error, "Unexpected node %ls found in %s", statement.node()->describe().c_str(),
+        FLOGF(error, L"Unexpected node %ls found in %s", statement.node()->describe().c_str(),
               __FUNCTION__);
         abort();
     }

@@ -378,7 +378,7 @@ const production_element_t *parse_productions::production_for_token(parse_token_
                                                                     const parse_token_t &input2,
                                                                     parse_node_tag_t *out_tag) {
     // this is **extremely** chatty
-    debug(6, "Resolving production for %ls with input token <%ls>",
+    debug(6, L"Resolving production for %ls with input token <%ls>",
           token_type_description(node_type), input1.describe().c_str());

     // Fetch the function to resolve the list of productions.

@@ -403,7 +403,7 @@ const production_element_t *parse_productions::production_for_token(parse_token_
         case parse_token_type_oror:
         case parse_token_type_end:
         case parse_token_type_terminate: {
-            FLOGF(error, "Terminal token type %ls passed to %s", token_type_description(node_type),
+            FLOGF(error, L"Terminal token type %ls passed to %s", token_type_description(node_type),
                   __FUNCTION__);
             PARSER_DIE();
             break;

@@ -411,13 +411,13 @@ const production_element_t *parse_productions::production_for_token(parse_token_
         case parse_special_type_parse_error:
         case parse_special_type_tokenizer_error:
         case parse_special_type_comment: {
-            FLOGF(error, "Special type %ls passed to %s\n", token_type_description(node_type),
+            FLOGF(error, L"Special type %ls passed to %s\n", token_type_description(node_type),
                   __FUNCTION__);
             PARSER_DIE();
             break;
         }
         case token_type_invalid: {
-            FLOGF(error, "token_type_invalid passed to %s", __FUNCTION__);
+            FLOGF(error, L"token_type_invalid passed to %s", __FUNCTION__);
             PARSER_DIE();
             break;
         }

@@ -426,7 +426,7 @@ const production_element_t *parse_productions::production_for_token(parse_token_

     const production_element_t *result = resolver(input1, input2, out_tag);
     if (result == NULL) {
-        debug(5, "Node type '%ls' has no production for input '%ls' (in %s)",
+        debug(5, L"Node type '%ls' has no production for input '%ls' (in %s)",
               token_type_description(node_type), input1.describe().c_str(), __FUNCTION__);
     }

@@ -257,7 +257,7 @@ static inline parse_token_type_t parse_token_type_from_tokenizer_token(
         case TOK_COMMENT:
             return parse_special_type_comment;
     }
-    FLOGF(error, "Bad token type %d passed to %s", (int)tokenizer_token_type, __FUNCTION__);
+    FLOGF(error, L"Bad token type %d passed to %s", (int)tokenizer_token_type, __FUNCTION__);
     DIE("bad token type");
     return token_type_invalid;
 }

@@ -214,7 +214,7 @@ void internal_proc_t::mark_exited(proc_status_t status) {
     status_.store(status, std::memory_order_relaxed);
     exited_.store(true, std::memory_order_release);
     topic_monitor_t::principal().post(topic_t::internal_exit);
-    FLOG(proc_internal_proc, "Internal proc", internal_proc_id_, "exited with status",
+    FLOG(proc_internal_proc, L"Internal proc", internal_proc_id_, L"exited with status",
          status.status_value());
 }