simplify oclint error suppression for scoped_lock

Kurtis Rader 2016-07-20 22:30:58 -07:00
parent b53f42970c
commit 1d2fff9686
12 changed files with 71 additions and 61 deletions

.oclint
View file

@@ -15,6 +15,16 @@ rule-configurations:
#
- key: LONG_VARIABLE_NAME
value: 30
#
# This allows us to avoid peppering our code with inline comments such as
#
# scoped_lock locker(m_lock); //!OCLINT(side-effect)
#
# Specifically, this config key tells oclint that the named classes have
# RAII behavior so the local vars are actually used.
#
- key: RAII_CUSTOM_CLASSES
value: scoped_lock
disable-rules:
#
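
For context, the suppression that the new RAII_CUSTOM_CLASSES key makes unnecessary exists because a scoped_lock-style local does all of its work in its constructor and destructor; the variable is never mentioned again, so oclint would otherwise flag it as an unused local with side effects. A minimal, hypothetical sketch of that idiom (illustrative only, not fish's actual scoped_lock implementation):

// Hypothetical RAII wrapper: the constructor acquires the mutex and the
// destructor releases it, so an otherwise "unused" local holds the lock
// for its entire enclosing scope.
#include <pthread.h>

class example_scoped_lock {
    pthread_mutex_t &m_mutex;
   public:
    explicit example_scoped_lock(pthread_mutex_t &mutex) : m_mutex(mutex) {
        pthread_mutex_lock(&m_mutex);
    }
    ~example_scoped_lock() { pthread_mutex_unlock(&m_mutex); }
};

static pthread_mutex_t example_lock = PTHREAD_MUTEX_INITIALIZER;

static void example_critical_section(void) {
    example_scoped_lock locker(example_lock);  // looks unused to a naive linter
    // ... code that requires example_lock to be held ...
}  // locker is destroyed here, releasing example_lock

With the config key naming scoped_lock as an RAII class, oclint treats such locals as used, which is why the per-line //!OCLINT(...) comments are deleted throughout the rest of this commit.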

View file

@@ -86,7 +86,7 @@ int autoload_t::load(const wcstring &cmd, bool reload) {
this->last_path_tokenized.clear();
tokenize_variable_array(this->last_path, this->last_path_tokenized);
scoped_lock locker(lock); //!OCLINT(side-effect)
scoped_lock locker(lock);
this->evict_all_nodes();
}

View file

@@ -66,7 +66,7 @@ static wcstring_list_t *get_transient_stack() {
static bool get_top_transient(wcstring *out_result) {
ASSERT_IS_MAIN_THREAD();
bool result = false;
scoped_lock locker(transient_commandline_lock); //!OCLINT(side-effect)
scoped_lock locker(transient_commandline_lock);
const wcstring_list_t *stack = get_transient_stack();
if (!stack->empty()) {
out_result->assign(stack->back());
@@ -78,7 +78,7 @@ static bool get_top_transient(wcstring *out_result) {
builtin_commandline_scoped_transient_t::builtin_commandline_scoped_transient_t(
const wcstring &cmd) {
ASSERT_IS_MAIN_THREAD();
scoped_lock locker(transient_commandline_lock); //!OCLINT(side-effect)
scoped_lock locker(transient_commandline_lock);
wcstring_list_t *stack = get_transient_stack();
stack->push_back(cmd);
this->token = stack->size();
@@ -86,7 +86,7 @@ builtin_commandline_scoped_transient_t::builtin_commandline_scoped_transient_t(
builtin_commandline_scoped_transient_t::~builtin_commandline_scoped_transient_t() {
ASSERT_IS_MAIN_THREAD();
scoped_lock locker(transient_commandline_lock); //!OCLINT(side-effect)
scoped_lock locker(transient_commandline_lock);
wcstring_list_t *stack = get_transient_stack();
assert(this->token == stack->size());
stack->pop_back();

View file

@@ -1,8 +1,8 @@
/** \file complete.c Functions related to tab-completion.
These functions are used for storing and retrieving tab-completion data, as well as for performing
tab-completion.
*/
/// Functions related to tab-completion.
///
/// These functions are used for storing and retrieving tab-completion data, as well as for
/// performing tab-completion.
///
#include "config.h" // IWYU pragma: keep
#include <assert.h>
@@ -427,7 +427,7 @@ static completion_entry_t &complete_get_exact_entry(const wcstring &cmd, bool cm
void complete_set_authoritative(const wchar_t *cmd, bool cmd_is_path, bool authoritative) {
CHECK(cmd, );
scoped_lock lock(completion_lock); //!OCLINT(has side effects)
scoped_lock lock(completion_lock);
completion_entry_t &c = complete_get_exact_entry(cmd, cmd_is_path);
c.authoritative = authoritative;
@@ -441,7 +441,7 @@ void complete_add(const wchar_t *cmd, bool cmd_is_path, const wcstring &option,
assert(option.empty() == (option_type == option_type_args_only));
// Lock the lock that allows us to edit the completion entry list.
scoped_lock lock(completion_lock); //!OCLINT(has side effects)
scoped_lock lock(completion_lock);
completion_entry_t &c = complete_get_exact_entry(cmd, cmd_is_path);
@@ -478,7 +478,7 @@ bool completion_entry_t::remove_option(const wcstring &option, complete_option_t
void complete_remove(const wcstring &cmd, bool cmd_is_path, const wcstring &option,
complete_option_type_t type) {
scoped_lock lock(completion_lock); //!OCLINT(has side effects)
scoped_lock lock(completion_lock);
completion_entry_t tmp_entry(cmd, cmd_is_path, false);
completion_entry_set_t::iterator iter = completion_set.find(tmp_entry);
@@ -495,7 +495,7 @@ void complete_remove(const wcstring &cmd, bool cmd_is_path, const wcstring &opti
}
void complete_remove_all(const wcstring &cmd, bool cmd_is_path) {
scoped_lock lock(completion_lock); //!OCLINT(has side effects)
scoped_lock lock(completion_lock);
completion_entry_t tmp_entry(cmd, cmd_is_path, false);
completion_set.erase(tmp_entry);
@@ -1480,7 +1480,7 @@ static void append_switch(wcstring &out, const wcstring &opt, const wcstring &ar
wcstring complete_print() {
wcstring out;
scoped_lock locker(completion_lock); //!OCLINT(side-effect)
scoped_lock locker(completion_lock);
// Get a list of all completions in a vector, then sort it by order.
std::vector<const completion_entry_t *> all_completions;
@@ -1597,7 +1597,7 @@ wcstring_list_t complete_get_wrap_chain(const wcstring &command) {
if (command.empty()) {
return wcstring_list_t();
}
scoped_lock locker(wrapper_lock); //!OCLINT(side-effect)
scoped_lock locker(wrapper_lock);
const wrapper_map_t &wraps = wrap_map();
wcstring_list_t result;
@@ -1633,7 +1633,7 @@ wcstring_list_t complete_get_wrap_chain(const wcstring &command) {
wcstring_list_t complete_get_wrap_pairs() {
wcstring_list_t result;
scoped_lock locker(wrapper_lock); //!OCLINT(side-effect)
scoped_lock locker(wrapper_lock);
const wrapper_map_t &wraps = wrap_map();
for (wrapper_map_t::const_iterator outer = wraps.begin(); outer != wraps.end(); ++outer) {
const wcstring &cmd = outer->first;

View file

@@ -749,7 +749,7 @@ env_var_t env_get_string(const wcstring &key, env_mode_flags_t mode) {
if (search_local || search_global) {
/* Lock around a local region */
scoped_lock lock(env_lock); //!OCLINT(has side effects)
scoped_lock locker(env_lock);
env_node_t *env = search_local ? top : global_env;
@@ -918,7 +918,7 @@ static void add_key_to_string_set(const var_table_t &envs, std::set<wcstring> *s
}
wcstring_list_t env_get_names(int flags) {
scoped_lock lock(env_lock); //!OCLINT(has side effects)
scoped_lock locker(env_lock);
wcstring_list_t result;
std::set<wcstring> names;

View file

@@ -311,7 +311,7 @@ void env_universal_t::set_internal(const wcstring &key, const wcstring &val, boo
}
void env_universal_t::set(const wcstring &key, const wcstring &val, bool exportv) {
scoped_lock locker(lock); //!OCLINT(side-effect)
scoped_lock locker(lock);
this->set_internal(key, val, exportv, true /* overwrite */);
}
@@ -331,7 +331,7 @@ bool env_universal_t::remove(const wcstring &key) {
wcstring_list_t env_universal_t::get_names(bool show_exported, bool show_unexported) const {
wcstring_list_t result;
scoped_lock locker(lock); //!OCLINT(side-effect)
scoped_lock locker(lock);
var_table_t::const_iterator iter;
for (iter = vars.begin(); iter != vars.end(); ++iter) {
const wcstring &key = iter->first;

View file

@@ -64,7 +64,7 @@ static bool is_autoload = false;
/// loaded.
static int load(const wcstring &name) {
ASSERT_IS_MAIN_THREAD();
scoped_lock lock(functions_lock); //!OCLINT(has side effects)
scoped_lock locker(functions_lock);
bool was_autoload = is_autoload;
int res;
@@ -164,7 +164,7 @@ void function_add(const function_data_t &data, const parser_t &parser, int defin
CHECK(!data.name.empty(), );
CHECK(data.definition, );
scoped_lock lock(functions_lock); //!OCLINT(has side effects)
scoped_lock locker(functions_lock);
// Remove the old function.
function_remove(data.name);
@@ -185,28 +185,28 @@ void function_add(const function_data_t &data, const parser_t &parser, int defin
int function_exists(const wcstring &cmd) {
if (parser_keywords_is_reserved(cmd)) return 0;
scoped_lock lock(functions_lock); //!OCLINT(has side effects)
scoped_lock locker(functions_lock);
load(cmd);
return loaded_functions.find(cmd) != loaded_functions.end();
}
void function_load(const wcstring &cmd) {
if (!parser_keywords_is_reserved(cmd)) {
scoped_lock lock(functions_lock); //!OCLINT(has side effects)
scoped_lock locker(functions_lock);
load(cmd);
}
}
int function_exists_no_autoload(const wcstring &cmd, const env_vars_snapshot_t &vars) {
if (parser_keywords_is_reserved(cmd)) return 0;
scoped_lock lock(functions_lock); //!OCLINT(has side effects)
scoped_lock locker(functions_lock);
return loaded_functions.find(cmd) != loaded_functions.end() ||
function_autoloader.can_load(cmd, vars);
}
static bool function_remove_ignore_autoload(const wcstring &name, bool tombstone) {
// Note: the lock may be held at this point, but is recursive.
scoped_lock lock(functions_lock);
scoped_lock locker(functions_lock);
function_map_t::iterator iter = loaded_functions.find(name);
@@ -240,7 +240,7 @@ static const function_info_t *function_get(const wcstring &name) {
}
bool function_get_definition(const wcstring &name, wcstring *out_definition) {
scoped_lock lock(functions_lock);
scoped_lock locker(functions_lock);
const function_info_t *func = function_get(name);
if (func && out_definition) {
out_definition->assign(func->definition);
@@ -249,32 +249,32 @@ bool function_get_definition(const wcstring &name, wcstring *out_definition) {
}
wcstring_list_t function_get_named_arguments(const wcstring &name) {
scoped_lock lock(functions_lock); //!OCLINT(has side effects)
scoped_lock locker(functions_lock);
const function_info_t *func = function_get(name);
return func ? func->named_arguments : wcstring_list_t();
}
std::map<wcstring, env_var_t> function_get_inherit_vars(const wcstring &name) {
scoped_lock lock(functions_lock); //!OCLINT(has side effects)
scoped_lock locker(functions_lock);
const function_info_t *func = function_get(name);
return func ? func->inherit_vars : std::map<wcstring, env_var_t>();
}
int function_get_shadow_builtin(const wcstring &name) {
scoped_lock lock(functions_lock); //!OCLINT(has side effects)
scoped_lock locker(functions_lock);
const function_info_t *func = function_get(name);
return func ? func->shadow_builtin : false;
}
int function_get_shadow_scope(const wcstring &name) {
scoped_lock lock(functions_lock); //!OCLINT(has side effects)
scoped_lock locker(functions_lock);
const function_info_t *func = function_get(name);
return func ? func->shadow_scope : false;
}
bool function_get_desc(const wcstring &name, wcstring *out_desc) {
// Empty length string goes to NULL.
scoped_lock lock(functions_lock);
scoped_lock locker(functions_lock);
const function_info_t *func = function_get(name);
if (out_desc && func && !func->description.empty()) {
out_desc->assign(_(func->description.c_str()));
@@ -286,7 +286,7 @@ bool function_get_desc(const wcstring &name, wcstring *out_desc) {
void function_set_desc(const wcstring &name, const wcstring &desc) {
load(name);
scoped_lock lock(functions_lock); //!OCLINT(has side effects)
scoped_lock locker(functions_lock);
function_map_t::iterator iter = loaded_functions.find(name);
if (iter != loaded_functions.end()) {
iter->second.description = desc;
@@ -295,7 +295,7 @@ void function_set_desc(const wcstring &name, const wcstring &desc) {
bool function_copy(const wcstring &name, const wcstring &new_name) {
bool result = false;
scoped_lock lock(functions_lock);
scoped_lock locker(functions_lock);
function_map_t::const_iterator iter = loaded_functions.find(name);
if (iter != loaded_functions.end()) {
// This new instance of the function shouldn't be tied to the definition file of the
@@ -310,7 +310,7 @@ bool function_copy(const wcstring &name, const wcstring &new_name) {
wcstring_list_t function_get_names(int get_hidden) {
std::set<wcstring> names;
scoped_lock lock(functions_lock); //!OCLINT(has side effects)
scoped_lock locker(functions_lock);
autoload_names(names, get_hidden);
function_map_t::const_iterator iter;
@@ -327,13 +327,13 @@ wcstring_list_t function_get_names(int get_hidden) {
}
const wchar_t *function_get_definition_file(const wcstring &name) {
scoped_lock lock(functions_lock); //!OCLINT(has side effects)
scoped_lock locker(functions_lock);
const function_info_t *func = function_get(name);
return func ? func->definition_file : NULL;
}
int function_get_definition_offset(const wcstring &name) {
scoped_lock lock(functions_lock); //!OCLINT(has side effects)
scoped_lock locker(functions_lock);
const function_info_t *func = function_get(name);
return func ? func->definition_offset : -1;
}

View file

@@ -674,7 +674,7 @@ static size_t offset_of_next_item(const char *begin, size_t mmap_length,
history_t &history_collection_t::alloc(const wcstring &name) {
// Note that histories are currently never deleted, so we can return a reference to them without
// using something like shared_ptr.
scoped_lock locker(m_lock); //!OCLINT(side-effect)
scoped_lock locker(m_lock);
history_t *&current = m_histories[name];
if (current == NULL) current = new history_t(name);
return *current;
@@ -701,7 +701,7 @@ history_t::history_t(const wcstring &pname)
history_t::~history_t() { pthread_mutex_destroy(&lock); }
void history_t::add(const history_item_t &item, bool pending) {
scoped_lock locker(lock); //!OCLINT(side-effect)
scoped_lock locker(lock);
// Try merging with the last item.
if (!new_items.empty() && new_items.back().merge(item)) {
@@ -794,7 +794,7 @@ void history_t::set_valid_file_paths(const wcstring_list_t &valid_file_paths,
return;
}
scoped_lock locker(lock); //!OCLINT(side-effect)
scoped_lock locker(lock);
// Look for an item with the given identifier. It is likely to be at the end of new_items.
for (history_item_list_t::reverse_iterator iter = new_items.rbegin(); iter != new_items.rend();
@@ -807,7 +807,7 @@
}
void history_t::get_string_representation(wcstring *result, const wcstring &separator) {
scoped_lock locker(lock); //!OCLINT(side-effect)
scoped_lock locker(lock);
bool first = true;
@@ -853,7 +853,7 @@ void history_t::get_string_representation(wcstring *result, const wcstring &sepa
}
history_item_t history_t::item_at_index(size_t idx) {
scoped_lock locker(lock); //!OCLINT(side-effect)
scoped_lock locker(lock);
// 0 is considered an invalid index.
assert(idx > 0);
@@ -1393,7 +1393,7 @@ void history_t::save_internal(bool vacuum) {
}
void history_t::save(void) {
scoped_lock locker(lock); //!OCLINT(side-effect)
scoped_lock locker(lock);
this->save_internal(false);
}
@@ -1415,7 +1415,7 @@ static bool format_history_record(const history_item_t &item, const bool with_ti
}
bool history_t::search(history_search_type_t search_type, wcstring_list_t search_args, bool with_time, io_streams_t &streams) {
// scoped_lock locker(lock); //!OCLINT(side-effect)
// scoped_lock locker(lock);
if (search_args.empty()) {
// Start at one because zero is the current command.
for (int i = 1; !this->item_at_index(i).empty(); ++i) {
@@ -1443,20 +1443,20 @@ bool history_t::search(history_search_type_t search_type, wcstring_list_t search
}
void history_t::disable_automatic_saving() {
scoped_lock locker(lock); //!OCLINT(side-effect)
scoped_lock locker(lock);
disable_automatic_save_counter++;
assert(disable_automatic_save_counter != 0); // overflow!
}
void history_t::enable_automatic_saving() {
scoped_lock locker(lock); //!OCLINT(side-effect)
scoped_lock locker(lock);
assert(disable_automatic_save_counter > 0); // underflow
disable_automatic_save_counter--;
save_internal_unless_disabled();
}
void history_t::clear(void) {
scoped_lock locker(lock); //!OCLINT(side-effect)
scoped_lock locker(lock);
new_items.clear();
deleted_items.clear();
first_unwritten_new_item_index = 0;
@@ -1467,7 +1467,7 @@ void history_t::clear(void) {
}
bool history_t::is_empty(void) {
scoped_lock locker(lock); //!OCLINT(side-effect)
scoped_lock locker(lock);
// If we have new items, we're not empty.
if (!new_items.empty()) return false;
@@ -1587,7 +1587,7 @@ void history_t::incorporate_external_changes() {
// to preserve old_item_offsets so that they don't have to be recomputed. (However, then items
// *deleted* in other instances would not show up here).
time_t new_timestamp = time(NULL);
scoped_lock locker(lock); //!OCLINT(side-effect)
scoped_lock locker(lock);
// If for some reason the clock went backwards, we don't want to start dropping items; therefore
// we only do work if time has progressed. This also makes multiple calls cheap.
@@ -1747,6 +1747,6 @@ void history_t::add_pending_with_file_detection(const wcstring &str) {
/// Very simple, just mark that we have no more pending items.
void history_t::resolve_pending() {
scoped_lock locker(lock); //!OCLINT(side-effect)
scoped_lock locker(lock);
this->has_pending_item = false;
}

View file

@@ -38,7 +38,7 @@ static const wchar_t *intern_with_dup(const wchar_t *in, bool dup) {
if (!in) return NULL;
debug(5, L"intern %ls", in);
scoped_lock lock(intern_lock); //!OCLINT(has side effects)
scoped_lock locker(intern_lock);
const wchar_t *result;
#if USE_SET

View file

@@ -108,7 +108,7 @@ static SpawnRequest_t *dequeue_spawn_request(void) {
}
static void enqueue_thread_result(SpawnRequest_t *req) {
scoped_lock lock(s_result_queue_lock); //!OCLINT(has side effects)
scoped_lock locker(s_result_queue_lock);
s_result_queue.push(req);
}
@@ -196,7 +196,7 @@ int iothread_perform_base(int (*handler)(void *), void (*completionCallback)(voi
{
// Lock around a local region. Note that we can only access s_active_thread_count under the
// lock.
scoped_lock lock(s_spawn_queue_lock); //!OCLINT(has side effects)
scoped_lock locker(s_spawn_queue_lock);
add_to_queue(req);
if (s_active_thread_count < IO_MAX_THREADS) {
s_active_thread_count++;
@@ -294,7 +294,7 @@ static void iothread_service_main_thread_requests(void) {
// Move the queue to a local variable.
std::queue<MainThreadRequest_t *> request_queue;
{
scoped_lock queue_lock(s_main_thread_request_queue_lock); //!OCLINT(has side effects)
scoped_lock queue_lock(s_main_thread_request_queue_lock);
std::swap(request_queue, s_main_thread_request_queue);
}
@@ -317,7 +317,7 @@ static void iothread_service_main_thread_requests(void) {
//
// Because the waiting thread performs step 1 under the lock, if we take the lock, we avoid
// posting before the waiting thread is waiting.
scoped_lock broadcast_lock(s_main_thread_performer_lock); //!OCLINT(has side effects)
scoped_lock broadcast_lock(s_main_thread_performer_lock);
VOMIT_ON_FAILURE(pthread_cond_broadcast(&s_main_thread_performer_condition));
}
}
@@ -327,7 +327,7 @@ static void iothread_service_result_queue() {
// Move the queue to a local variable.
std::queue<SpawnRequest_t *> result_queue;
{
scoped_lock queue_lock(s_result_queue_lock); //!OCLINT(has side effects)
scoped_lock queue_lock(s_result_queue_lock);
std::swap(result_queue, s_result_queue);
}
@@ -358,7 +358,7 @@ int iothread_perform_on_main_base(int (*handler)(void *), void *context) {
// Append it. Do not delete the nested scope as it is crucial to the proper functioning of this
// code by virtue of the lock management.
{
scoped_lock queue_lock(s_main_thread_request_queue_lock); //!OCLINT(has side effects)
scoped_lock queue_lock(s_main_thread_request_queue_lock);
s_main_thread_request_queue.push(&req);
}
@@ -367,7 +367,7 @@ int iothread_perform_on_main_base(int (*handler)(void *), void *context) {
VOMIT_ON_FAILURE(!write_loop(s_write_pipe, &wakeup_byte, sizeof wakeup_byte));
// Wait on the condition, until we're done.
scoped_lock perform_lock(s_main_thread_performer_lock); //!OCLINT(has side effects)
scoped_lock perform_lock(s_main_thread_performer_lock);
while (!req.done) {
// It would be nice to support checking for cancellation here, but the clients need a
// deterministic way to clean up to avoid leaks
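
The locking comments in the hunks above (take s_main_thread_performer_lock before broadcasting; wait on the condition while holding that same lock until req.done) describe a standard mutex/condition-variable handshake. A simplified, self-contained sketch of that pattern with hypothetical names, not the actual iothread.cpp code:

// Hypothetical sketch of the handshake: the waiter checks the flag and
// blocks on the condition while holding the lock; the completer takes the
// same lock before setting the flag and broadcasting, so the wakeup cannot
// be posted in the gap between the waiter's check and its wait.
#include <pthread.h>

static pthread_mutex_t performer_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t performer_cond = PTHREAD_COND_INITIALIZER;
static bool request_done = false;

static void wait_for_completion(void) {
    pthread_mutex_lock(&performer_lock);
    while (!request_done) {  // loop guards against spurious wakeups
        pthread_cond_wait(&performer_cond, &performer_lock);
    }
    pthread_mutex_unlock(&performer_lock);
}

static void mark_complete(void) {
    pthread_mutex_lock(&performer_lock);  // serializes with the waiter's check
    request_done = true;
    pthread_cond_broadcast(&performer_cond);
    pthread_mutex_unlock(&performer_lock);
}

Because the completer only sets the flag and broadcasts while holding the lock, the notification can never be lost between the waiter's check of the flag and its call to pthread_cond_wait, which is the guarantee the comments above rely on.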

View file

@@ -166,7 +166,7 @@ static pthread_mutex_t job_id_lock = PTHREAD_MUTEX_INITIALIZER;
static std::vector<bool> consumed_job_ids;
job_id_t acquire_job_id(void) {
scoped_lock lock(job_id_lock); //!OCLINT(has side effects)
scoped_lock locker(job_id_lock);
// Find the index of the first 0 slot.
std::vector<bool>::iterator slot =
@@ -185,7 +185,7 @@ job_id_t acquire_job_id(void) {
void release_job_id(job_id_t jid) {
assert(jid > 0);
scoped_lock lock(job_id_lock); //!OCLINT(has side effects)
scoped_lock locker(job_id_lock);
size_t slot = (size_t)(jid - 1), count = consumed_job_ids.size();
// Make sure this slot is within our vector and is currently set to consumed.

View file

@@ -417,7 +417,7 @@ const wcstring &wgettext(const wchar_t *in) {
wcstring key = in;
wgettext_init_if_necessary();
scoped_lock lock(wgettext_lock); //!OCLINT(has side effects)
scoped_lock locker(wgettext_lock);
wcstring &val = wgettext_map[key];
if (val.empty()) {
cstring mbs_in = wcs2string(key);