Mirror of https://github.com/fish-shell/fish-shell (synced 2024-12-25 20:33:08 +00:00)
commit a529fc9d83 (parent 173a6a71c0)
Set of fixes for issues identified by cppcheck

13 changed files with 9 additions and 139 deletions
complete.cpp (22 changes)

@@ -324,18 +324,6 @@ completion_t &completion_t::operator=(const completion_t &him)
     return *this;
 }
 
-
-wcstring_list_t completions_to_wcstring_list(const std::vector<completion_t> &list)
-{
-    wcstring_list_t strings;
-    strings.reserve(list.size());
-    for (std::vector<completion_t>::const_iterator iter = list.begin(); iter != list.end(); ++iter)
-    {
-        strings.push_back(iter->completion);
-    }
-    return strings;
-}
-
 bool completion_t::is_alphabetically_less_than(const completion_t &a, const completion_t &b)
 {
     return a.completion < b.completion;
@@ -412,8 +400,6 @@ public:
 
     void complete_param_expand(const wcstring &str, bool do_file);
 
-    void debug_print_completions();
-
     void complete_cmd(const wcstring &str,
                       bool use_function,
                       bool use_builtin,
@@ -1659,14 +1645,6 @@ void completer_t::complete_param_expand(const wcstring &sstr, bool do_file)
     }
 }
 
-void completer_t::debug_print_completions()
-{
-    for (size_t i=0; i < completions.size(); i++)
-    {
-        printf("- Completion: %ls\n", completions.at(i).completion.c_str());
-    }
-}
-
 /**
    Complete the specified string as an environment variable
 */
@@ -136,9 +136,6 @@ enum
 };
 typedef uint32_t completion_request_flags_t;
 
-/** Given a list of completions, returns a list of their completion fields */
-wcstring_list_t completions_to_wcstring_list(const std::vector<completion_t> &completions);
-
 /**
 
    Add a completion.
@@ -97,14 +97,6 @@ void env_universal_common_init(void (*cb)(fish_message_type_t type, const wchar_
     callback = cb;
 }
 
-/**
-   Remove variable with specified name
-*/
-bool env_universal_common_remove(const wcstring &name)
-{
-    return default_universal_vars().remove(name);
-}
-
 /**
    Test if the message msg contains the command cmd
 */
@@ -120,19 +112,6 @@ static bool match(const wchar_t *msg, const wchar_t *cmd)
     return true;
 }
 
-void env_universal_common_set(const wchar_t *key, const wchar_t *val, bool exportv)
-{
-    CHECK(key,);
-    CHECK(val,);
-
-    default_universal_vars().set(key, val, exportv);
-
-    if (callback)
-    {
-        callback(exportv?SET_EXPORT:SET, key, val);
-    }
-}
-
 static void report_error(int err_code, const wchar_t *err_format, ...)
 {
     va_list va;
@@ -252,23 +231,6 @@ static bool append_file_entry(fish_message_type_t type, const wcstring &key_in,
     return success;
 }
 
-wcstring_list_t env_universal_get_names(bool show_exported, bool show_unexported)
-{
-    return default_universal_vars().get_names(show_exported, show_unexported);
-
-}
-
-
-env_var_t env_universal_get(const wcstring &name)
-{
-    return default_universal_vars().get(name);
-}
-
-bool env_universal_common_get_export(const wcstring &name)
-{
-    return default_universal_vars().get_export(name);
-}
-
 env_universal_t::env_universal_t(const wcstring &path) : explicit_vars_path(path), tried_renaming(false), last_read_file(kInvalidFileID)
 {
     VOMIT_ON_FAILURE(pthread_mutex_init(&lock, NULL));
@@ -336,6 +336,7 @@ int main(int argc, char **argv)
             {
                 print_help("fish_indent", 1);
                 exit(0);
+                assert(0 && "Unreachable code reached");
                 break;
             }
 
@@ -346,6 +347,8 @@ int main(int argc, char **argv)
                          program_name,
                          get_fish_version());
                 exit(0);
+                assert(0 && "Unreachable code reached");
+                break;
             }
 
             case 'i':
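
Both hunks above follow the same pattern: nothing after exit(0) can execute, so an assert is added to mark the spot (and, in the second hunk, a break to terminate the case cleanly) instead of leaving code that static analysis flags as suspicious. A minimal sketch of the pattern, using a hypothetical option handler rather than the real fish_indent switch:

#include <cassert>
#include <cstdio>
#include <cstdlib>

// Hypothetical handler, not the real fish_indent code: exit(0) never returns,
// so the assert documents that the trailing break exists only to end the case
// well-formedly and is never actually reached.
static void handle_option(int opt)
{
    switch (opt)
    {
        case 'h':
        {
            std::puts("usage: tool [-h]");
            std::exit(0);
            assert(0 && "Unreachable code reached");
            break;
        }
        default:
        {
            break;
        }
    }
}

int main()
{
    handle_option('x'); // takes the default branch; 'h' would print usage and exit
    return 0;
}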
@@ -330,14 +330,6 @@ void input_function_set_status(bool status)
     input_function_status = status;
 }
 
-/**
-   Returns the nth argument for a given input function
-*/
-wchar_t input_function_get_arg(int index)
-{
-    return input_function_args[index];
-}
-
 /* Helper function to compare the lengths of sequences */
 static bool length_is_greater_than(const input_mapping_t &m1, const input_mapping_t &m2)
 {
@@ -30,7 +30,8 @@ static size_t divide_round_up(size_t numer, size_t denom)
         return 0;
 
     assert(denom > 0);
-    return numer / denom + (numer % denom ? 1 : 0);
+    bool has_rem = (numer % denom) > 0;
+    return numer / denom + (has_rem ? 1 : 0);
 }
 
 /**
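
The divide_round_up change hoists the remainder test into a named bool instead of using numer % denom directly as a ?: condition, likely the construction cppcheck asks to have clarified. A standalone sketch of the helper as it reads after the change; the guard above the first context line is an assumption, since the hunk only shows the return 0; line:

#include <cassert>
#include <cstddef>
#include <cstdio>

// Sketch of the integer ceiling-division helper after the change. The
// numer == 0 guard is assumed; the diff context starts at "return 0;".
static size_t divide_round_up(size_t numer, size_t denom)
{
    if (numer == 0)
        return 0;

    assert(denom > 0);
    bool has_rem = (numer % denom) > 0;
    return numer / denom + (has_rem ? 1 : 0);
}

int main()
{
    std::printf("%zu\n", divide_round_up(10, 4)); // prints 3
    return 0;
}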
@@ -393,7 +394,7 @@ void pager_t::refilter_completions()
 void pager_t::set_completions(const completion_list_t &raw_completions)
 {
     // Get completion infos out of it
-    unfiltered_completion_infos = process_completions_into_infos(raw_completions, prefix.c_str());
+    unfiltered_completion_infos = process_completions_into_infos(raw_completions, prefix);
 
     // Maybe join them
     if (prefix == L"-")
@@ -626,7 +627,7 @@ bool pager_t::completion_try_print(size_t cols, const wcstring &prefix, const co
     if (! progress_text.empty())
     {
         line_t &line = rendering->screen_data.add_line();
-        print_max(progress_text.c_str(), highlight_spec_pager_progress | highlight_make_background(highlight_spec_pager_progress), term_width, true /* has_more */, &line);
+        print_max(progress_text, highlight_spec_pager_progress | highlight_make_background(highlight_spec_pager_progress), term_width, true /* has_more */, &line);
     }
 
     if (search_field_shown)
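
Both pager hunks drop a .c_str() call at a call site that can take the string object itself. Assuming the callees accept a const wcstring& (which the replacement lines suggest), passing the wcstring directly avoids flattening it to a C string only to have a temporary wcstring rebuilt inside the call. A small illustration with a hypothetical helper, not the real print_max or process_completions_into_infos:

#include <cstddef>
#include <string>

typedef std::wstring wcstring;

// Hypothetical helper standing in for a callee that takes const wcstring&.
static std::size_t count_chars(const wcstring &text)
{
    return text.size();
}

int main()
{
    const wcstring progress_text = L"12 of 47 rows";
    std::size_t n = count_chars(progress_text);          // passes the object, no conversion
    std::size_t m = count_chars(progress_text.c_str());  // builds a temporary wcstring first
    return (n == m) ? 0 : 1;
}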
@@ -1428,19 +1428,6 @@ const parse_node_t *parse_node_tree_t::get_parent(const parse_node_t &node, pars
     return result;
 }
 
-const parse_node_t *parse_node_tree_t::get_first_ancestor_of_type(const parse_node_t &node, parse_token_type_t desired_type) const
-{
-    const parse_node_t *ancestor = &node;
-    while ((ancestor = this->get_parent(*ancestor)))
-    {
-        if (ancestor->type == desired_type)
-        {
-            break;
-        }
-    }
-    return ancestor;
-}
-
 static void find_nodes_recursive(const parse_node_tree_t &tree, const parse_node_t &parent, parse_token_type_t type, parse_node_tree_t::parse_node_list_t *result, size_t max_count)
 {
     if (result->size() < max_count)
@@ -148,9 +148,6 @@ public:
     /* Get the node corresponding to the parent of the given node, or NULL if there is no such child. If expected_type is provided, only returns the parent if it is of that type. Note the asymmetry: get_child asserts since the children are known, but get_parent does not, since the parent may not be known. */
     const parse_node_t *get_parent(const parse_node_t &node, parse_token_type_t expected_type = token_type_invalid) const;
 
-    /* Returns the first ancestor of the given type, or NULL. */
-    const parse_node_t *get_first_ancestor_of_type(const parse_node_t &node, parse_token_type_t desired_type) const;
-
     /* Find all the nodes of a given type underneath a given node, up to max_count of them */
     typedef std::vector<const parse_node_t *> parse_node_list_t;
     parse_node_list_t find_nodes(const parse_node_t &parent, parse_token_type_t type, size_t max_count = (size_t)(-1)) const;
@@ -53,7 +53,7 @@ int parse_util_lineno(const wchar_t *str, size_t offset)
         return 0;
 
     int res = 1;
-    for (size_t i=0; str[i] && i<offset; i++)
+    for (size_t i=0; i < offset && str[i] != L'\0'; i++)
    {
         if (str[i] == L'\n')
         {
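
The rewritten condition in parse_util_lineno tests the offset bound before inspecting the character, the more conventional order for a loop that is limited both by an index and by a NUL terminator. A self-contained sketch of an equivalent newline counter (a hypothetical helper, not the fish API):

#include <cstddef>
#include <cstdio>

// Count the 1-based line number of the character at 'offset' in a
// NUL-terminated wide string, checking the bound before the character.
static int line_number_at_offset(const wchar_t *str, size_t offset)
{
    int line = 1;
    for (size_t i = 0; i < offset && str[i] != L'\0'; i++)
    {
        if (str[i] == L'\n')
            line++;
    }
    return line;
}

int main()
{
    std::printf("%d\n", line_number_at_offset(L"one\ntwo\nthree", 9)); // prints 3
    return 0;
}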
@@ -1186,7 +1186,6 @@ parser_test_error_bits_t parse_util_detect_errors_in_argument(const parse_node_t
     {
 
         const wcstring subst(paran_begin + 1, paran_end);
-        wcstring tmp;
 
         // Replace the command substitution with just INTERNAL_SEPARATOR
         size_t cmd_sub_start = paran_begin - working_copy_cstr;
@@ -1019,13 +1019,6 @@ bool parser_t::detect_errors_in_argument_list(const wcstring &arg_list_src, wcst
     return errored;
 }
 
-// helper type used in parser::test below
-struct block_info_t
-{
-    int position; //tokenizer position
-    block_type_t type; //type of the block
-};
-
 void parser_t::get_backtrace(const wcstring &src, const parse_error_list_t &errors, wcstring *output) const
 {
     assert(output != NULL);
@@ -136,42 +136,6 @@ int tok_has_next(tokenizer_t *tok)
     return tok->has_next;
 }
 
-int tokenizer_t::line_number_of_character_at_offset(size_t offset)
-{
-    // we want to return (one plus) the number of newlines at offsets less than the given offset
-    // cached_lineno_count is the number of newlines at indexes less than cached_lineno_offset
-    const wchar_t *str = orig_buff;
-    if (! str)
-        return 0;
-
-    // easy hack to handle 0
-    if (offset == 0)
-        return 1;
-
-    size_t i;
-    if (offset > cached_lineno_offset)
-    {
-        for (i = cached_lineno_offset; str[i] && i<offset; i++)
-        {
-            /* Add one for every newline we find in the range [cached_lineno_offset, offset) */
-            if (str[i] == L'\n')
-                cached_lineno_count++;
-        }
-        cached_lineno_offset = i; //note: i, not offset, in case offset is beyond the length of the string
-    }
-    else if (offset < cached_lineno_offset)
-    {
-        /* Subtract one for every newline we find in the range [offset, cached_lineno_offset) */
-        for (i = offset; i < cached_lineno_offset; i++)
-        {
-            if (str[i] == L'\n')
-                cached_lineno_count--;
-        }
-        cached_lineno_offset = offset;
-    }
-    return cached_lineno_count + 1;
-}
-
 /**
    Tests if this character can be a part of a string. The redirect ^ is allowed unless it's the first character.
 */
@@ -97,9 +97,6 @@ struct tokenizer_t
     size_t cached_lineno_offset;
     int cached_lineno_count;
 
-    /** Return the line number of the character at the given offset */
-    int line_number_of_character_at_offset(size_t offset);
-
    /**
        Constructor for a tokenizer. b is the string that is to be
        tokenized. It is not copied, and should not be freed by the caller
@@ -485,7 +485,7 @@ const wchar_t *wgettext(const wchar_t *in)
         val = new wcstring(format_string(L"%s", out)); //note that this writes into the map!
     }
     errno = err;
-    return val->c_str();
+    return val->c_str(); //looks dangerous but is safe, since the string is stored in the map
 }
 
 const wchar_t *wgetenv(const wcstring &name)
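
The comment added to wgettext records why returning c_str() is safe here: the wcstring it points into is heap-allocated and owned by a map that lives for the rest of the program, so the pointer does not dangle after the function returns. A hedged sketch of that caching shape (not the real wgettext, which also handles locking and the message catalog):

#include <cstddef>
#include <map>
#include <string>

typedef std::wstring wcstring;

// Sketch of the cache-and-return-c_str pattern: the returned pointer stays
// valid because the wcstring it points into is owned by a long-lived map,
// not by a temporary. The entries are intentionally never freed.
static const wchar_t *cached_lookup(const wcstring &key)
{
    static std::map<wcstring, const wcstring *> cache;
    const wcstring *&val = cache[key];
    if (val == NULL)
    {
        val = new wcstring(L"<" + key + L">"); // stand-in for the translated text
    }
    return val->c_str(); // safe: the string outlives this call
}

int main()
{
    const wchar_t *s = cached_lookup(L"fish");
    return (s != NULL) ? 0 : 1;
}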