Remove some dead code

ridiculousfish 2015-07-25 20:29:19 -07:00
parent f4d1657c22
commit 0dbd83ffaf
6 changed files with 4 additions and 102 deletions

Changed file 1 of 6

@@ -479,12 +479,10 @@ static void test_tok()
         if (types[i] != tok_last_type(&t))
         {
             err(L"Tokenization error:");
-            wprintf(L"Token number %d of string \n'%ls'\n, expected token type %ls, got token '%ls' of type %ls\n",
+            wprintf(L"Token number %d of string \n'%ls'\n, got token '%ls'\n",
                     i+1,
                     str,
-                    tok_get_desc(types[i]),
-                    tok_last(&t),
-                    tok_get_desc(tok_last_type(&t)));
+                    tok_last(&t));
         }
     }
 }

Changed file 2 of 6

@@ -95,25 +95,6 @@ wcstring parse_error_t::describe(const wcstring &src) const
     return this->describe_with_prefix(src, wcstring(), get_is_interactive(), false);
 }
 
-wcstring parse_errors_description(const parse_error_list_t &errors, const wcstring &src, const wchar_t *prefix)
-{
-    wcstring target;
-    for (size_t i=0; i < errors.size(); i++)
-    {
-        if (i > 0)
-        {
-            target.push_back(L'\n');
-        }
-        if (prefix != NULL)
-        {
-            target.append(prefix);
-            target.append(L": ");
-        }
-        target.append(errors.at(i).describe(src));
-    }
-    return target;
-}
-
 void parse_error_offset_source_start(parse_error_list_t *errors, size_t amt)
 {
     assert(errors != NULL);
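
Since parse_errors_description no longer has callers, any future call site can get the same output from the describe() method that survives above. A minimal sketch of such a call site, not part of this commit; errors and src stand for whatever parse_error_list_t and source string the caller already has, and reporting via fwprintf to stderr is purely illustrative:

    // Hypothetical call site: print each parse error on its own line
    // using parse_error_t::describe(), mirroring the removed helper.
    for (size_t i = 0; i < errors.size(); i++)
    {
        fwprintf(stderr, L"%ls\n", errors.at(i).describe(src).c_str());
    }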

Changed file 3 of 6

@@ -27,9 +27,6 @@ typedef uint32_t source_offset_t;
 #define SOURCE_OFFSET_INVALID (static_cast<source_offset_t>(-1))
 
 
-/* Returns a description of a list of parse errors */
-wcstring parse_errors_description(const parse_error_list_t &errors, const wcstring &src, const wchar_t *prefix = NULL);
-
 /** A struct representing the token type that we use internally */
 struct parse_token_t
 {

Changed file 4 of 6

@@ -226,7 +226,6 @@ struct profile_item_t
     wcstring cmd;
 };
 
-struct tokenizer_t;
 class parse_execution_context_t;
 
 class parser_t

Changed file 5 of 6

@@ -52,32 +52,6 @@ segments.
 */
 #define PIPE_ERROR _( L"Cannot use stdin (fd 0) as pipe output" )
 
-
-/**
-   Characters that separate tokens. They are ordered by frequency of occurrence to increase parsing speed.
-*/
-#define SEP L" \n|\t;#\r<>^&"
-
-/**
-   Descriptions of all tokenizer errors
-*/
-static const wchar_t *tok_desc[] =
-{
-    N_(L"Tokenizer not yet initialized"),
-    N_(L"Tokenizer error"),
-    N_(L"String"),
-    N_(L"Pipe"),
-    N_(L"End of command"),
-    N_(L"Redirect output to file"),
-    N_(L"Append output to file"),
-    N_(L"Redirect input to file"),
-    N_(L"Redirect to file descriptor"),
-    N_(L"Redirect output to file if file does not exist"),
-    N_(L"Run job in background"),
-    N_(L"Comment")
-};
-
-
 /**
    Set the latest tokens string to be the specified error message
 */
@@ -93,8 +67,7 @@ int tok_get_error(tokenizer_t *tok)
     return tok->error;
 }
 
-
-tokenizer_t::tokenizer_t(const wchar_t *b, tok_flags_t flags) : buff(NULL), orig_buff(NULL), last_type(TOK_NONE), last_pos(0), has_next(false), accept_unfinished(false), show_comments(false), show_blank_lines(false), last_quote(0), error(0), squash_errors(false), cached_lineno_offset(0), cached_lineno_count(0), continue_line_after_comment(false)
+tokenizer_t::tokenizer_t(const wchar_t *b, tok_flags_t flags) : buff(NULL), orig_buff(NULL), last_type(TOK_NONE), last_pos(0), has_next(false), accept_unfinished(false), show_comments(false), show_blank_lines(false), error(0), squash_errors(false), continue_line_after_comment(false)
 {
     CHECK(b,);
 
@@ -105,8 +78,6 @@ tokenizer_t::tokenizer_t(const wchar_t *b, tok_flags_t flags) : buff(NULL), orig
 
     this->has_next = (*b != L'\0');
     this->orig_buff = this->buff = b;
-    this->cached_lineno_offset = 0;
-    this->cached_lineno_count = 0;
     tok_next(this);
 }
 
@@ -142,7 +113,7 @@ int tok_has_next(tokenizer_t *tok)
    Hash (#) starts a comment if it's the first character in a token; otherwise it is considered a string character.
    See #953.
 */
-bool tok_is_string_character(wchar_t c, bool is_first)
+static bool tok_is_string_character(wchar_t c, bool is_first)
 {
     switch (c)
     {
@@ -251,7 +222,6 @@ static void read_string(tokenizer_t *tok)
 
         {
             const wchar_t *end = quote_end(tok->buff);
-            tok->last_quote = *tok->buff;
             if (end)
             {
                 tok->buff=(wchar_t *)end;
@@ -553,16 +523,6 @@ static bool my_iswspace(wchar_t c)
     return c != L'\n' && iswspace(c);
 }
 
-
-const wchar_t *tok_get_desc(int type)
-{
-    if (type < 0 || (size_t)type >= (sizeof tok_desc / sizeof *tok_desc))
-    {
-        return _(L"Invalid token type");
-    }
-    return _(tok_desc[type]);
-}
-
 void tok_next(tokenizer_t *tok)
 {
 
@@ -756,15 +716,6 @@ size_t tok_get_extent(const tokenizer_t *tok)
 }
 
 
-void tok_set_pos(tokenizer_t *tok, int pos)
-{
-    CHECK(tok,);
-
-    tok->buff = tok->orig_buff + pos;
-    tok->has_next = true;
-    tok_next(tok);
-}
-
 bool move_word_state_machine_t::consume_char_punctuation(wchar_t c)
 {
     enum

Changed file 6 of 6

@@ -92,17 +92,11 @@ struct tokenizer_t
     bool show_comments;
     /** Whether all blank lines are returned */
     bool show_blank_lines;
-    /** Type of last quote, can be either ' or ".*/
-    wchar_t last_quote;
     /** Last error */
     int error;
     /* Whether we are squashing errors */
     bool squash_errors;
 
-    /* Cached line number information */
-    size_t cached_lineno_offset;
-    int cached_lineno_count;
-
     /* Whether to continue the previous line after the comment */
     bool continue_line_after_comment;
 
@@ -157,24 +151,6 @@ size_t tok_get_extent(const tokenizer_t *tok);
 */
 wcstring tok_first(const wchar_t *str);
 
-/**
-   Indicates whether a character can be part of a string, or is a string separator.
-   Separators include newline, tab, |, ^, >, <, etc.
-
-   is_first should indicate whether this is the first character in a potential string.
-*/
-bool tok_is_string_character(wchar_t c, bool is_first);
-
-/**
-   Move tokenizer position
-*/
-void tok_set_pos(tokenizer_t *tok, int pos);
-
-/**
-   Returns a string description of the specified token type
-*/
-const wchar_t *tok_get_desc(int type);
-
 /**
    Get tokenizer error type. Should only be called if tok_last_type returns TOK_ERROR.
 */
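
With tok_get_desc and tok_set_pos gone, the declarations that remain in this header still cover the usual scanning loop, and the constructor already advances to the first token (the tok_next(this) call in the .cpp hunk above). A minimal sketch of that loop, not part of this commit; cmdline is a placeholder wide string and a flags value of 0 simply requests default behaviour:

    // Hypothetical caller using only the surviving tokenizer interface.
    tokenizer_t tok(cmdline, 0);
    for (; tok_has_next(&tok); tok_next(&tok))
    {
        if (tok_last_type(&tok) == TOK_ERROR)
        {
            // Per the comment above, tok_get_error() is only meaningful
            // when tok_last_type() reports TOK_ERROR.
            int err_code = tok_get_error(&tok);
            (void)err_code;
            break;
        }
        // tok_last(&tok) yields the text of the current token.
    }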