More work towards incremental history. Added some tests.

This commit is contained in:
ridiculousfish 2012-12-02 23:38:38 -08:00
parent c1b51c6530
commit 984a498063
5 changed files with 223 additions and 39 deletions

View file

@ -978,6 +978,9 @@ public:
static void test_history(void);
static void test_history_merge(void);
static void test_history_formats(void);
static void test_history_races(void);
static void test_history_races_pound_on_history();
};
static wcstring random_string(void)
@ -1077,6 +1080,121 @@ static void time_barrier(void)
while (time(NULL) == start);
}
/* Produce the deterministic list of history lines a process with the given pid
   will write: 256 lines of the form "<pid> <sequence-number>". The parent uses
   the same function with the child's pid to predict what the child wrote. */
static wcstring_list_t generate_history_lines(int pid)
{
    const long line_count = 256;
    wcstring_list_t result;
    result.reserve(line_count);
    for (long seq = 0; seq < line_count; seq++)
    {
        result.push_back(format_string(L"%ld %ld", (long)pid, seq));
    }
    return result;
}
void history_tests_t::test_history_races_pound_on_history()
{
/* Called in child process to modify history */
history_t *hist = new history_t(L"race_test");
hist->chaos_mode = true;
const wcstring_list_t lines = generate_history_lines(getpid());
for (size_t idx = 0; idx < lines.size(); idx++)
{
const wcstring &line = lines.at(idx);
hist->add(line);
hist->save();
}
delete hist;
}
void history_tests_t::test_history_races(void)
{
say(L"Testing history race conditions");
// Ensure history is clear
history_t *hist = new history_t(L"race_test");
hist->clear();
delete hist;
// Test concurrent history writing
#define RACE_COUNT 20
pid_t children[RACE_COUNT];
for (size_t i=0; i < RACE_COUNT; i++)
{
pid_t pid = fork();
if (! pid)
{
// Child process
setup_fork_guards();
test_history_races_pound_on_history();
exit_without_destructors(0);
}
else
{
// Parent process
children[i] = pid;
}
}
// Wait for all children
for (size_t i=0; i < RACE_COUNT; i++)
{
int stat;
waitpid(children[i], &stat, WUNTRACED);
}
// Compute the expected lines
wcstring_list_t lines[RACE_COUNT];
for (size_t i=0; i < RACE_COUNT; i++)
{
lines[i] = generate_history_lines(children[i]);
}
// Count total lines
size_t line_count = 0;
for (size_t i=0; i < RACE_COUNT; i++)
{
line_count += lines[i].size();
}
// Ensure we consider the lines that have been outputted as part of our history
time_barrier();
/* Ensure that we got sane, sorted results */
hist = new history_t(L"race_test");
hist->chaos_mode = true;
size_t hist_idx;
for (hist_idx = 1; ; hist_idx ++)
{
history_item_t item = hist->item_at_index(hist_idx);
if (item.empty())
break;
// The item must be present in one of our 'lines' arrays
// If it is present, then every item after it is assumed to be missed
size_t i;
for (i=0; i < RACE_COUNT; i++)
{
wcstring_list_t::iterator where = std::find(lines[i].begin(), lines[i].end(), item.str());
if (where != lines[i].end())
{
// Delete everything from the found location onwards
lines[i].resize(where - lines[i].begin());
// Break because we found it
break;
}
}
assert(i < RACE_COUNT && "Line expected to be at end of some array");
}
// every write should add at least one item
assert(hist_idx >= RACE_COUNT);
//hist->clear();
delete hist;
}
void history_tests_t::test_history_merge(void)
{
// In a single fish process, only one history is allowed to exist with the given name
@ -1310,6 +1428,7 @@ int main(int argc, char **argv)
reader_init();
env_init();
#if 0
test_format();
test_escape();
test_convert();
@ -1323,8 +1442,11 @@ int main(int argc, char **argv)
test_is_potential_path();
test_colors();
test_autosuggest_suggest_special();
#endif
history_tests_t::test_history();
history_tests_t::test_history_merge();
history_tests_t::test_history_races();
return 0;
history_tests_t::test_history_formats();
say(L"Encountered %d errors in low-level tests", err_count);

View file

@ -96,6 +96,20 @@ static bool history_file_lock(int fd, short type)
return ret != -1;
}
/* Get a file_id_t corresponding to the given fd */
/* Get a file_id_t (device, inode pair) identifying the file open on the given
   fd. Returns (-1, -1) if fstat() fails, so a failed identification never
   compares equal to a real file's identity. */
static file_id_t history_file_identify(int fd)
{
    struct stat buf = {};
    if (fstat(fd, &buf) != 0)
    {
        return file_id_t(-1, -1);
    }
    return file_id_t(buf.st_dev, buf.st_ino);
}
/* Our LRU cache is used for restricting the amount of history we have, and limiting how long we order it. */
class history_lru_node_t : public lru_node_t
{
@ -457,12 +471,12 @@ history_t & history_t::history_with_name(const wcstring &name)
history_t::history_t(const wcstring &pname) :
name(pname),
first_unwritten_new_item_index(0),
unsaved_item_count(0),
mmap_start(NULL),
mmap_length(0),
birth_timestamp(time(NULL)),
save_timestamp(0),
loaded_old(false)
loaded_old(false),
chaos_mode(false)
{
pthread_mutex_init(&lock, NULL);
}
@ -485,7 +499,6 @@ void history_t::add(const history_item_t &item)
{
/* We have to add a new item */
new_items.push_back(item);
unsaved_item_count++;
}
/* Prevent the first write from always triggering a save */
@ -494,10 +507,12 @@ void history_t::add(const history_item_t &item)
save_timestamp = now;
/* This might be a good candidate for moving to a background thread */
if ((now > save_timestamp + SAVE_INTERVAL) || (unsaved_item_count >= SAVE_COUNT))
assert(new_items.size() >= first_unwritten_new_item_index);
if ((now > save_timestamp + SAVE_INTERVAL) || (new_items.size() - first_unwritten_new_item_index >= SAVE_COUNT))
{
time_profiler_t profiler("save_internal");
this->save_internal();
this->save_internal(false);
}
}
@ -880,7 +895,7 @@ void history_t::populate_from_mmap(void)
}
/* Do a private, read-only map of the entirety of a history file with the given name. Returns true if successful. Returns the mapped memory region by reference. */
static bool map_file(const wcstring &name, const char **out_map_start, size_t *out_map_len)
static bool map_file(const wcstring &name, const char **out_map_start, size_t *out_map_len, file_id_t *file_id)
{
bool result = false;
wcstring filename = history_filename(name, L"");
@ -889,7 +904,12 @@ static bool map_file(const wcstring &name, const char **out_map_start, size_t *o
int fd = wopen_cloexec(filename, O_RDONLY);
if (fd >= 0)
{
/* Take a read lock to guard against someone else appending. This is released when the file is closed (below). */
/* Get the file ID if requested */
if (file_id != NULL)
*file_id = history_file_identify(fd);
/* Take a read lock to guard against someone else appending. This is released when the file is closed (below). We will read the file after taking the lock, but that's not a problem, because we never modify already written data. In short, the purpose of this lock is to ensure we don't see the file size change mid-update. */
if (history_file_lock(fd, F_RDLCK))
{
off_t len = lseek(fd, 0, SEEK_END);
@ -924,7 +944,7 @@ bool history_t::load_old_if_needed(void)
//signal_block();
bool ok = false;
if (map_file(name, &mmap_start, &mmap_length))
if (map_file(name, &mmap_start, &mmap_length, &mmap_file_id))
{
// Here we've mapped the file
ok = true;
@ -1149,7 +1169,7 @@ bool history_t::save_internal_via_rewrite()
/* Map in existing items (which may have changed out from underneath us, so don't trust our old mmap'd data) */
const char *local_mmap_start = NULL;
size_t local_mmap_size = 0;
if (map_file(name, &local_mmap_start, &local_mmap_size))
if (map_file(name, &local_mmap_start, &local_mmap_size, NULL))
{
const history_file_type_t local_mmap_type = infer_file_type(local_mmap_start, local_mmap_size);
size_t cursor = 0;
@ -1196,14 +1216,14 @@ bool history_t::save_internal_via_rewrite()
signal_block();
int out_fd = wopen_cloexec(tmp_name, O_WRONLY | O_CREAT | O_TRUNC);
int out_fd = wopen_cloexec(tmp_name, O_WRONLY | O_CREAT | O_TRUNC, 0644);
if (out_fd >= 0)
{
FILE *out = fdopen(out_fd, "w");
if (out)
{
/* Be block buffered */
setvbuf(out, NULL, _IOFBF, 0);
/* Be block buffered. In chaos mode, choose a tiny buffer so as to magnify the effects of race conditions. Otherwise, use the default buffer */
setvbuf(out, NULL, _IOFBF, chaos_mode ? 1 : 0);
/* Write them out */
for (history_lru_cache_t::iterator iter = lru.begin(); iter != lru.end(); ++iter)
@ -1252,6 +1272,17 @@ bool history_t::save_internal_via_rewrite()
lru.evict_all_nodes();
}
if (ok)
{
/* We've saved everything, so we have no more unsaved items */
this->first_unwritten_new_item_index = new_items.size();
this->deleted_items.clear();
/* Our history has been written to the file, so clear our state so we can re-reference the file. */
this->clear_file_state();
}
return ok;
}
@ -1260,8 +1291,14 @@ bool history_t::save_internal_via_appending()
/* This must be called while locked */
ASSERT_IS_LOCKED(lock);
/* No deleting allowed */
assert(deleted_items.empty());
bool ok = false;
/* If the file is different (someone vacuumed it) then we need to update our mmap */
bool file_changed = false;
/* Get the path to the real history file */
wcstring history_path = history_filename(name, wcstring());
@ -1271,25 +1308,31 @@ bool history_t::save_internal_via_appending()
int out_fd = wopen_cloexec(history_path, O_WRONLY | O_APPEND);
if (out_fd >= 0)
{
/* Check to see if the file changed */
if (history_file_identify(out_fd) == mmap_file_id)
file_changed = true;
/* Exclusive lock on the entire file. This is released when we close the file (below). */
struct flock flk = {};
flk.l_type = F_WRLCK;
flk.l_whence = SEEK_SET;
if (history_file_lock(out_fd, F_WRLCK))
{
/* We successfully took the exclusive lock. Append to the file.
Note that this is sketchy for a few reasons:
- Another shell may have appended its own items with a later timestamp, so our file may no longer be sorted by timestamp.
- Another shell may have appended the same items, so our file may now contain duplicates.
We cannot modify any previous parts of our file, because other instances may be reading those portions. We can only append.
Originally we always rewrote the file on saving, which avoided both of these problems. However, appending allows us to save history after every command, which is nice!
Periodically we "clean up" the file by rewriting it, so that most of the time it doesn't have these issues.
Periodically we "clean up" the file by rewriting it, so that most of the time it doesn't have duplicates, although we don't yet sort by timestamp (the timestamp isn't really used for much anyways).
*/
FILE *out = fdopen(out_fd, "a");
if (out)
{
/* Be block buffered. In chaos mode, choose a tiny buffer so as to magnify the effects of race conditions. Otherwise, use the default buffer */
setvbuf(out, NULL, _IOFBF, chaos_mode ? 1 : 0);
bool errored = false;
/* Write all items at or after first_unwritten_new_item_index */
@ -1316,6 +1359,7 @@ bool history_t::save_internal_via_appending()
errored = true;
}
/* We're OK if we did not error */
ok = ! errored;
}
}
@ -1325,11 +1369,18 @@ bool history_t::save_internal_via_appending()
close(out_fd);
signal_unblock();
/* If someone has replaced the file, forget our file state */
if (file_changed)
{
this->clear_file_state();
}
return ok;
}
/** Save the specified mode to file */
void history_t::save_internal()
/** Save the specified mode to file; optionally also vacuums */
void history_t::save_internal(bool vacuum)
{
/* This must be called while locked */
ASSERT_IS_LOCKED(lock);
@ -1343,25 +1394,28 @@ void history_t::save_internal()
/* Try saving. If we have items to delete, we have to rewrite the file. If we do not, we can append to it. */
bool ok = false;
if (
ok = save_internal_via_appending();
if (! ok)
ok = this->save_internal_via_rewrite();
if (ok)
if (! vacuum && deleted_items.empty())
{
/* We've saved everything, so we have no more unsaved items */
unsaved_item_count = 0;
/* Our history has been written to the file, so clear our state so we can re-reference the file. */
this->clear_file_state();
/* Try doing a fast append */
ok = save_internal_via_appending();
}
if (! ok)
{
/* We did not or could not append; rewrite the file ("vacuum" it) */
ok = this->save_internal_via_rewrite();
}
}
void history_t::save(void)
{
scoped_lock locker(lock);
this->save_internal();
this->save_internal(false);
}
void history_t::save_and_vacuum(void)
{
scoped_lock locker(lock);
this->save_internal(true);
}
void history_t::clear(void)
@ -1370,7 +1424,6 @@ void history_t::clear(void)
new_items.clear();
deleted_items.clear();
first_unwritten_new_item_index = 0;
unsaved_item_count = 0;
old_item_offsets.clear();
wcstring filename = history_filename(name, L"");
if (! filename.empty())

View file

@ -8,6 +8,7 @@
#include <wchar.h>
#include "common.h"
#include "pthread.h"
#include "wutil.h"
#include <vector>
#include <utility>
#include <list>
@ -120,9 +121,6 @@ private:
/** Deleted item contents. */
std::set<wcstring> deleted_items;
/** How many items we've added without saving */
size_t unsaved_item_count;
/** The mmaped region for the history file */
const char *mmap_start;
@ -131,6 +129,9 @@ private:
/** The type of file we mmap'd */
history_file_type_t mmap_type;
/** The file ID of the file we mmap'd */
file_id_t mmap_file_id;
/** Timestamp of when this history was created */
const time_t birth_timestamp;
@ -159,7 +160,10 @@ private:
bool save_internal_via_appending();
/** Saves history */
void save_internal();
void save_internal(bool vacuum);
/** Whether we're in maximum chaos mode, useful for testing */
bool chaos_mode;
/* Versioned decoding */
static history_item_t decode_item_fish_2_0(const char *base, size_t len);
@ -184,6 +188,9 @@ public:
/** Saves history */
void save();
/** Performs a full (non-incremental) save */
void save_and_vacuum();
/** Irreversibly clears history */
void clear();

View file

@ -686,9 +686,6 @@ static void insert_completion_if_missing(const wcstring &str, std::vector<comple
append_completion(out, str);
}
/** Helper stuff to avoid symlink loops */
typedef std::pair<dev_t, ino_t> file_id_t;
/**
The real implementation of wildcard expansion is in this
function. Other functions are just wrappers around this one.

View file

@ -15,6 +15,7 @@
#include <sys/types.h>
#include <stdarg.h>
#include <string>
#include <utility>
#include "common.h"
/**
@ -144,4 +145,8 @@ int wrename(const wcstring &oldName, const wcstring &newName);
/** Like wcstol(), but fails on a value outside the range of an int */
int fish_wcstoi(const wchar_t *str, wchar_t ** endptr, int base);
/** Class for representing a file's inode. We use this to detect and avoid symlink loops, among other things. */
typedef std::pair<dev_t, ino_t> file_id_t;
#endif