Mirror of https://github.com/agersant/polaris (synced 2024-11-10 10:14:12 +00:00)
Moved user configuration from .toml file to database
This commit is contained in:
parent 55b49f1ace
commit 66f59da7b2
23 changed files with 521 additions and 445 deletions
2  .gitignore (vendored)

@@ -1,7 +1,7 @@
target
release
*.res
test/*
test/*.sqlite
*.sqlite-journal
*.sqlite-wal
*.sqlite-shm
2  Cargo.lock (generated)

@@ -17,8 +17,10 @@ dependencies = [
"metaflac 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)",
"mount 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)",
"params 0.6.0 (registry+https://github.com/rust-lang/crates.io-index)",
"rand 0.3.15 (registry+https://github.com/rust-lang/crates.io-index)",
"regex 0.1.80 (registry+https://github.com/rust-lang/crates.io-index)",
"reqwest 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)",
"ring 0.9.7 (registry+https://github.com/rust-lang/crates.io-index)",
"router 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)",
"rusqlite 0.11.0 (registry+https://github.com/rust-lang/crates.io-index)",
"secure-session 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
Cargo.toml

@@ -21,7 +21,9 @@ lewton = "0.5.2"
metaflac = "0.1.4"
mount = "0.3.0"
params = "*"
rand = "0.3.15"
regex = "0.1"
ring = "0.9.7"
reqwest = "0.6.2"
router = "0.5.1"
secure-session = "0.1.0"

78  src/api.rs

@@ -15,7 +15,7 @@ use serde_json;
|
|||
use typemap;
|
||||
use url::percent_encoding::percent_decode;
|
||||
|
||||
use collection::*;
|
||||
use db::DB;
|
||||
use errors::*;
|
||||
use thumbnails::*;
|
||||
use utils::*;
|
||||
|
@ -49,67 +49,67 @@ impl typemap::Key for SessionKey {
|
|||
type Value = Session;
|
||||
}
|
||||
|
||||
pub fn get_handler(collection: Collection, secret: &str) -> Chain {
|
||||
let collection = Arc::new(collection);
|
||||
let api_handler = get_endpoints(collection);
|
||||
pub fn get_handler(db: Arc<DB>) -> Result<Chain> {
|
||||
let api_handler = get_endpoints(db.clone());
|
||||
let mut api_chain = Chain::new(api_handler);
|
||||
|
||||
let manager = ChaCha20Poly1305SessionManager::<Session>::from_password(secret.as_bytes());
|
||||
let config = SessionConfig::default();
|
||||
let auth_secret = db.deref().get_auth_secret()?;
|
||||
let session_manager = ChaCha20Poly1305SessionManager::<Session>::from_password(auth_secret.as_bytes());
|
||||
let session_config = SessionConfig::default();
|
||||
let session_middleware =
|
||||
SessionMiddleware::<Session,
|
||||
SessionKey,
|
||||
ChaCha20Poly1305SessionManager<Session>>::new(manager, config);
|
||||
ChaCha20Poly1305SessionManager<Session>>::new(session_manager, session_config);
|
||||
api_chain.link_around(session_middleware);
|
||||
|
||||
api_chain
|
||||
Ok(api_chain)
|
||||
}
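get_handler now takes the database instead of a Collection plus a secret string, and it is fallible because the session secret is read from the misc_settings table. A minimal sketch of how a caller wires it up, mirroring the src/main.rs change further down in this commit (the db.sqlite file name is just an example):

    let db = Arc::new(db::DB::new(Path::new("db.sqlite"))?);   // open or create the SQLite database
    let mut mount = Mount::new();
    mount.mount("/api/", api::get_handler(db.clone())?);       // reads the auth secret from the DB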
|
||||
|
||||
fn get_endpoints(collection: Arc<Collection>) -> Mount {
|
||||
fn get_endpoints(db: Arc<DB>) -> Mount {
|
||||
let mut api_handler = Mount::new();
|
||||
|
||||
{
|
||||
let collection = collection.clone();
|
||||
let db = db.clone();
|
||||
api_handler.mount("/version/", self::version);
|
||||
api_handler.mount("/auth/",
|
||||
move |request: &mut Request| self::auth(request, collection.deref()));
|
||||
move |request: &mut Request| self::auth(request, db.deref()));
|
||||
}
|
||||
|
||||
{
|
||||
let mut auth_api_mount = Mount::new();
|
||||
{
|
||||
let collection = collection.clone();
|
||||
let db = db.clone();
|
||||
auth_api_mount.mount("/browse/", move |request: &mut Request| {
|
||||
self::browse(request, collection.deref())
|
||||
self::browse(request, db.deref())
|
||||
});
|
||||
}
|
||||
{
|
||||
let collection = collection.clone();
|
||||
let db = db.clone();
|
||||
auth_api_mount.mount("/flatten/", move |request: &mut Request| {
|
||||
self::flatten(request, collection.deref())
|
||||
self::flatten(request, db.deref())
|
||||
});
|
||||
}
|
||||
{
|
||||
let collection = collection.clone();
|
||||
let db = db.clone();
|
||||
auth_api_mount.mount("/random/", move |request: &mut Request| {
|
||||
self::random(request, collection.deref())
|
||||
self::random(request, db.deref())
|
||||
});
|
||||
}
|
||||
{
|
||||
let collection = collection.clone();
|
||||
let db = db.clone();
|
||||
auth_api_mount.mount("/recent/", move |request: &mut Request| {
|
||||
self::recent(request, collection.deref())
|
||||
self::recent(request, db.deref())
|
||||
});
|
||||
}
|
||||
{
|
||||
let collection = collection.clone();
|
||||
let db = db.clone();
|
||||
auth_api_mount.mount("/serve/", move |request: &mut Request| {
|
||||
self::serve(request, collection.deref())
|
||||
self::serve(request, db.deref())
|
||||
});
|
||||
}
|
||||
|
||||
let mut auth_api_chain = Chain::new(auth_api_mount);
|
||||
let auth = AuthRequirement { collection: collection.clone() };
|
||||
let auth = AuthRequirement { db: db.clone() };
|
||||
auth_api_chain.link_around(auth);
|
||||
|
||||
api_handler.mount("/", auth_api_chain);
|
||||
|
@ -127,13 +127,13 @@ fn path_from_request(request: &Request) -> Result<PathBuf> {
|
|||
}
|
||||
|
||||
struct AuthRequirement {
|
||||
collection: Arc<Collection>,
|
||||
db: Arc<DB>,
|
||||
}
|
||||
|
||||
impl AroundMiddleware for AuthRequirement {
|
||||
fn around(self, handler: Box<Handler>) -> Box<Handler> {
|
||||
Box::new(AuthHandler {
|
||||
collection: self.collection,
|
||||
db: self.db,
|
||||
handler: handler,
|
||||
}) as Box<Handler>
|
||||
}
|
||||
|
@ -141,7 +141,7 @@ impl AroundMiddleware for AuthRequirement {
|
|||
|
||||
struct AuthHandler {
|
||||
handler: Box<Handler>,
|
||||
collection: Arc<Collection>,
|
||||
db: Arc<DB>,
|
||||
}
|
||||
|
||||
impl Handler for AuthHandler {
|
||||
|
@ -152,8 +152,8 @@ impl Handler for AuthHandler {
|
|||
// Auth via Authorization header
|
||||
if let Some(auth) = req.headers.get::<Authorization<Basic>>() {
|
||||
if let Some(ref password) = auth.password {
|
||||
auth_success = self.collection
|
||||
.auth(auth.username.as_str(), password.as_str());
|
||||
auth_success = self.db
|
||||
.auth(auth.username.as_str(), password.as_str())?;
|
||||
req.extensions
|
||||
.insert::<SessionKey>(Session { username: auth.username.clone() });
|
||||
}
|
||||
|
@ -183,7 +183,7 @@ fn version(_: &mut Request) -> IronResult<Response> {
|
|||
}
|
||||
}
|
||||
|
||||
fn auth(request: &mut Request, collection: &Collection) -> IronResult<Response> {
|
||||
fn auth(request: &mut Request, db: &DB) -> IronResult<Response> {
|
||||
let username;
|
||||
let password;
|
||||
{
|
||||
|
@ -197,7 +197,7 @@ fn auth(request: &mut Request, collection: &Collection) -> IronResult<Response>
|
|||
_ => return Err(Error::from(ErrorKind::MissingPassword).into()),
|
||||
};
|
||||
}
|
||||
if collection.auth(username.as_str(), password.as_str()) {
|
||||
if db.auth(username.as_str(), password.as_str())? {
|
||||
request
|
||||
.extensions
|
||||
.insert::<SessionKey>(Session { username: username.clone() });
|
||||
|
@ -207,13 +207,13 @@ fn auth(request: &mut Request, collection: &Collection) -> IronResult<Response>
|
|||
}
|
||||
}
|
||||
|
||||
fn browse(request: &mut Request, collection: &Collection) -> IronResult<Response> {
|
||||
fn browse(request: &mut Request, db: &DB) -> IronResult<Response> {
|
||||
let path = path_from_request(request);
|
||||
let path = match path {
|
||||
Err(e) => return Err(IronError::new(e, status::BadRequest)),
|
||||
Ok(p) => p,
|
||||
};
|
||||
let browse_result = collection.browse(&path)?;
|
||||
let browse_result = db.browse(&path)?;
|
||||
|
||||
let result_json = serde_json::to_string(&browse_result);
|
||||
let result_json = match result_json {
|
||||
|
@ -224,13 +224,13 @@ fn browse(request: &mut Request, collection: &Collection) -> IronResult<Response
|
|||
Ok(Response::with((status::Ok, result_json)))
|
||||
}
|
||||
|
||||
fn flatten(request: &mut Request, collection: &Collection) -> IronResult<Response> {
|
||||
fn flatten(request: &mut Request, db: &DB) -> IronResult<Response> {
|
||||
let path = path_from_request(request);
|
||||
let path = match path {
|
||||
Err(e) => return Err(IronError::new(e, status::BadRequest)),
|
||||
Ok(p) => p,
|
||||
};
|
||||
let flatten_result = collection.flatten(&path)?;
|
||||
let flatten_result = db.flatten(&path)?;
|
||||
|
||||
let result_json = serde_json::to_string(&flatten_result);
|
||||
let result_json = match result_json {
|
||||
|
@ -241,8 +241,8 @@ fn flatten(request: &mut Request, collection: &Collection) -> IronResult<Respons
|
|||
Ok(Response::with((status::Ok, result_json)))
|
||||
}
|
||||
|
||||
fn random(_: &mut Request, collection: &Collection) -> IronResult<Response> {
|
||||
let random_result = collection.get_random_albums(20)?;
|
||||
fn random(_: &mut Request, db: &DB) -> IronResult<Response> {
|
||||
let random_result = db.get_random_albums(20)?;
|
||||
let result_json = serde_json::to_string(&random_result);
|
||||
let result_json = match result_json {
|
||||
Ok(j) => j,
|
||||
|
@ -251,8 +251,8 @@ fn random(_: &mut Request, collection: &Collection) -> IronResult<Response> {
|
|||
Ok(Response::with((status::Ok, result_json)))
|
||||
}
|
||||
|
||||
fn recent(_: &mut Request, collection: &Collection) -> IronResult<Response> {
|
||||
let recent_result = collection.get_recent_albums(20)?;
|
||||
fn recent(_: &mut Request, db: &DB) -> IronResult<Response> {
|
||||
let recent_result = db.get_recent_albums(20)?;
|
||||
let result_json = serde_json::to_string(&recent_result);
|
||||
let result_json = match result_json {
|
||||
Ok(j) => j,
|
||||
|
@ -261,14 +261,14 @@ fn recent(_: &mut Request, collection: &Collection) -> IronResult<Response> {
|
|||
Ok(Response::with((status::Ok, result_json)))
|
||||
}
|
||||
|
||||
fn serve(request: &mut Request, collection: &Collection) -> IronResult<Response> {
|
||||
fn serve(request: &mut Request, db: &DB) -> IronResult<Response> {
|
||||
let virtual_path = path_from_request(request);
|
||||
let virtual_path = match virtual_path {
|
||||
Err(e) => return Err(IronError::new(e, status::BadRequest)),
|
||||
Ok(p) => p,
|
||||
};
|
||||
|
||||
let real_path = collection.locate(virtual_path.as_path());
|
||||
let real_path = db.locate(virtual_path.as_path());
|
||||
let real_path = match real_path {
|
||||
Err(e) => return Err(IronError::new(e, status::NotFound)),
|
||||
Ok(p) => p,

src/collection.rs (deleted)

@@ -1,63 +0,0 @@
use core::ops::Deref;
|
||||
use std::path::Path;
|
||||
use std::path::PathBuf;
|
||||
use std::sync::Arc;
|
||||
|
||||
use config::Config;
|
||||
use errors::*;
|
||||
use db::*;
|
||||
use vfs::*;
|
||||
|
||||
|
||||
#[derive(Clone, Debug, Deserialize)]
|
||||
pub struct User {
|
||||
name: String,
|
||||
password: String,
|
||||
}
|
||||
|
||||
pub struct Collection {
|
||||
vfs: Arc<Vfs>,
|
||||
users: Vec<User>,
|
||||
db: Arc<DB>,
|
||||
}
|
||||
|
||||
impl Collection {
|
||||
pub fn new(vfs: Arc<Vfs>, db: Arc<DB>) -> Collection {
|
||||
Collection {
|
||||
vfs: vfs,
|
||||
users: Vec::new(),
|
||||
db: db,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn load_config(&mut self, config: &Config) -> Result<()> {
|
||||
self.users = config.users.to_vec();
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn auth(&self, username: &str, password: &str) -> bool {
|
||||
self.users
|
||||
.iter()
|
||||
.any(|u| u.name == username && u.password == password)
|
||||
}
|
||||
|
||||
pub fn browse(&self, virtual_path: &Path) -> Result<Vec<CollectionFile>> {
|
||||
self.db.deref().browse(virtual_path)
|
||||
}
|
||||
|
||||
pub fn flatten(&self, virtual_path: &Path) -> Result<Vec<Song>> {
|
||||
self.db.deref().flatten(virtual_path)
|
||||
}
|
||||
|
||||
pub fn get_random_albums(&self, count: i64) -> Result<Vec<Directory>> {
|
||||
self.db.deref().get_random_albums(count)
|
||||
}
|
||||
|
||||
pub fn get_recent_albums(&self, count: i64) -> Result<Vec<Directory>> {
|
||||
self.db.deref().get_recent_albums(count)
|
||||
}
|
||||
|
||||
pub fn locate(&self, virtual_path: &Path) -> Result<PathBuf> {
|
||||
self.vfs.virtual_to_real(virtual_path)
|
||||
}
|
||||
}
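With the Collection wrapper deleted, its former callers talk to DB directly; the DB now owns both the user table and the virtual file system. Roughly, for any db: DB (paths and credentials below are illustrative):

    let files = db.browse(Path::new("root"))?;            // was collection.browse(&path)
    let songs = db.flatten(Path::new("root"))?;           // was collection.flatten(&path)
    let ok = db.auth("some_user", "some_password")?;      // was collection.auth(...), now a Result<bool>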

src/config.rs

@@ -4,91 +4,51 @@ use std::io::Read;
use std::path;
|
||||
use toml;
|
||||
|
||||
use collection::User;
|
||||
use ddns::DDNSConfig;
|
||||
use errors::*;
|
||||
use db::IndexConfig;
|
||||
use utils;
|
||||
use vfs::VfsConfig;
|
||||
|
||||
const DEFAULT_CONFIG_FILE_NAME: &'static str = "polaris.toml";
|
||||
const INDEX_FILE_NAME: &'static str = "index.sqlite";
|
||||
use db::NewMountPoint;
|
||||
|
||||
#[derive(Deserialize)]
|
||||
struct MountDir {
|
||||
pub struct User {
|
||||
pub name: String,
|
||||
pub source: String,
|
||||
pub password: String,
|
||||
}
|
||||
|
||||
#[derive(Deserialize)]
|
||||
struct UserConfig {
|
||||
pub auth_secret: String,
|
||||
pub struct DDNSConfig {
|
||||
pub host: String,
|
||||
pub username: String,
|
||||
pub password: String,
|
||||
}
|
||||
|
||||
#[derive(Deserialize)]
|
||||
pub struct UserConfig {
|
||||
pub album_art_pattern: Option<String>,
|
||||
pub reindex_every_n_seconds: Option<u64>,
|
||||
pub mount_dirs: Vec<MountDir>,
|
||||
pub users: Vec<User>,
|
||||
pub mount_dirs: Option<Vec<NewMountPoint>>,
|
||||
pub users: Option<Vec<User>>,
|
||||
pub ydns: Option<DDNSConfig>,
|
||||
}
|
||||
|
||||
pub struct Config {
|
||||
pub secret: String,
|
||||
pub vfs: VfsConfig,
|
||||
pub users: Vec<User>,
|
||||
pub index: IndexConfig,
|
||||
pub ddns: Option<DDNSConfig>,
|
||||
}
|
||||
|
||||
impl Config {
|
||||
pub fn parse(custom_path: Option<path::PathBuf>) -> Result<Config> {
|
||||
|
||||
let config_path = match custom_path {
|
||||
Some(p) => p,
|
||||
None => {
|
||||
let mut root = utils::get_config_root()?;
|
||||
root.push(DEFAULT_CONFIG_FILE_NAME);
|
||||
root
|
||||
}
|
||||
};
|
||||
println!("Config file path: {}", config_path.to_string_lossy());
|
||||
impl UserConfig {
|
||||
pub fn parse(path: &path::Path) -> Result<UserConfig> {
|
||||
println!("Config file path: {}", path.to_string_lossy());
|
||||
|
||||
// Parse user config
|
||||
let mut config_file = fs::File::open(config_path)?;
|
||||
let mut config_file = fs::File::open(path)?;
|
||||
let mut config_file_content = String::new();
|
||||
config_file.read_to_string(&mut config_file_content)?;
|
||||
let user_config = toml::de::from_str::<UserConfig>(config_file_content.as_str())?;
|
||||
let mut config = toml::de::from_str::<UserConfig>(config_file_content.as_str())?;
|
||||
|
||||
// Init VFS config
|
||||
let mut vfs_config = VfsConfig::new();
|
||||
for dir in user_config.mount_dirs {
|
||||
if vfs_config.mount_points.contains_key(&dir.name) {
|
||||
bail!("Conflicting mount directories");
|
||||
// Clean path
|
||||
if let Some(ref mut mount_dirs) = config.mount_dirs {
|
||||
for mount_dir in mount_dirs {
|
||||
match clean_path_string(&mount_dir.source).to_str() {
|
||||
Some(p) => mount_dir.source = p.to_owned(),
|
||||
_ => bail!("Bad mount directory path")
|
||||
}
|
||||
}
|
||||
vfs_config
|
||||
.mount_points
|
||||
.insert(dir.name.to_owned(), clean_path_string(dir.source.as_str()));
|
||||
}
|
||||
|
||||
// Init Index config
|
||||
let mut index_config = IndexConfig::new();
|
||||
index_config.album_art_pattern = user_config
|
||||
.album_art_pattern
|
||||
.and_then(|s| Regex::new(s.as_str()).ok());
|
||||
if let Some(duration) = user_config.reindex_every_n_seconds {
|
||||
index_config.sleep_duration = duration;
|
||||
}
|
||||
let mut index_path = utils::get_data_root()?;
|
||||
index_path.push(INDEX_FILE_NAME);
|
||||
index_config.path = index_path;
|
||||
|
||||
// Init master config
|
||||
let config = Config {
|
||||
secret: user_config.auth_secret,
|
||||
vfs: vfs_config,
|
||||
users: user_config.users,
|
||||
index: index_config,
|
||||
ddns: user_config.ydns,
|
||||
};
|
||||
|
||||
Ok(config)
|
||||
}
|
||||
}
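UserConfig::parse is now a thin reader: it loads the .toml file, normalizes mount directory paths, and leaves applying the settings to the database. The intended flow, as exercised by the tests and src/main.rs below (the path comes from the test fixture added in this commit):

    let config = UserConfig::parse(Path::new("test/config.toml"))?;
    db.load_config(&config)?;   // copies users, mount_dirs, ydns and index settings into SQLite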

317  src/db/index.rs

@@ -5,34 +5,20 @@ use diesel::sqlite::SqliteConnection;
|
|||
use regex::Regex;
|
||||
use std::fs;
|
||||
use std::path::{Path, PathBuf};
|
||||
use std::sync::{Arc, Mutex};
|
||||
use std::sync::{Mutex};
|
||||
use std::thread;
|
||||
use std::time;
|
||||
|
||||
use config::UserConfig;
|
||||
use db::DB;
|
||||
use db::schema::{directories, songs};
|
||||
use db::models::*;
|
||||
use db::schema::*;
|
||||
use errors::*;
|
||||
use metadata;
|
||||
use vfs::Vfs;
|
||||
|
||||
const INDEX_BUILDING_INSERT_BUFFER_SIZE: usize = 1000; // Insertions in each transaction
|
||||
const INDEX_BUILDING_CLEAN_BUFFER_SIZE: usize = 500; // Insertions in each transaction
|
||||
|
||||
pub struct IndexConfig {
|
||||
pub album_art_pattern: Option<Regex>,
|
||||
pub sleep_duration: u64, // in seconds
|
||||
pub path: PathBuf,
|
||||
}
|
||||
|
||||
impl IndexConfig {
|
||||
pub fn new() -> IndexConfig {
|
||||
IndexConfig {
|
||||
sleep_duration: 60 * 30, // 30 minutes
|
||||
album_art_pattern: None,
|
||||
path: Path::new(":memory:").to_path_buf(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Insertable)]
|
||||
#[table_name="songs"]
|
||||
|
@ -65,10 +51,11 @@ struct IndexBuilder<'db> {
|
|||
new_songs: Vec<NewSong>,
|
||||
new_directories: Vec<NewDirectory>,
|
||||
connection: &'db Mutex<SqliteConnection>,
|
||||
album_art_pattern: Regex,
|
||||
}
|
||||
|
||||
impl<'db> IndexBuilder<'db> {
|
||||
fn new(connection: &Mutex<SqliteConnection>) -> Result<IndexBuilder> {
|
||||
fn new(connection: &Mutex<SqliteConnection>, album_art_pattern: Regex) -> Result<IndexBuilder> {
|
||||
let mut new_songs = Vec::new();
|
||||
let mut new_directories = Vec::new();
|
||||
new_songs.reserve_exact(INDEX_BUILDING_INSERT_BUFFER_SIZE);
|
||||
|
@ -77,6 +64,7 @@ impl<'db> IndexBuilder<'db> {
|
|||
new_songs: new_songs,
|
||||
new_directories: new_directories,
|
||||
connection: connection,
|
||||
album_art_pattern: album_art_pattern,
|
||||
})
|
||||
}
|
||||
|
||||
|
@ -123,124 +111,13 @@ impl<'db> IndexBuilder<'db> {
|
|||
self.new_directories.push(directory);
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
pub struct Index {
|
||||
vfs: Arc<Vfs>,
|
||||
connection: Arc<Mutex<SqliteConnection>>,
|
||||
album_art_pattern: Option<Regex>,
|
||||
sleep_duration: u64,
|
||||
}
|
||||
|
||||
impl Index {
|
||||
pub fn new(vfs: Arc<Vfs>,
|
||||
connection: Arc<Mutex<SqliteConnection>>,
|
||||
config: &IndexConfig)
|
||||
-> Index {
|
||||
let index = Index {
|
||||
vfs: vfs,
|
||||
connection: connection,
|
||||
album_art_pattern: config.album_art_pattern.clone(),
|
||||
sleep_duration: config.sleep_duration,
|
||||
};
|
||||
index
|
||||
}
|
||||
|
||||
pub fn update_index(&self) -> Result<()> {
|
||||
let start = time::Instant::now();
|
||||
println!("Beginning library index update");
|
||||
self.clean()?;
|
||||
self.populate()?;
|
||||
println!("Library index update took {} seconds",
|
||||
start.elapsed().as_secs());
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn clean(&self) -> Result<()> {
|
||||
{
|
||||
let all_songs: Vec<String>;
|
||||
{
|
||||
let connection = self.connection.lock().unwrap();
|
||||
let connection = connection.deref();
|
||||
all_songs = songs::table
|
||||
.select(songs::columns::path)
|
||||
.load(connection)?;
|
||||
}
|
||||
|
||||
let missing_songs = all_songs
|
||||
.into_iter()
|
||||
.filter(|ref song_path| {
|
||||
let path = Path::new(&song_path);
|
||||
!path.exists() || self.vfs.real_to_virtual(path).is_err()
|
||||
})
|
||||
.collect::<Vec<_>>();
|
||||
|
||||
{
|
||||
let connection = self.connection.lock().unwrap();
|
||||
let connection = connection.deref();
|
||||
for chunk in missing_songs[..].chunks(INDEX_BUILDING_CLEAN_BUFFER_SIZE) {
|
||||
diesel::delete(songs::table.filter(songs::columns::path.eq_any(chunk)))
|
||||
.execute(connection)?;
|
||||
}
|
||||
|
||||
}
|
||||
}
|
||||
|
||||
{
|
||||
let all_directories: Vec<String>;
|
||||
{
|
||||
let connection = self.connection.lock().unwrap();
|
||||
let connection = connection.deref();
|
||||
all_directories = directories::table
|
||||
.select(directories::columns::path)
|
||||
.load(connection)?;
|
||||
}
|
||||
|
||||
let missing_directories = all_directories
|
||||
.into_iter()
|
||||
.filter(|ref directory_path| {
|
||||
let path = Path::new(&directory_path);
|
||||
!path.exists() || self.vfs.real_to_virtual(path).is_err()
|
||||
})
|
||||
.collect::<Vec<_>>();
|
||||
|
||||
{
|
||||
let connection = self.connection.lock().unwrap();
|
||||
let connection = connection.deref();
|
||||
for chunk in missing_directories[..].chunks(INDEX_BUILDING_CLEAN_BUFFER_SIZE) {
|
||||
diesel::delete(directories::table.filter(directories::columns::path
|
||||
.eq_any(chunk)))
|
||||
.execute(connection)?;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn populate(&self) -> Result<()> {
|
||||
let vfs = self.vfs.deref();
|
||||
let mount_points = vfs.get_mount_points();
|
||||
let mut builder = IndexBuilder::new(&self.connection)?;
|
||||
for (_, target) in mount_points {
|
||||
self.populate_directory(&mut builder, None, target.as_path())?;
|
||||
}
|
||||
builder.flush_songs()?;
|
||||
builder.flush_directories()?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn get_artwork(&self, dir: &Path) -> Option<String> {
|
||||
let pattern = match self.album_art_pattern {
|
||||
Some(ref p) => p,
|
||||
_ => return None,
|
||||
};
|
||||
|
||||
if let Ok(dir_content) = fs::read_dir(dir) {
|
||||
for file in dir_content {
|
||||
if let Ok(file) = file {
|
||||
if let Some(name_string) = file.file_name().to_str() {
|
||||
if pattern.is_match(name_string) {
|
||||
if self.album_art_pattern.is_match(name_string) {
|
||||
return file.path().to_str().map(|p| p.to_owned());
|
||||
}
|
||||
}
|
||||
|
@ -251,8 +128,7 @@ impl Index {
|
|||
None
|
||||
}
|
||||
|
||||
fn populate_directory(&self,
|
||||
builder: &mut IndexBuilder,
|
||||
fn populate_directory(&mut self,
|
||||
parent: Option<&Path>,
|
||||
path: &Path)
|
||||
-> Result<()> {
|
||||
|
@ -332,7 +208,7 @@ impl Index {
|
|||
artwork: artwork.as_ref().map(|s| s.to_owned()),
|
||||
};
|
||||
|
||||
builder.push_song(song)?;
|
||||
self.push_song(song)?;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -358,49 +234,162 @@ impl Index {
|
|||
year: directory_year,
|
||||
date_added: created,
|
||||
};
|
||||
builder.push_directory(directory)?;
|
||||
self.push_directory(directory)?;
|
||||
|
||||
// Populate subdirectories
|
||||
for sub_directory in sub_directories {
|
||||
self.populate_directory(builder, Some(path), &sub_directory)?;
|
||||
self.populate_directory(Some(path), &sub_directory)?;
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
pub struct Index {
|
||||
}
|
||||
|
||||
impl Index {
|
||||
pub fn new() -> Index {
|
||||
Index {}
|
||||
}
|
||||
|
||||
pub fn update_index(&self, db: &DB) -> Result<()> {
|
||||
let start = time::Instant::now();
|
||||
println!("Beginning library index update");
|
||||
self.clean(db)?;
|
||||
self.populate(db)?;
|
||||
println!("Library index update took {} seconds",
|
||||
start.elapsed().as_secs());
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn clean(&self, db: &DB) -> Result<()> {
|
||||
let vfs = db.get_vfs()?;
|
||||
|
||||
{
|
||||
let all_songs: Vec<String>;
|
||||
{
|
||||
let connection = db.get_connection();
|
||||
let connection = connection.lock().unwrap();
|
||||
let connection = connection.deref();
|
||||
all_songs = songs::table
|
||||
.select(songs::columns::path)
|
||||
.load(connection)?;
|
||||
}
|
||||
|
||||
let missing_songs = all_songs
|
||||
.into_iter()
|
||||
.filter(|ref song_path| {
|
||||
let path = Path::new(&song_path);
|
||||
!path.exists() || vfs.real_to_virtual(path).is_err()
|
||||
})
|
||||
.collect::<Vec<_>>();
|
||||
|
||||
{
|
||||
let connection = db.get_connection();
|
||||
let connection = connection.lock().unwrap();
|
||||
let connection = connection.deref();
|
||||
for chunk in missing_songs[..].chunks(INDEX_BUILDING_CLEAN_BUFFER_SIZE) {
|
||||
diesel::delete(songs::table.filter(songs::columns::path.eq_any(chunk)))
|
||||
.execute(connection)?;
|
||||
}
|
||||
|
||||
}
|
||||
}
|
||||
|
||||
{
|
||||
let all_directories: Vec<String>;
|
||||
{
|
||||
let connection = db.get_connection();
|
||||
let connection = connection.lock().unwrap();
|
||||
let connection = connection.deref();
|
||||
all_directories = directories::table
|
||||
.select(directories::columns::path)
|
||||
.load(connection)?;
|
||||
}
|
||||
|
||||
let missing_directories = all_directories
|
||||
.into_iter()
|
||||
.filter(|ref directory_path| {
|
||||
let path = Path::new(&directory_path);
|
||||
!path.exists() || vfs.real_to_virtual(path).is_err()
|
||||
})
|
||||
.collect::<Vec<_>>();
|
||||
|
||||
{
|
||||
let connection = db.get_connection();
|
||||
let connection = connection.lock().unwrap();
|
||||
let connection = connection.deref();
|
||||
for chunk in missing_directories[..].chunks(INDEX_BUILDING_CLEAN_BUFFER_SIZE) {
|
||||
diesel::delete(directories::table.filter(directories::columns::path
|
||||
.eq_any(chunk)))
|
||||
.execute(connection)?;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn update_loop(&self) {
|
||||
fn populate(&self, db: &DB) -> Result<()> {
|
||||
let vfs = db.get_vfs()?;
|
||||
let mount_points = vfs.get_mount_points();
|
||||
let connection = db.get_connection();
|
||||
|
||||
let album_art_pattern;
|
||||
{
|
||||
let connection = connection.lock().unwrap();
|
||||
let connection = connection.deref();
|
||||
let settings: MiscSettings = misc_settings::table.get_result(connection)?;
|
||||
album_art_pattern = Regex::new(&settings.index_album_art_pattern)?;
|
||||
}
|
||||
|
||||
let mut builder = IndexBuilder::new(&connection, album_art_pattern)?;
|
||||
for (_, target) in mount_points {
|
||||
builder.populate_directory(None, target.as_path())?;
|
||||
}
|
||||
builder.flush_songs()?;
|
||||
builder.flush_directories()?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn update_loop(&self, db: &DB) {
|
||||
loop {
|
||||
if let Err(e) = self.update_index() {
|
||||
if let Err(e) = self.update_index(db) {
|
||||
println!("Error while updating index: {}", e);
|
||||
}
|
||||
thread::sleep(time::Duration::from_secs(self.sleep_duration));
|
||||
{
|
||||
let sleep_duration;
|
||||
{
|
||||
let connection = db.get_connection();
|
||||
let connection = connection.lock().unwrap();
|
||||
let connection = connection.deref();
|
||||
let settings: Result<MiscSettings> = misc_settings::table.get_result(connection).map_err(|e| e.into());
|
||||
if let Err(ref e) = settings {
|
||||
println!("Could not retrieve index sleep duration: {}", e);
|
||||
}
|
||||
sleep_duration = settings.map(|s| s.index_sleep_duration_seconds).unwrap_or(1800);
|
||||
}
|
||||
thread::sleep(time::Duration::from_secs(sleep_duration as u64));
|
||||
}
|
||||
}
|
||||
}
|
||||
}
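Index no longer owns a connection, a VFS or settings of its own; update_index and update_loop receive the DB and re-read the album art pattern and sleep interval from misc_settings on every pass, so configuration changes apply without restarting the indexer. A one-off rebuild now looks like this (as in the tests below):

    let index = db.get_index();
    index.update_index(&db)?;   // settings come from the misc_settings table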
|
||||
|
||||
fn _get_test_db(name: &str) -> DB {
|
||||
use vfs::VfsConfig;
|
||||
use std::collections::HashMap;
|
||||
let config_path = Path::new("test/config.toml");
|
||||
let config = UserConfig::parse(&config_path).unwrap();
|
||||
|
||||
let mut collection_path = PathBuf::new();
|
||||
collection_path.push("test");
|
||||
collection_path.push("collection");
|
||||
let mut mount_points = HashMap::new();
|
||||
mount_points.insert("root".to_owned(), collection_path);
|
||||
|
||||
let vfs = Arc::new(Vfs::new(VfsConfig { mount_points: mount_points }));
|
||||
|
||||
let mut index_config = IndexConfig::new();
|
||||
index_config.album_art_pattern = Some(Regex::new(r#"^Folder\.(png|jpg|jpeg)$"#).unwrap());
|
||||
index_config.path = PathBuf::new();
|
||||
index_config.path.push("test");
|
||||
index_config.path.push(name);
|
||||
|
||||
if index_config.path.exists() {
|
||||
fs::remove_file(&index_config.path).unwrap();
|
||||
let mut db_path = PathBuf::new();
|
||||
db_path.push("test");
|
||||
db_path.push(name);
|
||||
if db_path.exists() {
|
||||
fs::remove_file(&db_path).unwrap();
|
||||
}
|
||||
|
||||
DB::new(vfs, &index_config).unwrap()
|
||||
let db = DB::new(&db_path).unwrap();
|
||||
db.load_config(&config).unwrap();
|
||||
db
|
||||
}
|
||||
|
||||
#[test]
|
||||
|
@ -409,10 +398,11 @@ fn test_populate() {
|
|||
|
||||
let db = _get_test_db("populate.sqlite");
|
||||
let index = db.get_index();
|
||||
index.update_index().unwrap();
|
||||
index.update_index().unwrap(); // Check that subsequent updates don't run into conflicts
|
||||
index.update_index(&db).unwrap();
|
||||
index.update_index(&db).unwrap(); // Check that subsequent updates don't run into conflicts
|
||||
|
||||
let connection = index.connection.lock().unwrap();
|
||||
let connection = db.get_connection();
|
||||
let connection = connection.lock().unwrap();
|
||||
let connection = connection.deref();
|
||||
let all_directories: Vec<Directory> = directories::table.load(connection).unwrap();
|
||||
let all_songs: Vec<Song> = songs::table.load(connection).unwrap();
|
||||
|
@ -438,9 +428,10 @@ fn test_metadata() {
|
|||
|
||||
let db = _get_test_db("metadata.sqlite");
|
||||
let index = db.get_index();
|
||||
index.update_index().unwrap();
|
||||
index.update_index(&db).unwrap();
|
||||
|
||||
let connection = index.connection.lock().unwrap();
|
||||
let connection = db.get_connection();
|
||||
let connection = connection.lock().unwrap();
|
||||
let connection = connection.deref();
|
||||
let songs: Vec<Song> = songs::table
|
||||
.filter(songs::columns::title.eq("シャーベット (Sherbet)"))
|
||||
|
|
1  src/db/migrations/201706272129_users_table/down.sql (new file)

@@ -0,0 +1 @@
DROP TABLE users;

7  src/db/migrations/201706272129_users_table/up.sql (new file)

@@ -0,0 +1,7 @@
CREATE TABLE users (
	id INTEGER PRIMARY KEY NOT NULL,
	name TEXT NOT NULL,
	password_salt BLOB NOT NULL,
	password_hash BLOB NOT NULL,
	UNIQUE(name)
);

misc_settings migration: down.sql (new file)

@@ -0,0 +1 @@
DROP TABLE misc_settings;

misc_settings migration: up.sql (new file)

@@ -0,0 +1,7 @@
CREATE TABLE misc_settings (
	id INTEGER PRIMARY KEY NOT NULL CHECK(id = 0),
	auth_secret TEXT NOT NULL,
	index_sleep_duration_seconds INTEGER NOT NULL,
	index_album_art_pattern TEXT NOT NULL
);
INSERT INTO misc_settings (id, auth_secret, index_sleep_duration_seconds, index_album_art_pattern) VALUES (0, hex(randomblob(64)), 1800, "Folder.(jpg|png)");

src/db/migrations/201706272313_ddns_config_table/down.sql (new file)

@@ -0,0 +1 @@
DROP TABLE ddns_config;

8  src/db/migrations/201706272313_ddns_config_table/up.sql (new file)

@@ -0,0 +1,8 @@
CREATE TABLE ddns_config (
	id INTEGER PRIMARY KEY NOT NULL CHECK(id = 0),
	host TEXT NOT NULL,
	username TEXT NOT NULL,
	password TEXT NOT NULL
);

INSERT INTO ddns_config (id, host, username, password) VALUES (0, "", "", "");

src/db/migrations/201706272327_mount_points_table/down.sql (new file)

@@ -0,0 +1 @@
DROP TABLE mount_points;

6  src/db/migrations/201706272327_mount_points_table/up.sql (new file)

@@ -0,0 +1,6 @@
CREATE TABLE mount_points (
	id INTEGER PRIMARY KEY NOT NULL,
	source TEXT NOT NULL,
	name TEXT NOT NULL,
	UNIQUE(name)
);
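Together these migrations hold everything the .toml file used to carry: users, mount points, DDNS credentials and indexer settings. The misc_settings row is seeded with a random auth secret (hex of 64 random bytes), a 1800-second reindex interval and a default art pattern; the Rust side reads it back with Diesel, roughly as follows (connection being a locked &SqliteConnection, as elsewhere in this commit):

    let settings: MiscSettings = misc_settings::table.get_result(connection)?;
    let secret = settings.auth_secret;                           // hex(randomblob(64)) from the seed row
    let reindex_every = settings.index_sleep_duration_seconds;   // 1800 unless overridden by load_config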

159  src/db/mod.rs

@@ -8,7 +8,8 @@ use std::fs;
|
|||
use std::path::{Path, PathBuf};
|
||||
use std::sync::{Arc, Mutex};
|
||||
|
||||
use db::schema::{directories, songs};
|
||||
use config::UserConfig;
|
||||
use db::schema::*;
|
||||
use errors::*;
|
||||
use vfs::Vfs;
|
||||
|
||||
|
@ -16,29 +17,26 @@ mod index;
|
|||
mod models;
|
||||
mod schema;
|
||||
|
||||
pub use self::index::{Index, IndexConfig};
|
||||
pub use self::models::{CollectionFile, Directory, Song};
|
||||
pub use self::index::Index;
|
||||
pub use self::models::*;
|
||||
|
||||
#[allow(dead_code)]
|
||||
const DB_MIGRATIONS_PATH: &'static str = "src/db/migrations";
|
||||
embed_migrations!("src/db/migrations");
|
||||
|
||||
pub struct DB {
|
||||
vfs: Arc<Vfs>,
|
||||
connection: Arc<Mutex<SqliteConnection>>,
|
||||
index: Index,
|
||||
}
|
||||
|
||||
impl DB {
|
||||
pub fn new(vfs: Arc<Vfs>, config: &IndexConfig) -> Result<DB> {
|
||||
let path = &config.path;
|
||||
println!("Index file path: {}", path.to_string_lossy());
|
||||
pub fn new(path: &Path) -> Result<DB> {
|
||||
println!("Database file path: {}", path.to_string_lossy());
|
||||
let connection =
|
||||
Arc::new(Mutex::new(SqliteConnection::establish(&path.to_string_lossy())?));
|
||||
let db = DB {
|
||||
vfs: vfs.clone(),
|
||||
connection: connection.clone(),
|
||||
index: Index::new(vfs, connection.clone(), config),
|
||||
index: Index::new(),
|
||||
};
|
||||
db.init()?;
|
||||
Ok(db)
|
||||
|
@ -53,6 +51,10 @@ impl DB {
|
|||
Ok(())
|
||||
}
|
||||
|
||||
pub fn get_connection(&self) -> Arc<Mutex<SqliteConnection>> {
|
||||
self.connection.clone()
|
||||
}
|
||||
|
||||
pub fn get_index(&self) -> &Index {
|
||||
&self.index
|
||||
}
|
||||
|
@ -78,13 +80,74 @@ impl DB {
|
|||
Ok(())
|
||||
}
|
||||
|
||||
fn virtualize_song(&self, mut song: Song) -> Option<Song> {
|
||||
song.path = match self.vfs.real_to_virtual(Path::new(&song.path)) {
|
||||
pub fn load_config(&self, config: &UserConfig) -> Result<()> {
|
||||
let connection = self.connection.lock().unwrap();
|
||||
let connection = connection.deref();
|
||||
if let Some(ref mount_dirs) = config.mount_dirs {
|
||||
diesel::delete(mount_points::table).execute(connection)?;
|
||||
diesel::insert(mount_dirs).into(mount_points::table).execute(connection)?;
|
||||
}
|
||||
|
||||
if let Some(ref config_users) = config.users {
|
||||
diesel::delete(users::table).execute(connection)?;
|
||||
for config_user in config_users {
|
||||
let new_user = NewUser::new(&config_user.name, &config_user.password);
|
||||
println!("new user: {}", &config_user.name);
|
||||
diesel::insert(&new_user).into(users::table).execute(connection)?;
|
||||
}
|
||||
}
|
||||
|
||||
if let Some(sleep_duration) = config.reindex_every_n_seconds {
|
||||
diesel::update(misc_settings::table)
|
||||
.set(misc_settings::columns::index_sleep_duration_seconds.eq(sleep_duration as i32))
|
||||
.execute(connection)?;
|
||||
}
|
||||
|
||||
if let Some(ref album_art_pattern) = config.album_art_pattern {
|
||||
diesel::update(misc_settings::table)
|
||||
.set(misc_settings::columns::index_album_art_pattern.eq(album_art_pattern))
|
||||
.execute(connection)?;
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn get_auth_secret(&self) -> Result<String> {
|
||||
let connection = self.connection.lock().unwrap();
|
||||
let connection = connection.deref();
|
||||
let misc : MiscSettings = misc_settings::table.get_result(connection)?;
|
||||
Ok(misc.auth_secret.to_owned())
|
||||
}
|
||||
|
||||
pub fn get_ddns_config(&self) -> Result<DDNSConfig> {
|
||||
let connection = self.connection.lock().unwrap();
|
||||
let connection = connection.deref();
|
||||
Ok(ddns_config::table.get_result(connection)?)
|
||||
}
|
||||
|
||||
pub fn locate(&self, virtual_path: &Path) -> Result<PathBuf> {
|
||||
let vfs = self.get_vfs()?;
|
||||
vfs.virtual_to_real(virtual_path)
|
||||
}
|
||||
|
||||
fn get_vfs(&self) -> Result<Vfs> {
|
||||
let mut vfs = Vfs::new();
|
||||
let connection = self.connection.lock().unwrap();
|
||||
let connection = connection.deref();
|
||||
let mount_points : Vec<MountPoint> = mount_points::table.get_results(connection)?;
|
||||
for mount_point in mount_points {
|
||||
vfs.mount(&Path::new(&mount_point.real_path), &mount_point.name)?;
|
||||
}
|
||||
Ok(vfs)
|
||||
}
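Because mount points now live in the mount_points table, the Vfs is rebuilt from the database whenever a path has to be resolved, and callers use DB::locate instead of holding a Vfs themselves (the virtual path below is illustrative):

    let real_path = db.locate(Path::new("root/Some Artist/Some Album"))?;   // mount name + relative path -> real PathBuf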
|
||||
|
||||
fn virtualize_song(&self, vfs: &Vfs, mut song: Song) -> Option<Song> {
|
||||
song.path = match vfs.real_to_virtual(Path::new(&song.path)) {
|
||||
Ok(p) => p.to_string_lossy().into_owned(),
|
||||
_ => return None,
|
||||
};
|
||||
if let Some(artwork_path) = song.artwork {
|
||||
song.artwork = match self.vfs.real_to_virtual(Path::new(&artwork_path)) {
|
||||
song.artwork = match vfs.real_to_virtual(Path::new(&artwork_path)) {
|
||||
Ok(p) => Some(p.to_string_lossy().into_owned()),
|
||||
_ => None,
|
||||
};
|
||||
|
@ -92,13 +155,13 @@ impl DB {
|
|||
Some(song)
|
||||
}
|
||||
|
||||
fn virtualize_directory(&self, mut directory: Directory) -> Option<Directory> {
|
||||
directory.path = match self.vfs.real_to_virtual(Path::new(&directory.path)) {
|
||||
fn virtualize_directory(&self, vfs: &Vfs, mut directory: Directory) -> Option<Directory> {
|
||||
directory.path = match vfs.real_to_virtual(Path::new(&directory.path)) {
|
||||
Ok(p) => p.to_string_lossy().into_owned(),
|
||||
_ => return None,
|
||||
};
|
||||
if let Some(artwork_path) = directory.artwork {
|
||||
directory.artwork = match self.vfs.real_to_virtual(Path::new(&artwork_path)) {
|
||||
directory.artwork = match vfs.real_to_virtual(Path::new(&artwork_path)) {
|
||||
Ok(p) => Some(p.to_string_lossy().into_owned()),
|
||||
_ => None,
|
||||
};
|
||||
|
@ -108,6 +171,7 @@ impl DB {
|
|||
|
||||
pub fn browse(&self, virtual_path: &Path) -> Result<Vec<CollectionFile>> {
|
||||
let mut output = Vec::new();
|
||||
let vfs = self.get_vfs()?;
|
||||
let connection = self.connection.lock().unwrap();
|
||||
let connection = connection.deref();
|
||||
|
||||
|
@ -118,14 +182,14 @@ impl DB {
|
|||
.load(connection)?;
|
||||
let virtual_directories = real_directories
|
||||
.into_iter()
|
||||
.filter_map(|s| self.virtualize_directory(s));
|
||||
.filter_map(|s| self.virtualize_directory(&vfs, s));
|
||||
output.extend(virtual_directories
|
||||
.into_iter()
|
||||
.map(|d| CollectionFile::Directory(d)));
|
||||
|
||||
} else {
|
||||
// Browse sub-directory
|
||||
let real_path = self.vfs.virtual_to_real(virtual_path)?;
|
||||
let real_path = vfs.virtual_to_real(virtual_path)?;
|
||||
let real_path_string = real_path.as_path().to_string_lossy().into_owned();
|
||||
|
||||
let real_directories: Vec<Directory> = directories::table
|
||||
|
@ -134,7 +198,7 @@ impl DB {
|
|||
.load(connection)?;
|
||||
let virtual_directories = real_directories
|
||||
.into_iter()
|
||||
.filter_map(|s| self.virtualize_directory(s));
|
||||
.filter_map(|s| self.virtualize_directory(&vfs, s));
|
||||
output.extend(virtual_directories.map(|d| CollectionFile::Directory(d)));
|
||||
|
||||
let real_songs: Vec<Song> = songs::table
|
||||
|
@ -143,7 +207,7 @@ impl DB {
|
|||
.load(connection)?;
|
||||
let virtual_songs = real_songs
|
||||
.into_iter()
|
||||
.filter_map(|s| self.virtualize_song(s));
|
||||
.filter_map(|s| self.virtualize_song(&vfs, s));
|
||||
output.extend(virtual_songs.map(|s| CollectionFile::Song(s)));
|
||||
}
|
||||
|
||||
|
@ -151,20 +215,22 @@ impl DB {
|
|||
}
|
||||
|
||||
pub fn flatten(&self, virtual_path: &Path) -> Result<Vec<Song>> {
|
||||
let vfs = self.get_vfs()?;
|
||||
let connection = self.connection.lock().unwrap();
|
||||
let connection = connection.deref();
|
||||
let real_path = self.vfs.virtual_to_real(virtual_path)?;
|
||||
let real_path = vfs.virtual_to_real(virtual_path)?;
|
||||
let like_path = real_path.as_path().to_string_lossy().into_owned() + "%";
|
||||
let real_songs: Vec<Song> = songs::table
|
||||
.filter(songs::columns::path.like(&like_path))
|
||||
.load(connection)?;
|
||||
let virtual_songs = real_songs
|
||||
.into_iter()
|
||||
.filter_map(|s| self.virtualize_song(s));
|
||||
.filter_map(|s| self.virtualize_song(&vfs, s));
|
||||
Ok(virtual_songs.collect::<Vec<_>>())
|
||||
}
|
||||
|
||||
pub fn get_random_albums(&self, count: i64) -> Result<Vec<Directory>> {
|
||||
let vfs = self.get_vfs()?;
|
||||
let connection = self.connection.lock().unwrap();
|
||||
let connection = connection.deref();
|
||||
let real_directories = directories::table
|
||||
|
@ -174,11 +240,12 @@ impl DB {
|
|||
.load(connection)?;
|
||||
let virtual_directories = real_directories
|
||||
.into_iter()
|
||||
.filter_map(|s| self.virtualize_directory(s));
|
||||
.filter_map(|s| self.virtualize_directory(&vfs, s));
|
||||
Ok(virtual_directories.collect::<Vec<_>>())
|
||||
}
|
||||
|
||||
pub fn get_recent_albums(&self, count: i64) -> Result<Vec<Directory>> {
|
||||
let vfs = self.get_vfs()?;
|
||||
let connection = self.connection.lock().unwrap();
|
||||
let connection = connection.deref();
|
||||
let real_directories: Vec<Directory> = directories::table
|
||||
|
@ -188,32 +255,34 @@ impl DB {
|
|||
.load(connection)?;
|
||||
let virtual_directories = real_directories
|
||||
.into_iter()
|
||||
.filter_map(|s| self.virtualize_directory(s));
|
||||
.filter_map(|s| self.virtualize_directory(&vfs, s));
|
||||
Ok(virtual_directories.collect::<Vec<_>>())
|
||||
}
|
||||
|
||||
pub fn auth(&self, username: &str, password: &str) -> Result<bool> {
|
||||
let connection = self.connection.lock().unwrap();
|
||||
let connection = connection.deref();
|
||||
let user: User = users::table
|
||||
.filter(users::columns::name.eq(username))
|
||||
.get_result(connection)?;
|
||||
Ok(user.verify_password(password))
|
||||
}
|
||||
}
|
||||
|
||||
fn _get_test_db(name: &str) -> DB {
|
||||
use vfs::VfsConfig;
|
||||
use std::collections::HashMap;
|
||||
let config_path = Path::new("test/config.toml");
|
||||
let config = UserConfig::parse(&config_path).unwrap();
|
||||
|
||||
let mut collection_path = PathBuf::new();
|
||||
collection_path.push("test");
|
||||
collection_path.push("collection");
|
||||
let mut mount_points = HashMap::new();
|
||||
mount_points.insert("root".to_owned(), collection_path);
|
||||
let vfs = Arc::new(Vfs::new(VfsConfig { mount_points: mount_points }));
|
||||
|
||||
let mut index_config = IndexConfig::new();
|
||||
index_config.path = PathBuf::new();
|
||||
index_config.path.push("test");
|
||||
index_config.path.push(name);
|
||||
|
||||
if index_config.path.exists() {
|
||||
fs::remove_file(&index_config.path).unwrap();
|
||||
let mut db_path = PathBuf::new();
|
||||
db_path.push("test");
|
||||
db_path.push(name);
|
||||
if db_path.exists() {
|
||||
fs::remove_file(&db_path).unwrap();
|
||||
}
|
||||
|
||||
DB::new(vfs, &index_config).unwrap()
|
||||
let db = DB::new(&db_path).unwrap();
|
||||
db.load_config(&config).unwrap();
|
||||
db
|
||||
}
|
||||
|
||||
#[test]
|
||||
|
@ -234,7 +303,7 @@ fn test_browse_top_level() {
|
|||
root_path.push("root");
|
||||
|
||||
let db = _get_test_db("browse_top_level.sqlite");
|
||||
db.get_index().update_index().unwrap();
|
||||
db.get_index().update_index(&db).unwrap();
|
||||
let results = db.browse(Path::new("")).unwrap();
|
||||
|
||||
assert_eq!(results.len(), 1);
|
||||
|
@ -255,7 +324,7 @@ fn test_browse() {
|
|||
tobokegao_path.push("Tobokegao");
|
||||
|
||||
let db = _get_test_db("browse.sqlite");
|
||||
db.get_index().update_index().unwrap();
|
||||
db.get_index().update_index(&db).unwrap();
|
||||
let results = db.browse(Path::new("root")).unwrap();
|
||||
|
||||
assert_eq!(results.len(), 2);
|
||||
|
@ -272,7 +341,7 @@ fn test_browse() {
|
|||
#[test]
|
||||
fn test_flatten() {
|
||||
let db = _get_test_db("flatten.sqlite");
|
||||
db.get_index().update_index().unwrap();
|
||||
db.get_index().update_index(&db).unwrap();
|
||||
let results = db.flatten(Path::new("root")).unwrap();
|
||||
assert_eq!(results.len(), 12);
|
||||
}
|
||||
|
@ -280,7 +349,7 @@ fn test_flatten() {
|
|||
#[test]
|
||||
fn test_random() {
|
||||
let db = _get_test_db("random.sqlite");
|
||||
db.get_index().update_index().unwrap();
|
||||
db.get_index().update_index(&db).unwrap();
|
||||
let results = db.get_random_albums(1).unwrap();
|
||||
assert_eq!(results.len(), 1);
|
||||
}
|
||||
|
@ -288,7 +357,7 @@ fn test_random() {
|
|||
#[test]
|
||||
fn test_recent() {
|
||||
let db = _get_test_db("recent.sqlite");
|
||||
db.get_index().update_index().unwrap();
|
||||
db.get_index().update_index(&db).unwrap();
|
||||
let results = db.get_recent_albums(2).unwrap();
|
||||
assert_eq!(results.len(), 2);
|
||||
assert!(results[0].date_added >= results[1].date_added);

src/db/models.rs

@@ -1,3 +1,9 @@
use rand;
|
||||
use ring::{digest, pbkdf2};
|
||||
|
||||
use db::schema::*;
|
||||
|
||||
// Collection content
|
||||
#[derive(Debug, Queryable, Serialize)]
|
||||
pub struct Song {
|
||||
#[serde(skip_serializing)]
|
||||
|
@ -34,3 +40,87 @@ pub enum CollectionFile {
|
|||
Directory(Directory),
|
||||
Song(Song),
|
||||
}
|
||||
|
||||
|
||||
// User
|
||||
#[derive(Debug, Queryable)]
|
||||
pub struct User {
|
||||
id: i32,
|
||||
pub name: String,
|
||||
pub password_salt: Vec<u8>,
|
||||
pub password_hash: Vec<u8>,
|
||||
}
|
||||
|
||||
impl User {
|
||||
pub fn verify_password(&self, attempted_password: &str) -> bool {
|
||||
pbkdf2::verify(DIGEST_ALG, HASH_ITERATIONS, &self.password_salt, attempted_password.as_bytes(), &self.password_hash).is_ok()
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Insertable)]
|
||||
#[table_name="users"]
|
||||
pub struct NewUser {
|
||||
pub name: String,
|
||||
pub password_salt: Vec<u8>,
|
||||
pub password_hash: Vec<u8>,
|
||||
}
|
||||
|
||||
static DIGEST_ALG: &'static pbkdf2::PRF = &pbkdf2::HMAC_SHA256;
|
||||
const CREDENTIAL_LEN: usize = digest::SHA256_OUTPUT_LEN;
|
||||
const HASH_ITERATIONS: u32 = 10000;
|
||||
type PasswordHash = [u8; CREDENTIAL_LEN];
|
||||
|
||||
impl NewUser {
|
||||
pub fn new(name: &str, password: &str) -> NewUser {
|
||||
let salt = rand::random::<[u8; 16]>().to_vec();
|
||||
let hash = NewUser::hash_password(&salt, password);
|
||||
NewUser {
|
||||
name: name.to_owned(),
|
||||
password_salt: salt,
|
||||
password_hash: hash,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn hash_password(salt: &Vec<u8>, password: &str) -> Vec<u8> {
|
||||
let mut hash: PasswordHash = [0; CREDENTIAL_LEN];
|
||||
pbkdf2::derive(DIGEST_ALG, HASH_ITERATIONS, salt, password.as_bytes(), &mut hash);
|
||||
hash.to_vec()
|
||||
}
|
||||
}
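Passwords are no longer stored or compared in clear text: each user row keeps a random 16-byte salt plus a PBKDF2-HMAC-SHA256 hash (10,000 iterations, via ring). A small sanity-check sketch with the types above (the credentials are made up):

    let user = NewUser::new("alice", "correct-horse");   // hashes the password at construction time
    assert_eq!(NewUser::hash_password(&user.password_salt, "correct-horse"),
               user.password_hash);                      // same salt + password => same hash
    // stored rows are checked later through User::verify_password, which calls pbkdf2::verify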
|
||||
|
||||
|
||||
// VFS
|
||||
#[derive(Debug, Queryable)]
|
||||
pub struct MountPoint {
|
||||
id: i32,
|
||||
pub real_path: String,
|
||||
pub name: String,
|
||||
}
|
||||
|
||||
#[derive(Deserialize, Insertable)]
|
||||
#[table_name="mount_points"]
|
||||
pub struct NewMountPoint {
|
||||
pub name: String,
|
||||
pub source: String,
|
||||
}
|
||||
|
||||
|
||||
// Misc Settings
|
||||
#[derive(Debug, Queryable)]
|
||||
pub struct MiscSettings {
|
||||
id: i32,
|
||||
pub auth_secret: String,
|
||||
pub index_sleep_duration_seconds: i32,
|
||||
pub index_album_art_pattern: String,
|
||||
}
|
||||
|
||||
|
||||
// DDNS Settings
|
||||
#[derive(Debug, Deserialize, Queryable)]
|
||||
pub struct DDNSConfig {
|
||||
id : i32,
|
||||
pub host: String,
|
||||
pub username: String,
|
||||
pub password: String,
|
||||
}
|
||||
|
||||
|
|
Binary file not shown.

30  src/ddns.rs

@@ -1,18 +1,18 @@
|
|||
use core::ops::Deref;
|
||||
use reqwest;
|
||||
use reqwest::header::{Authorization, Basic};
|
||||
use std::io;
|
||||
use std::sync::Arc;
|
||||
use std::thread;
|
||||
use std::time;
|
||||
|
||||
#[derive(Clone, Debug, Deserialize)]
|
||||
pub struct DDNSConfig {
|
||||
pub host: String,
|
||||
pub username: String,
|
||||
pub password: String,
|
||||
}
|
||||
use db::DB;
|
||||
use errors;
|
||||
|
||||
|
||||
#[derive(Debug)]
|
||||
enum DDNSError {
|
||||
InternalError(errors::Error),
|
||||
IoError(io::Error),
|
||||
ReqwestError(reqwest::Error),
|
||||
UpdateError(reqwest::StatusCode),
|
||||
|
@ -24,6 +24,12 @@ impl From<io::Error> for DDNSError {
|
|||
}
|
||||
}
|
||||
|
||||
impl From<errors::Error> for DDNSError {
|
||||
fn from(err: errors::Error) -> DDNSError {
|
||||
DDNSError::InternalError(err)
|
||||
}
|
||||
}
|
||||
|
||||
impl From<reqwest::Error> for DDNSError {
|
||||
fn from(err: reqwest::Error) -> DDNSError {
|
||||
DDNSError::ReqwestError(err)
|
||||
|
@ -33,7 +39,13 @@ impl From<reqwest::Error> for DDNSError {
|
|||
const DDNS_UPDATE_URL: &'static str = "https://ydns.io/api/v1/update/";
|
||||
|
||||
|
||||
fn update_my_ip(config: &DDNSConfig) -> Result<(), DDNSError> {
|
||||
fn update_my_ip(db: &DB) -> Result<(), DDNSError> {
|
||||
let config = db.get_ddns_config()?;
|
||||
if config.host.len() == 0 || config.username.len() == 0 {
|
||||
println!("Skipping DDNS update because credentials are missing");
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
let full_url = format!("{}?host={}", DDNS_UPDATE_URL, &config.host);
|
||||
let auth_header = Authorization(Basic {
|
||||
username: config.username.clone(),
|
||||
|
@ -50,9 +62,9 @@ fn update_my_ip(config: &DDNSConfig) -> Result<(), DDNSError> {
|
|||
Ok(())
|
||||
}
|
||||
|
||||
pub fn run(config: DDNSConfig) {
|
||||
pub fn run(db: &DB) {
|
||||
loop {
|
||||
match update_my_ip(&config) {
|
||||
match update_my_ip(db) {
|
||||
Err(e) => println!("Dynamic DNS Error: {:?}", e),
|
||||
Ok(_) => (),
|
||||
};
|
||||
|
|
47  src/main.rs

@@ -18,8 +18,10 @@ extern crate lewton;
|
|||
extern crate metaflac;
|
||||
extern crate mount;
|
||||
extern crate params;
|
||||
extern crate rand;
|
||||
extern crate reqwest;
|
||||
extern crate regex;
|
||||
extern crate ring;
|
||||
extern crate secure_session;
|
||||
extern crate serde;
|
||||
#[macro_use]
|
||||
|
@ -47,6 +49,7 @@ extern crate unix_daemonize;
|
|||
#[cfg(unix)]
|
||||
use unix_daemonize::{daemonize_redirect, ChdirMode};
|
||||
|
||||
use core::ops::Deref;
|
||||
use errors::*;
|
||||
use getopts::Options;
|
||||
use iron::prelude::*;
|
||||
|
@ -56,7 +59,6 @@ use std::path::Path;
|
|||
use std::sync::Arc;
|
||||
|
||||
mod api;
|
||||
mod collection;
|
||||
mod config;
|
||||
mod db;
|
||||
mod ddns;
|
||||
|
@ -104,29 +106,31 @@ fn run() -> Result<()> {
|
|||
options.optopt("w", "web", "set the path to web client files", "DIRECTORY");
|
||||
let matches = options.parse(&args[1..])?;
|
||||
|
||||
// Parse config
|
||||
let config_file_name = matches.opt_str("c");
|
||||
let config_file_path = config_file_name.map(|n| Path::new(n.as_str()).to_path_buf());
|
||||
let mut config = config::Config::parse(config_file_path)?;
|
||||
|
||||
// Init VFS
|
||||
let vfs = Arc::new(vfs::Vfs::new(config.vfs.clone()));
|
||||
|
||||
// Init DB
|
||||
println!("Starting up database");
|
||||
let db_file_name = matches.opt_str("d");
|
||||
let db_file_path = db_file_name.map(|n| Path::new(n.as_str()).to_path_buf());
|
||||
config.index.path = db_file_path.unwrap_or(config.index.path);
|
||||
let db = Arc::new(db::DB::new(vfs.clone(), &config.index)?);
|
||||
let db_file_name = matches.opt_str("d").unwrap_or("db.sqlite".to_owned());
|
||||
let db_file_path = Path::new(&db_file_name);
|
||||
let db = Arc::new(db::DB::new(&db_file_path)?);
|
||||
|
||||
// Parse config
|
||||
let config_file_name = matches.opt_str("c");
|
||||
let config_file_path = config_file_name.map(|p| Path::new(p.as_str()).to_path_buf());
|
||||
if let Some(path) = config_file_path {
|
||||
let config = config::UserConfig::parse(&path)?;
|
||||
db.load_config(&config)?;
|
||||
}
|
||||
|
||||
// Begin indexing
|
||||
let db_ref = db.clone();
|
||||
std::thread::spawn(move || db_ref.get_index().update_loop());
|
||||
std::thread::spawn(move || {
|
||||
let db = db_ref.deref();
|
||||
db.get_index().update_loop(db);
|
||||
});
|
||||
|
||||
// Mount API
|
||||
println!("Mounting API");
|
||||
let mut mount = Mount::new();
|
||||
let mut collection = collection::Collection::new(vfs, db);
|
||||
collection.load_config(&config)?;
|
||||
let handler = api::get_handler(collection, &config.secret);
|
||||
let handler = api::get_handler(db.clone())?;
|
||||
mount.mount("/api/", handler);
|
||||
|
||||
// Mount static files
|
||||
|
@ -144,13 +148,8 @@ fn run() -> Result<()> {
|
|||
let mut server = Iron::new(mount).http(("0.0.0.0", 5050))?;
|
||||
|
||||
// Start DDNS updates
|
||||
match config.ddns {
|
||||
Some(ref ddns_config) => {
|
||||
let ddns_config = ddns_config.clone();
|
||||
std::thread::spawn(|| { ddns::run(ddns_config); });
|
||||
}
|
||||
None => (),
|
||||
};
|
||||
let db_ref = db.clone();
|
||||
std::thread::spawn(move || { ddns::run(db_ref.deref()); });
|
||||
|
||||
// Run UI
|
||||
ui::run();

src/utils.rs

@@ -16,15 +16,6 @@
author: "permafrost",
|
||||
};
|
||||
|
||||
pub fn get_config_root() -> Result<PathBuf> {
|
||||
if let Ok(root) = app_root(AppDataType::UserConfig, &APP_INFO) {
|
||||
fs::create_dir_all(&root)
|
||||
.chain_err(|| format!("opening user config: {}", root.display()))?;
|
||||
return Ok(root);
|
||||
}
|
||||
bail!("Could not retrieve config directory root");
|
||||
}
|
||||
|
||||
pub fn get_data_root() -> Result<PathBuf> {
|
||||
if let Ok(root) = app_root(AppDataType::UserData, &APP_INFO) {
|
||||
fs::create_dir_all(&root)
|
||||
|
|
40  src/vfs.rs

@@ -4,24 +4,19 @@ use std::path::Path;
|
|||
|
||||
use errors::*;
|
||||
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct VfsConfig {
|
||||
pub mount_points: HashMap<String, PathBuf>,
|
||||
}
|
||||
|
||||
impl VfsConfig {
|
||||
pub fn new() -> VfsConfig {
|
||||
VfsConfig { mount_points: HashMap::new() }
|
||||
}
|
||||
}
|
||||
|
||||
pub struct Vfs {
|
||||
mount_points: HashMap<String, PathBuf>,
|
||||
}
|
||||
|
||||
impl Vfs {
|
||||
pub fn new(config: VfsConfig) -> Vfs {
|
||||
Vfs { mount_points: config.mount_points }
|
||||
pub fn new() -> Vfs {
|
||||
Vfs { mount_points: HashMap::new() }
|
||||
}
|
||||
|
||||
pub fn mount(&mut self, real_path: &Path, name: &str) -> Result<()> {
|
||||
self.mount_points.insert(name.to_owned(), real_path.to_path_buf());
|
||||
Ok(())
|
||||
}
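VfsConfig is gone: a Vfs starts empty and mount points are registered one by one, which is what DB::get_vfs does from the mount_points table. Typical use, as in the updated tests (the directory and file names are arbitrary):

    let mut vfs = Vfs::new();
    vfs.mount(Path::new("test_dir"), "root")?;
    let real = vfs.virtual_to_real(Path::new("root/song.mp3"))?;   // -> test_dir/song.mp3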
|
||||
|
||||
pub fn real_to_virtual(&self, real_path: &Path) -> Result<PathBuf> {
|
||||
|
@ -65,11 +60,8 @@ impl Vfs {
|
|||
|
||||
#[test]
|
||||
fn test_virtual_to_real() {
|
||||
let mut config = VfsConfig::new();
|
||||
config
|
||||
.mount_points
|
||||
.insert("root".to_owned(), Path::new("test_dir").to_path_buf());
|
||||
let vfs = Vfs::new(config);
|
||||
let mut vfs = Vfs::new();
|
||||
vfs.mount(Path::new("test_dir"), "root").unwrap();
|
||||
|
||||
let mut correct_path = PathBuf::new();
|
||||
correct_path.push("test_dir");
|
||||
|
@ -87,11 +79,8 @@ fn test_virtual_to_real() {
|
|||
|
||||
#[test]
|
||||
fn test_virtual_to_real_no_trail() {
|
||||
let mut config = VfsConfig::new();
|
||||
config
|
||||
.mount_points
|
||||
.insert("root".to_owned(), Path::new("test_dir").to_path_buf());
|
||||
let vfs = Vfs::new(config);
|
||||
let mut vfs = Vfs::new();
|
||||
vfs.mount(Path::new("test_dir"), "root").unwrap();
|
||||
let correct_path = Path::new("test_dir");
|
||||
let found_path = vfs.virtual_to_real(Path::new("root")).unwrap();
|
||||
assert!(found_path.to_str() == correct_path.to_str());
|
||||
|
@ -99,11 +88,8 @@ fn test_virtual_to_real_no_trail() {
|
|||
|
||||
#[test]
|
||||
fn test_real_to_virtual() {
|
||||
let mut config = VfsConfig::new();
|
||||
config
|
||||
.mount_points
|
||||
.insert("root".to_owned(), Path::new("test_dir").to_path_buf());
|
||||
let vfs = Vfs::new(config);
|
||||
let mut vfs = Vfs::new();
|
||||
vfs.mount(Path::new("test_dir"), "root").unwrap();
|
||||
|
||||
let mut correct_path = PathBuf::new();
|
||||
correct_path.push("root");
|
||||
|
|
5  test/config.toml (new file)

@@ -0,0 +1,5 @@
album_art_pattern = '^Folder\.(png|jpg|jpeg)$'

[[mount_dirs]]
name = 'root'
source = 'test/collection'