mirror of https://github.com/launchbadge/sqlx
synced 2024-11-10 06:24:16 +00:00
implement cargo sqlx prepare
also organize code more clearly in `cargo-sqlx`
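Based on the `prepare` doc comments added in this diff, the intended workflow would look roughly like this (illustrative invocations, assuming the binary is installed so Cargo can dispatch `cargo sqlx` to `cargo-sqlx`):

    cargo sqlx prepare          # writes sqlx-data.json to the current directory
    cargo sqlx prepare --check  # for CI: exits nonzero if sqlx-data.json is out of date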
parent 6913695588
commit 21041ff55e
13 changed files with 505 additions and 324 deletions
16 Cargo.lock (generated)
@@ -309,6 +309,9 @@ dependencies = [
 "dialoguer",
 "dotenv",
 "futures 0.3.4",
+"glob",
+"serde",
+"serde_json",
 "sqlx",
 "structopt",
 "tokio 0.2.13",
@@ -1657,18 +1660,18 @@ checksum = "388a1df253eca08550bef6c72392cfe7c30914bf41df5269b68cbd6ff8f570a3"

 [[package]]
 name = "serde"
-version = "1.0.105"
+version = "1.0.110"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e707fbbf255b8fc8c3b99abb91e7257a622caeb20a9818cbadbeeede4e0932ff"
+checksum = "99e7b308464d16b56eba9964e4972a3eee817760ab60d88c3f86e1fecb08204c"
 dependencies = [
 "serde_derive",
 ]

 [[package]]
 name = "serde_derive"
-version = "1.0.105"
+version = "1.0.110"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "ac5d00fc561ba2724df6758a17de23df5914f20e41cb00f94d5b7ae42fffaff8"
+checksum = "818fbf6bfa9a42d3bfcaca148547aa00c7b915bec71d1757aa2d44ca68771984"
 dependencies = [
 "proc-macro2",
 "quote",
@@ -1677,10 +1680,11 @@ dependencies = [

 [[package]]
 name = "serde_json"
-version = "1.0.50"
+version = "1.0.53"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "78a7a12c167809363ec3bd7329fc0a3369056996de43c4b37ef3cd54a6ce4867"
+checksum = "993948e75b189211a9b31a7528f950c6adc21f9720b6438ff80a7fa2f864cea2"
 dependencies = [
+"indexmap",
 "itoa",
 "ryu",
 "serde",
cargo-sqlx/Cargo.toml
@@ -9,15 +9,21 @@ homepage = "https://github.com/launchbadge/sqlx"
 repository = "https://github.com/launchbadge/sqlx"
 keywords = ["database", "postgres", "database-management", "migration"]
 categories = ["database", "command-line-utilities"]
+default-run = "sqlx"

 [[bin]]
 name = "sqlx"
 path = "src/main.rs"

+# enables invocation as `cargo sqlx`; required for `prepare` subcommand
+[[bin]]
+name = "cargo-sqlx"
+path = "src/main.rs"
+
 [dependencies]
 dotenv = "0.15"
 tokio = { version = "0.2", features = ["macros"] }
-sqlx = { version = "0.3", path = "..", default-features = false, features = [ "runtime-tokio" ] }
+sqlx = { version = "0.3", path = "..", default-features = false, features = [ "runtime-tokio", "offline" ] }
 futures = "0.3"
 structopt = "0.3"
 chrono = "0.4"
@@ -26,6 +32,9 @@ url = { version = "2.1.1", default-features = false }
 async-trait = "0.1.30"
 console = "0.10.0"
 dialoguer = "0.5.0"
+serde_json = { version = "1.0.53", features = ["preserve_order"] }
+serde = "1.0.110"
+glob = "0.3.0"

 [features]
 default = [ "postgres", "sqlite", "mysql" ]
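The second `[[bin]]` above is what makes the `cargo sqlx` invocation work: Cargo forwards an unknown subcommand `cargo <name>` to a `cargo-<name>` executable on the PATH, passing `<name>` through as the first argument. A minimal sketch of how a shared main.rs could accept both invocation styles; this helper is hypothetical and not part of the commit:

    // Hypothetical: strip the leading "sqlx" argument that Cargo inserts
    // when this binary is invoked as `cargo sqlx ...`, so one structopt
    // definition can parse both `sqlx <cmd>` and `cargo sqlx <cmd>`.
    fn normalized_args() -> Vec<String> {
        let mut args: Vec<String> = std::env::args().collect();
        if args.get(1).map(String::as_str) == Some("sqlx") {
            args.remove(1);
        }
        args
    }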
33 cargo-sqlx/src/database_migrator.rs (deleted)
@@ -1,33 +0,0 @@
-use anyhow::Result;
-use async_trait::async_trait;
-
-#[async_trait]
-pub trait MigrationTransaction {
-    async fn commit(self: Box<Self>) -> Result<()>;
-    async fn rollback(self: Box<Self>) -> Result<()>;
-    async fn check_if_applied(&mut self, migration: &str) -> Result<bool>;
-    async fn execute_migration(&mut self, migration_sql: &str) -> Result<()>;
-    async fn save_applied_migration(&mut self, migration_name: &str) -> Result<()>;
-}
-
-#[async_trait]
-pub trait DatabaseMigrator {
-    // Misc info
-    fn database_type(&self) -> String;
-    fn get_database_name(&self) -> Result<String>;
-
-    // Features
-    fn can_migrate_database(&self) -> bool;
-    fn can_create_database(&self) -> bool;
-    fn can_drop_database(&self) -> bool;
-
-    // Database creation
-    async fn check_if_database_exists(&self, db_name: &str) -> Result<bool>;
-    async fn create_database(&self, db_name: &str) -> Result<()>;
-    async fn drop_database(&self, db_name: &str) -> Result<()>;
-
-    // Migration
-    async fn create_migration_table(&self) -> Result<()>;
-    async fn get_migrations(&self) -> Result<Vec<String>>;
-    async fn begin_migration(&self) -> Result<Box<dyn MigrationTransaction>>;
-}
57 cargo-sqlx/src/db.rs (new file)
@@ -0,0 +1,57 @@
+use crate::migrator::DatabaseMigrator;
+use dialoguer::Confirmation;
+
+use anyhow::bail;
+
+pub async fn run_create() -> anyhow::Result<()> {
+    let migrator = crate::migrator::get()?;
+
+    if !migrator.can_create_database() {
+        bail!(
+            "Database creation is not implemented for {}",
+            migrator.database_type()
+        );
+    }
+
+    let db_name = migrator.get_database_name()?;
+    let db_exists = migrator.check_if_database_exists(&db_name).await?;
+
+    if !db_exists {
+        println!("Creating database: {}", db_name);
+        Ok(migrator.create_database(&db_name).await?)
+    } else {
+        println!("Database already exists, aborting");
+        Ok(())
+    }
+}
+
+pub async fn run_drop() -> anyhow::Result<()> {
+    let migrator = crate::migrator::get()?;
+
+    if !migrator.can_drop_database() {
+        bail!(
+            "Database drop is not implemented for {}",
+            migrator.database_type()
+        );
+    }
+
+    let db_name = migrator.get_database_name()?;
+    let db_exists = migrator.check_if_database_exists(&db_name).await?;
+
+    if db_exists {
+        if !Confirmation::new()
+            .with_text(&format!("\nAre you sure you want to drop the database: {}?", db_name))
+            .default(false)
+            .interact()?
+        {
+            println!("Aborting");
+            return Ok(());
+        }
+
+        println!("Dropping database: {}", db_name);
+        Ok(migrator.drop_database(&db_name).await?)
+    } else {
+        println!("Database does not exist, aborting");
+        Ok(())
+    }
+}

cargo-sqlx/src/main.rs
@@ -8,21 +8,17 @@ use dotenv::dotenv;
 use structopt::StructOpt;

-use anyhow::{anyhow, Context, Result};
-use console::style;
-use dialoguer::Confirmation;
+use anyhow::{anyhow, Context};

-mod database_migrator;
-mod mysql;
-mod postgres;
-mod sqlite;
+mod migrator;

-use database_migrator::DatabaseMigrator;
-use mysql::MySql;
-use postgres::Postgres;
-use sqlite::Sqlite;
+mod db;
+mod migration;
+mod prepare;

-const MIGRATION_FOLDER: &'static str = "migrations";
+use migrator::DatabaseMigrator;

 /// Sqlx commandline tool
 #[derive(StructOpt, Debug)]
@@ -33,6 +29,26 @@ enum Opt {

     #[structopt(alias = "db")]
     Database(DatabaseCommand),
+
+    /// Enables offline mode for a project utilizing `query!()` and related macros.
+    /// May only be run as `cargo sqlx prepare`.
+    ///
+    /// Saves data for all invocations of `query!()` and friends in the project so that it may be
+    /// built in offline mode, i.e. so compilation does not require connecting to a running database.
+    /// Outputs to `sqlx-data.json` in the current directory.
+    ///
+    /// Offline mode can be activated simply by removing `DATABASE_URL` from the environment or
+    /// building without a `.env` file.
+    #[structopt(alias = "prep")]
+    Prepare {
+        /// If this flag is passed, instead of overwriting `sqlx-data.json` in the current directory,
+        /// that file is loaded and compared against the current output of the prepare step; if
+        /// there is a mismatch, an error is reported and the process exits with a nonzero exit code.
+        ///
+        /// Intended for use in CI.
+        #[structopt(long)]
+        check: bool,
+    },
 }

 /// Adds and runs migrations. Alias: mig
@@ -61,285 +77,25 @@ enum DatabaseCommand {
 }

 #[tokio::main]
-async fn main() -> Result<()> {
+async fn main() -> anyhow::Result<()> {
     dotenv().ok();

-    let db_url_raw = env::var("DATABASE_URL").context("Failed to find 'DATABASE_URL'")?;
-
-    let db_url = Url::parse(&db_url_raw)?;
-
-    // This code is taken from: https://github.com/launchbadge/sqlx/blob/master/sqlx-macros/src/lib.rs#L63
-    match db_url.scheme() {
-        #[cfg(feature = "sqlite")]
-        "sqlite" => run_command(&Sqlite::new(db_url_raw)).await?,
-        #[cfg(not(feature = "sqlite"))]
-        "sqlite" => return Err(anyhow!("Not implemented. DATABASE_URL {} has the scheme of a SQLite database but the `sqlite` feature of sqlx was not enabled",
-            db_url)),
-
-        #[cfg(feature = "postgres")]
-        "postgresql" | "postgres" => run_command(&Postgres::new(db_url_raw)).await?,
-        #[cfg(not(feature = "postgres"))]
-        "postgresql" | "postgres" => Err(anyhow!("DATABASE_URL {} has the scheme of a Postgres database but the `postgres` feature of sqlx was not enabled",
-            db_url)),
-
-        #[cfg(feature = "mysql")]
-        "mysql" | "mariadb" => run_command(&MySql::new(db_url_raw)).await?,
-        #[cfg(not(feature = "mysql"))]
-        "mysql" | "mariadb" => return Err(anyhow!(
-            "DATABASE_URL {} has the scheme of a MySQL/MariaDB database but the `mysql` feature of sqlx was not enabled",
-            db_url
-        )),
-
-        scheme => return Err(anyhow!("unexpected scheme {:?} in DATABASE_URL {}", scheme, db_url)),
-    }
-
-    println!("All done!");
-    Ok(())
-}
-
-async fn run_command(migrator: &dyn DatabaseMigrator) -> Result<()> {
     let opt = Opt::from_args();

     match opt {
         Opt::Migrate(command) => match command {
-            MigrationCommand::Add { name } => add_migration_file(&name)?,
-            MigrationCommand::Run => run_migrations(migrator).await?,
-            MigrationCommand::List => list_migrations(migrator).await?,
+            MigrationCommand::Add { name } => migration::add_file(&name)?,
+            MigrationCommand::Run => migration::run().await?,
+            MigrationCommand::List => migration::list().await?,
         },
         Opt::Database(command) => match command {
-            DatabaseCommand::Create => run_create_database(migrator).await?,
-            DatabaseCommand::Drop => run_drop_database(migrator).await?,
+            DatabaseCommand::Create => db::run_create().await?,
+            DatabaseCommand::Drop => db::run_drop().await?,
         },
+        Opt::Prepare { check: false } => prepare::run()?,
+        Opt::Prepare { check: true } => prepare::check()?,
     };

     println!("All done!");
     Ok(())
 }
-
-async fn run_create_database(migrator: &dyn DatabaseMigrator) -> Result<()> {
-    if !migrator.can_create_database() {
-        return Err(anyhow!(
-            "Database creation is not implemented for {}",
-            migrator.database_type()
-        ));
-    }
-
-    let db_name = migrator.get_database_name()?;
-    let db_exists = migrator.check_if_database_exists(&db_name).await?;
-
-    if !db_exists {
-        println!("Creating database: {}", db_name);
-        Ok(migrator.create_database(&db_name).await?)
-    } else {
-        println!("Database already exists, aborting");
-        Ok(())
-    }
-}
-
-async fn run_drop_database(migrator: &dyn DatabaseMigrator) -> Result<()> {
-    if !migrator.can_drop_database() {
-        return Err(anyhow!(
-            "Database drop is not implemented for {}",
-            migrator.database_type()
-        ));
-    }
-
-    let db_name = migrator.get_database_name()?;
-    let db_exists = migrator.check_if_database_exists(&db_name).await?;
-
-    if db_exists {
-        if !Confirmation::new()
-            .with_text(&format!(
-                "\nAre you sure you want to drop the database: {}?",
-                db_name
-            ))
-            .default(false)
-            .interact()?
-        {
-            println!("Aborting");
-            return Ok(());
-        }
-
-        println!("Dropping database: {}", db_name);
-        Ok(migrator.drop_database(&db_name).await?)
-    } else {
-        println!("Database does not exists, aborting");
-        Ok(())
-    }
-}
-
-fn add_migration_file(name: &str) -> Result<()> {
-    use chrono::prelude::*;
-    use std::path::PathBuf;
-
-    fs::create_dir_all(MIGRATION_FOLDER).context("Unable to create migrations directory")?;
-
-    let dt = Utc::now();
-    let mut file_name = dt.format("%Y-%m-%d_%H-%M-%S").to_string();
-    file_name.push_str("_");
-    file_name.push_str(name);
-    file_name.push_str(".sql");
-
-    let mut path = PathBuf::new();
-    path.push(MIGRATION_FOLDER);
-    path.push(&file_name);
-
-    let mut file = File::create(path).context("Failed to create file")?;
-    file.write_all(b"-- Add migration script here")
-        .context("Could not write to file")?;
-
-    println!("Created migration: '{}'", file_name);
-    Ok(())
-}
-
-pub struct Migration {
-    pub name: String,
-    pub sql: String,
-}
-
-fn load_migrations() -> Result<Vec<Migration>> {
-    let entries = fs::read_dir(&MIGRATION_FOLDER).context("Could not find 'migrations' dir")?;
-
-    let mut migrations = Vec::new();
-
-    for e in entries {
-        if let Ok(e) = e {
-            if let Ok(meta) = e.metadata() {
-                if !meta.is_file() {
-                    continue;
-                }
-
-                if let Some(ext) = e.path().extension() {
-                    if ext != "sql" {
-                        println!("Wrong ext: {:?}", ext);
-                        continue;
-                    }
-                } else {
-                    continue;
-                }
-
-                let mut file = File::open(e.path())
-                    .with_context(|| format!("Failed to open: '{:?}'", e.file_name()))?;
-                let mut contents = String::new();
-                file.read_to_string(&mut contents)
-                    .with_context(|| format!("Failed to read: '{:?}'", e.file_name()))?;
-
-                migrations.push(Migration {
-                    name: e.file_name().to_str().unwrap().to_string(),
-                    sql: contents,
-                });
-            }
-        }
-    }
-
-    migrations.sort_by(|a, b| a.name.partial_cmp(&b.name).unwrap());
-
-    Ok(migrations)
-}
-
-async fn run_migrations(migrator: &dyn DatabaseMigrator) -> Result<()> {
-    if !migrator.can_migrate_database() {
-        return Err(anyhow!(
-            "Database migrations not supported for {}",
-            migrator.database_type()
-        ));
-    }
-
-    migrator.create_migration_table().await?;
-
-    let migrations = load_migrations()?;
-
-    for mig in migrations.iter() {
-        let mut tx = migrator.begin_migration().await?;
-
-        if tx.check_if_applied(&mig.name).await? {
-            println!("Already applied migration: '{}'", mig.name);
-            continue;
-        }
-        println!("Applying migration: '{}'", mig.name);
-
-        tx.execute_migration(&mig.sql)
-            .await
-            .with_context(|| format!("Failed to run migration {:?}", &mig.name))?;
-
-        tx.save_applied_migration(&mig.name)
-            .await
-            .context("Failed to insert migration")?;
-
-        tx.commit().await.context("Failed")?;
-    }
-
-    Ok(())
-}
-
-async fn list_migrations(migrator: &dyn DatabaseMigrator) -> Result<()> {
-    if !migrator.can_migrate_database() {
-        return Err(anyhow!(
-            "Database migrations not supported for {}",
-            migrator.database_type()
-        ));
-    }
-
-    let file_migrations = load_migrations()?;
-
-    if migrator
-        .check_if_database_exists(&migrator.get_database_name()?)
-        .await?
-    {
-        let applied_migrations = migrator.get_migrations().await.unwrap_or_else(|_| {
-            println!("Could not retrive data from migration table");
-            Vec::new()
-        });
-
-        let mut width = 0;
-        for mig in file_migrations.iter() {
-            width = std::cmp::max(width, mig.name.len());
-        }
-        for mig in file_migrations.iter() {
-            let status = if applied_migrations
-                .iter()
-                .find(|&m| mig.name == *m)
-                .is_some()
-            {
-                style("Applied").green()
-            } else {
-                style("Not Applied").yellow()
-            };
-
-            println!("{:width$}\t{}", mig.name, status, width = width);
-        }
-
-        let orphans = check_for_orphans(file_migrations, applied_migrations);
-
-        if let Some(orphans) = orphans {
-            println!("\nFound migrations applied in the database that does not have a corresponding migration file:");
-            for name in orphans {
-                println!("{:width$}\t{}", name, style("Orphan").red(), width = width);
-            }
-        }
-    } else {
-        println!("No database found, listing migrations");
-
-        for mig in file_migrations {
-            println!("{}", mig.name);
-        }
-    }
-
-    Ok(())
-}
-
-fn check_for_orphans(
-    file_migrations: Vec<Migration>,
-    applied_migrations: Vec<String>,
-) -> Option<Vec<String>> {
-    let orphans: Vec<String> = applied_migrations
-        .iter()
-        .filter(|m| !file_migrations.iter().any(|fm| fm.name == **m))
-        .cloned()
-        .collect();
-
-    if orphans.len() > 0 {
-        Some(orphans)
-    } else {
-        None
-    }
-}
187 cargo-sqlx/src/migration.rs (new file)
@@ -0,0 +1,187 @@
+use anyhow::{bail, Context};
+use console::style;
+use std::fs::{self, File};
+use std::io::{Read, Write};
+
+const MIGRATION_FOLDER: &'static str = "migrations";
+
+pub struct Migration {
+    pub name: String,
+    pub sql: String,
+}
+
+pub fn add_file(name: &str) -> anyhow::Result<()> {
+    use chrono::prelude::*;
+    use std::path::PathBuf;
+
+    fs::create_dir_all(MIGRATION_FOLDER).context("Unable to create migrations directory")?;
+
+    let dt = Utc::now();
+    let mut file_name = dt.format("%Y-%m-%d_%H-%M-%S").to_string();
+    file_name.push_str("_");
+    file_name.push_str(name);
+    file_name.push_str(".sql");
+
+    let mut path = PathBuf::new();
+    path.push(MIGRATION_FOLDER);
+    path.push(&file_name);
+
+    let mut file = File::create(path).context("Failed to create file")?;
+    file.write_all(b"-- Add migration script here")
+        .context("Could not write to file")?;
+
+    println!("Created migration: '{}'", file_name);
+    Ok(())
+}
+
+pub async fn run() -> anyhow::Result<()> {
+    let migrator = crate::migrator::get()?;
+
+    if !migrator.can_migrate_database() {
+        bail!(
+            "Database migrations not supported for {}",
+            migrator.database_type()
+        );
+    }
+
+    migrator.create_migration_table().await?;
+
+    let migrations = load_migrations()?;
+
+    for mig in migrations.iter() {
+        let mut tx = migrator.begin_migration().await?;
+
+        if tx.check_if_applied(&mig.name).await? {
+            println!("Already applied migration: '{}'", mig.name);
+            continue;
+        }
+        println!("Applying migration: '{}'", mig.name);
+
+        tx.execute_migration(&mig.sql)
+            .await
+            .with_context(|| format!("Failed to run migration {:?}", &mig.name))?;
+
+        tx.save_applied_migration(&mig.name)
+            .await
+            .context("Failed to insert migration")?;
+
+        tx.commit().await.context("Failed")?;
+    }
+
+    Ok(())
+}
+
+pub async fn list() -> anyhow::Result<()> {
+    let migrator = crate::migrator::get()?;
+
+    if !migrator.can_migrate_database() {
+        bail!(
+            "Database migrations not supported for {}",
+            migrator.database_type()
+        );
+    }
+
+    let file_migrations = load_migrations()?;
+
+    if migrator
+        .check_if_database_exists(&migrator.get_database_name()?)
+        .await?
+    {
+        let applied_migrations = migrator.get_migrations().await.unwrap_or_else(|_| {
+            println!("Could not retrieve data from migration table");
+            Vec::new()
+        });
+
+        let mut width = 0;
+        for mig in file_migrations.iter() {
+            width = std::cmp::max(width, mig.name.len());
+        }
+        for mig in file_migrations.iter() {
+            let status = if applied_migrations
+                .iter()
+                .find(|&m| mig.name == *m)
+                .is_some()
+            {
+                style("Applied").green()
+            } else {
+                style("Not Applied").yellow()
+            };
+
+            println!("{:width$}\t{}", mig.name, status, width = width);
+        }
+
+        let orphans = check_for_orphans(file_migrations, applied_migrations);
+
+        if let Some(orphans) = orphans {
+            println!("\nFound migrations applied in the database that do not have a corresponding migration file:");
+            for name in orphans {
+                println!("{:width$}\t{}", name, style("Orphan").red(), width = width);
+            }
+        }
+    } else {
+        println!("No database found, listing migrations");
+
+        for mig in file_migrations {
+            println!("{}", mig.name);
+        }
+    }
+
+    Ok(())
+}
+
+fn load_migrations() -> anyhow::Result<Vec<Migration>> {
+    let entries = fs::read_dir(&MIGRATION_FOLDER).context("Could not find 'migrations' dir")?;
+
+    let mut migrations = Vec::new();
+
+    for e in entries {
+        if let Ok(e) = e {
+            if let Ok(meta) = e.metadata() {
+                if !meta.is_file() {
+                    continue;
+                }
+
+                if let Some(ext) = e.path().extension() {
+                    if ext != "sql" {
+                        println!("Wrong ext: {:?}", ext);
+                        continue;
+                    }
+                } else {
+                    continue;
+                }
+
+                let mut file = File::open(e.path())
+                    .with_context(|| format!("Failed to open: '{:?}'", e.file_name()))?;
+                let mut contents = String::new();
+                file.read_to_string(&mut contents)
+                    .with_context(|| format!("Failed to read: '{:?}'", e.file_name()))?;
+
+                migrations.push(Migration {
+                    name: e.file_name().to_str().unwrap().to_string(),
+                    sql: contents,
+                });
+            }
+        }
+    }
+
+    migrations.sort_by(|a, b| a.name.partial_cmp(&b.name).unwrap());
+
+    Ok(migrations)
+}
+
+fn check_for_orphans(
+    file_migrations: Vec<Migration>,
+    applied_migrations: Vec<String>,
+) -> Option<Vec<String>> {
+    let orphans: Vec<String> = applied_migrations
+        .iter()
+        .filter(|m| !file_migrations.iter().any(|fm| fm.name == **m))
+        .cloned()
+        .collect();
+
+    if orphans.len() > 0 {
+        Some(orphans)
+    } else {
+        None
+    }
+}
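As a concrete example of the naming scheme in `add_file` above: adding a migration named `create_users` at 2020-05-19 14:03:22 UTC (timestamp invented for illustration) would create:

    migrations/2020-05-19_14-03-22_create_users.sql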
72 cargo-sqlx/src/migrator/mod.rs (new file)
@@ -0,0 +1,72 @@
+use anyhow::{bail, Context, Result};
+use async_trait::async_trait;
+use std::env;
+use url::Url;
+
+#[cfg(feature = "postgres")]
+mod postgres;
+
+#[cfg(feature = "sqlite")]
+mod sqlite;
+
+#[async_trait]
+pub trait MigrationTransaction {
+    async fn commit(self: Box<Self>) -> Result<()>;
+    async fn rollback(self: Box<Self>) -> Result<()>;
+    async fn check_if_applied(&mut self, migration: &str) -> Result<bool>;
+    async fn execute_migration(&mut self, migration_sql: &str) -> Result<()>;
+    async fn save_applied_migration(&mut self, migration_name: &str) -> Result<()>;
+}
+
+#[async_trait]
+pub trait DatabaseMigrator {
+    // Misc info
+    fn database_type(&self) -> String;
+    fn get_database_name(&self) -> Result<String>;
+
+    // Features
+    fn can_migrate_database(&self) -> bool;
+    fn can_create_database(&self) -> bool;
+    fn can_drop_database(&self) -> bool;
+
+    // Database creation
+    async fn check_if_database_exists(&self, db_name: &str) -> Result<bool>;
+    async fn create_database(&self, db_name: &str) -> Result<()>;
+    async fn drop_database(&self, db_name: &str) -> Result<()>;
+
+    // Migration
+    async fn create_migration_table(&self) -> Result<()>;
+    async fn get_migrations(&self) -> Result<Vec<String>>;
+    async fn begin_migration(&self) -> Result<Box<dyn MigrationTransaction>>;
+}
+
+pub fn get() -> Result<Box<dyn DatabaseMigrator>> {
+    let db_url_raw = env::var("DATABASE_URL").context("Failed to find 'DATABASE_URL'")?;
+
+    let db_url = Url::parse(&db_url_raw)?;
+
+    // This code is taken from: https://github.com/launchbadge/sqlx/blob/master/sqlx-macros/src/lib.rs#L63
+    match db_url.scheme() {
+        #[cfg(feature = "sqlite")]
+        "sqlite" => Ok(Box::new(self::sqlite::Sqlite::new(db_url_raw))),
+        #[cfg(not(feature = "sqlite"))]
+        "sqlite" => bail!("Not implemented. DATABASE_URL {} has the scheme of a SQLite database but the `sqlite` feature of sqlx was not enabled",
+            db_url),
+
+        #[cfg(feature = "postgres")]
+        "postgresql" | "postgres" => Ok(Box::new(self::postgres::Postgres::new(db_url_raw))),
+        #[cfg(not(feature = "postgres"))]
+        "postgresql" | "postgres" => bail!("DATABASE_URL {} has the scheme of a Postgres database but the `postgres` feature of sqlx was not enabled",
+            db_url),
+
+        #[cfg(feature = "mysql")]
+        "mysql" | "mariadb" => bail!("Not implemented"),
+        #[cfg(not(feature = "mysql"))]
+        "mysql" | "mariadb" => bail!(
+            "DATABASE_URL {} has the scheme of a MySQL/MariaDB database but the `mysql` feature of sqlx was not enabled",
+            db_url
+        ),
+
+        scheme => bail!("unexpected scheme {:?} in DATABASE_URL {}", scheme, db_url),
+    }
+}
cargo-sqlx/src/migrator/postgres.rs
@@ -9,7 +9,7 @@ use sqlx::Row;
 use anyhow::{anyhow, Context, Result};
 use async_trait::async_trait;

-use crate::database_migrator::{DatabaseMigrator, MigrationTransaction};
+use crate::migrator::{DatabaseMigrator, MigrationTransaction};

 pub struct Postgres {
     pub db_url: String,
cargo-sqlx/src/migrator/sqlite.rs
@@ -9,7 +9,7 @@ use sqlx::SqliteConnection;
 use anyhow::{anyhow, Context, Result};
 use async_trait::async_trait;

-use crate::database_migrator::{DatabaseMigrator, MigrationTransaction};
+use crate::migrator::{DatabaseMigrator, MigrationTransaction};

 pub struct Sqlite {
     db_url: String,
119 cargo-sqlx/src/prepare.rs (new file)
@@ -0,0 +1,119 @@
+use anyhow::{anyhow, bail, Context};
+use std::process::Command;
+use std::{env, fs};
+
+use std::collections::BTreeMap;
+use std::fs::File;
+use std::path::Path;
+use url::Url;
+
+type QueryData = BTreeMap<String, serde_json::Value>;
+type JsonObject = serde_json::Map<String, serde_json::Value>;
+
+pub fn run() -> anyhow::Result<()> {
+    #[derive(serde::Serialize)]
+    struct DataFile {
+        db: &'static str,
+        #[serde(flatten)]
+        data: QueryData,
+    }
+
+    let db_kind = get_db_kind()?;
+    let data = run_prepare_step()?;
+
+    serde_json::to_writer_pretty(
+        File::create("sqlx-data.json")?,
+        &DataFile { db: db_kind, data },
+    )
+    .map_err(Into::into)
+}
+
+pub fn check() -> anyhow::Result<()> {
+    let db_kind = get_db_kind()?;
+    let data = run_prepare_step()?;
+
+    let data_file = fs::read("sqlx-data.json").context(
+        "failed to open `sqlx-data.json`; you may need to run `cargo sqlx prepare` first",
+    )?;
+
+    let mut saved_data: QueryData = serde_json::from_slice(&data_file)?;
+
+    let expected_db = saved_data
+        .remove("db")
+        .context("expected key `db` in data file")?;
+
+    let expected_db = expected_db
+        .as_str()
+        .context("expected key `db` to be a string")?;
+
+    if db_kind != expected_db {
+        bail!(
+            "saved prepare data is for {}, not {} (inferred from `DATABASE_URL`)",
+            expected_db,
+            db_kind
+        )
+    }
+
+    if data != saved_data {
+        bail!("`cargo sqlx prepare` needs to be rerun")
+    }
+
+    Ok(())
+}
+
+fn run_prepare_step() -> anyhow::Result<QueryData> {
+    // path to the Cargo executable
+    let cargo = env::var("CARGO")
+        .context("`prepare` subcommand may only be invoked as `cargo sqlx prepare`")?;
+
+    if !Command::new(cargo).arg("check").status()?.success() {
+        bail!("`cargo check` failed");
+    }
+
+    let save_dir = env::var("CARGO_TARGET_DIR").unwrap_or_else(|_| "target/sqlx".into());
+    let pattern = Path::new(&save_dir).join("query-*.json");
+
+    let mut data = BTreeMap::new();
+
+    for path in glob::glob(
+        pattern
+            .to_str()
+            .context("CARGO_TARGET_DIR not valid UTF-8")?,
+    )? {
+        let path = path?;
+        let contents = fs::read(&*path)?;
+        let mut query_data: JsonObject = serde_json::from_slice(&contents)?;
+
+        // we lift the `hash` key to the outer map
+        let hash = query_data
+            .remove("hash")
+            .context("expected key `hash` in query data")?;
+
+        if let serde_json::Value::String(hash) = hash {
+            data.insert(hash, serde_json::Value::Object(query_data));
+        } else {
+            bail!(
+                "expected key `hash` in query data to be string, was {:?} instead; file: {}",
+                hash,
+                path.display()
+            )
+        }
+    }
+
+    Ok(data)
+}
+
+fn get_db_kind() -> anyhow::Result<&'static str> {
+    let db_url = dotenv::var("DATABASE_URL")
+        .map_err(|_| anyhow!("`DATABASE_URL` must be set to use the `prepare` subcommand"))?;
+
+    let db_url = Url::parse(&db_url)?;
+
+    // these should match the values of `DatabaseExt::NAME` in `sqlx-macros`
+    match db_url.scheme() {
+        "postgres" | "postgresql" => Ok("PostgreSQL"),
+        "mysql" | "mariadb" => Ok("MySQL/MariaDB"),
+        "sqlite" => Ok("SQLite"),
+        _ => bail!("unexpected scheme in database URL: {}", db_url.scheme()),
+    }
+}
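Given the `DataFile` struct in `run` above (a `db` kind with the query map flattened beside it, keyed by query hash), the emitted sqlx-data.json would take roughly this shape; the hash and payload below are invented for illustration:

    {
      "db": "PostgreSQL",
      "<hash of the query text>": {
        "query": "SELECT id, name FROM users WHERE id = $1",
        "describe": { ... }
      }
    }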
sqlx-macros/Cargo.toml
@@ -47,7 +47,7 @@ heck = "0.3"
 proc-macro2 = { version = "1.0.9", default-features = false }
 sqlx-core = { version = "0.3.5", default-features = false, path = "../sqlx-core" }
 serde = { version = "1.0", optional = true }
-serde_json = { version = "1.0", features = [ "raw_value" ], optional = true }
+serde_json = { version = "1.0", features = [ "preserve_order" ], optional = true }
 sha2 = { version = "0.8.1", optional = true }
 syn = { version = "1.0.16", default-features = false, features = [ "full" ] }
 quote = { version = "1.0.2", default-features = false }
sqlx-macros/src/query/data.rs
@@ -23,6 +23,8 @@ use syn::export::Span;
 pub struct QueryData<DB: Database> {
     pub(super) query: String,
     pub(super) describe: Describe<DB>,
+    #[cfg(feature = "offline")]
+    pub(super) hash: String,
 }

 impl<DB: Database> QueryData<DB> {
@@ -33,6 +35,8 @@ impl<DB: Database> QueryData<DB> {
         Ok(QueryData {
             query: query.into(),
             describe: conn.describe(query).await?,
+            #[cfg(feature = "offline")]
+            hash: offline::hash_string(query),
         })
     }
 }
@@ -57,6 +61,8 @@ pub mod offline {
         pub db_name: String,
         pub query: String,
         pub describe: serde_json::Value,
+        #[serde(skip)]
+        pub hash: String,
     }

     impl DynQueryData {
@@ -82,11 +88,14 @@ pub mod offline {
     {
         pub fn from_dyn_data(dyn_data: DynQueryData) -> crate::Result<Self> {
             assert!(!dyn_data.db_name.is_empty());
+            assert!(!dyn_data.hash.is_empty());
+
             if DB::NAME == dyn_data.db_name {
                 let describe: Describe<DB> = serde_json::from_value(dyn_data.describe)?;
                 Ok(QueryData {
                     query: dyn_data.query,
                     describe,
+                    hash: dyn_data.hash,
                 })
             } else {
                 Err(format!(
@@ -115,7 +124,7 @@ pub mod offline {
         }
     }

-    fn hash_string(query: &str) -> String {
+    pub fn hash_string(query: &str) -> String {
         // picked `sha2` because it's already in the dependency tree for both MySQL and Postgres
         use sha2::{Digest, Sha256};
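The hunk above cuts off before the body of `hash_string`. Given the `sha2` import and the fact that the result is used as a JSON map key, a plausible completion would be the following; the use of the `hex` crate is an assumption, not shown in this diff:

    pub fn hash_string(query: &str) -> String {
        // picked `sha2` because it's already in the dependency tree for both MySQL and Postgres
        use sha2::{Digest, Sha256};

        // assumption: hex-encode the SHA-256 digest of the query text
        hex::encode(Sha256::digest(query.as_bytes()))
    }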
@@ -156,6 +165,7 @@ pub mod offline {

             return if query_data.query == self.query {
                 query_data.db_name = db_name;
+                query_data.hash = self.hash;
                 Ok(query_data)
             } else {
                 Err(serde::de::Error::custom(format_args!(

sqlx-macros/src/query/mod.rs
@@ -122,7 +122,7 @@ pub fn expand_from_file(input: QueryMacroInput, file: PathBuf) -> crate::Result<
     #[cfg(feature = "sqlite")]
     sqlx_core::sqlite::Sqlite::NAME => expand_with_data(
         input,
-        QueryData::<sqlx::sqlite::Sqlite>::from_dyn_data(query_data)?,
+        QueryData::<sqlx_core::sqlite::Sqlite>::from_dyn_data(query_data)?,
     ),
     _ => Err(format!(
         "found query data for {} but the feature for that database was not enabled",