Mirror of https://github.com/launchbadge/sqlx (synced 2024-11-10 06:24:16 +00:00)
fix: ensure migration progress is not lost for PG, mysql and sqlite (#1991)
* fix: ensure migration progress is not lost for PG. Fixes #1966.
* fix: ensure migration progress is not lost for sqlite. This is similar to #1966.
* fix: ensure reverse migration progress is not lost for PG. See #1966.
* fix: ensure reverse migration progress is not lost for sqlite. See #1966.
* fix: ensure migration progress is not lost for mysql. This is similar to #1966.
* fix: ensure reverse migration progress is not lost for mysql. See #1966.
* test: check migration type as well
* test: extend migrations testing
* fix: work around MySQL implicit commits
* refactor: simplify migration testing
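The common idea behind these fixes: run the migration script and its `_sqlx_migrations` bookkeeping row inside one transaction, so a crash between the two steps can neither record a migration that did not run nor run a migration without recording it. Below is a minimal stand-alone sketch of that pattern, not the sqlx implementation from the diff that follows; the helper name `apply_in_tx` and the placeholder description/checksum values are made up for illustration.

use sqlx::{Connection, Executor, PgConnection};

// Hypothetical helper sketching the single-transaction bookkeeping pattern.
async fn apply_in_tx(conn: &mut PgConnection, version: i64, sql: &str) -> Result<(), sqlx::Error> {
    let mut tx = conn.begin().await?;

    // 1. Run the migration script inside the transaction.
    tx.execute(sql).await?;

    // 2. Record progress in the same transaction; `execution_time` starts at -1
    //    and is only patched up after the commit, since it is informational.
    sqlx::query(
        "INSERT INTO _sqlx_migrations ( version, description, success, checksum, execution_time )
         VALUES ( $1, '', TRUE, $2, -1 )",
    )
    .bind(version)
    .bind(&b""[..]) // empty checksum, for illustration only
    .execute(&mut tx)
    .await?;

    // 3. Script and bookkeeping commit (or roll back) together.
    tx.commit().await?;
    Ok(())
}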
Parent: ddffaa7dde
Commit: 5e56da87e0
35 changed files with 617 additions and 55 deletions
Cargo.toml (+15)

@@ -212,6 +212,11 @@ name = "sqlite-test-attr"
 path = "tests/sqlite/test-attr.rs"
 required-features = ["sqlite", "macros", "migrate"]
 
+[[test]]
+name = "sqlite-migrate"
+path = "tests/sqlite/migrate.rs"
+required-features = ["sqlite", "macros", "migrate"]
+
 #
 # MySQL
 #
@@ -241,6 +246,11 @@ name = "mysql-test-attr"
 path = "tests/mysql/test-attr.rs"
 required-features = ["mysql", "macros", "migrate"]
 
+[[test]]
+name = "mysql-migrate"
+path = "tests/mysql/migrate.rs"
+required-features = ["mysql", "macros", "migrate"]
+
 #
 # PostgreSQL
 #
@@ -275,6 +285,11 @@ name = "postgres-test-attr"
 path = "tests/postgres/test-attr.rs"
 required-features = ["postgres", "macros", "migrate"]
 
+[[test]]
+name = "postgres-migrate"
+path = "tests/postgres/migrate.rs"
+required-features = ["postgres", "macros", "migrate"]
+
 #
 # Microsoft SQL Server (MSSQL)
 #
@@ -1,5 +1,5 @@
 /// Migration Type represents the type of migration
-#[derive(Debug, Copy, Clone)]
+#[derive(Debug, Copy, Clone, PartialEq)]
 pub enum MigrationType {
     /// Simple migration are single file migrations with no up / down queries
     Simple,
@@ -1,4 +1,4 @@
-use crate::connection::ConnectOptions;
+use crate::connection::{ConnectOptions, Connection};
 use crate::error::Error;
 use crate::executor::Executor;
 use crate::migrate::MigrateError;
@@ -209,28 +209,66 @@ CREATE TABLE IF NOT EXISTS _sqlx_migrations (
         migration: &'m Migration,
     ) -> BoxFuture<'m, Result<Duration, MigrateError>> {
         Box::pin(async move {
+            // Use a single transaction for the actual migration script and the essential bookeeping so we never
+            // execute migrations twice. See https://github.com/launchbadge/sqlx/issues/1966.
+            // The `execution_time` however can only be measured for the whole transaction. This value _only_ exists for
+            // data lineage and debugging reasons, so it is not super important if it is lost. So we initialize it to -1
+            // and update it once the actual transaction completed.
+            let mut tx = self.begin().await?;
             let start = Instant::now();
 
-            let res = self.execute(&*migration.sql).await;
-
-            let elapsed = start.elapsed();
-
+            // For MySQL we cannot really isolate migrations due to implicit commits caused by table modification, see
+            // https://dev.mysql.com/doc/refman/8.0/en/implicit-commit.html
+            //
+            // To somewhat try to detect this, we first insert the migration into the migration table with
+            // `success=FALSE` and later modify the flag.
+            //
             // language=MySQL
             let _ = query(
                 r#"
     INSERT INTO _sqlx_migrations ( version, description, success, checksum, execution_time )
-    VALUES ( ?, ?, ?, ?, ? )
+    VALUES ( ?, ?, FALSE, ?, -1 )
                 "#,
             )
            .bind(migration.version)
            .bind(&*migration.description)
-           .bind(res.is_ok())
            .bind(&*migration.checksum)
-           .bind(elapsed.as_nanos() as i64)
-           .execute(self)
+           .execute(&mut tx)
            .await?;
 
-            res?;
+            let _ = tx.execute(&*migration.sql).await?;
+
+            // language=MySQL
+            let _ = query(
+                r#"
+    UPDATE _sqlx_migrations
+    SET success = TRUE
+    WHERE version = ?
+                "#,
+            )
+            .bind(migration.version)
+            .execute(&mut tx)
+            .await?;
+
+            tx.commit().await?;
+
+            // Update `elapsed_time`.
+            // NOTE: The process may disconnect/die at this point, so the elapsed time value might be lost. We accept
+            // this small risk since this value is not super important.
+
+            let elapsed = start.elapsed();
+
+            // language=MySQL
+            let _ = query(
+                r#"
+    UPDATE _sqlx_migrations
+    SET execution_time = ?
+    WHERE version = ?
+                "#,
+            )
+            .bind(elapsed.as_nanos() as i64)
+            .bind(migration.version)
+            .execute(self)
+            .await?;
 
             Ok(elapsed)
         })
@@ -241,18 +279,41 @@ CREATE TABLE IF NOT EXISTS _sqlx_migrations (
         migration: &'m Migration,
     ) -> BoxFuture<'m, Result<Duration, MigrateError>> {
         Box::pin(async move {
+            // Use a single transaction for the actual migration script and the essential bookeeping so we never
+            // execute migrations twice. See https://github.com/launchbadge/sqlx/issues/1966.
+            let mut tx = self.begin().await?;
             let start = Instant::now();
 
-            self.execute(&*migration.sql).await?;
+            // For MySQL we cannot really isolate migrations due to implicit commits caused by table modification, see
+            // https://dev.mysql.com/doc/refman/8.0/en/implicit-commit.html
+            //
+            // To somewhat try to detect this, we first insert the migration into the migration table with
+            // `success=FALSE` and later remove the migration altogether.
+            //
+            // language=MySQL
+            let _ = query(
+                r#"
+    UPDATE _sqlx_migrations
+    SET success = FALSE
+    WHERE version = ?
+                "#,
+            )
+            .bind(migration.version)
+            .execute(&mut tx)
+            .await?;
 
-            let elapsed = start.elapsed();
+            tx.execute(&*migration.sql).await?;
 
             // language=SQL
             let _ = query(r#"DELETE FROM _sqlx_migrations WHERE version = ?"#)
                 .bind(migration.version)
-                .execute(self)
+                .execute(&mut tx)
                 .await?;
 
+            tx.commit().await?;
+
+            let elapsed = start.elapsed();
+
             Ok(elapsed)
         })
     }
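MySQL DDL statements cause implicit commits, so the script in the hunks above cannot be fully isolated in a transaction; the fix therefore records the migration with success = FALSE before running the script and only flips the flag to TRUE afterwards, leaving evidence behind if the process dies mid-script. A rough stand-alone sketch of that ordering follows (hypothetical helper name and simplified bind values, not the code from this diff).

use sqlx::{Connection, Executor, MySqlConnection};

// Hypothetical helper illustrating the "mark as pending, run, then confirm" ordering.
async fn apply_mysql(conn: &mut MySqlConnection, version: i64, sql: &str) -> Result<(), sqlx::Error> {
    let mut tx = conn.begin().await?;

    // Record the migration as not-yet-successful first. If a DDL statement in the
    // script implicitly commits and the process then dies, this row remains as
    // evidence of a partially applied migration.
    sqlx::query(
        "INSERT INTO _sqlx_migrations ( version, description, success, checksum, execution_time )
         VALUES ( ?, '', FALSE, ?, -1 )",
    )
    .bind(version)
    .bind(&b""[..]) // empty checksum, for illustration only
    .execute(&mut tx)
    .await?;

    // Run the migration script itself.
    tx.execute(sql).await?;

    // Only after the script succeeded is the migration marked as applied.
    sqlx::query("UPDATE _sqlx_migrations SET success = TRUE WHERE version = ?")
        .bind(version)
        .execute(&mut tx)
        .await?;

    tx.commit().await?;
    Ok(())
}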
@@ -222,23 +222,44 @@ CREATE TABLE IF NOT EXISTS _sqlx_migrations (
             let mut tx = self.begin().await?;
             let start = Instant::now();
 
+            // Use a single transaction for the actual migration script and the essential bookeeping so we never
+            // execute migrations twice. See https://github.com/launchbadge/sqlx/issues/1966.
+            // The `execution_time` however can only be measured for the whole transaction. This value _only_ exists for
+            // data lineage and debugging reasons, so it is not super important if it is lost. So we initialize it to -1
+            // and update it once the actual transaction completed.
             let _ = tx.execute(&*migration.sql).await?;
 
+            // language=SQL
+            let _ = query(
+                r#"
+    INSERT INTO _sqlx_migrations ( version, description, success, checksum, execution_time )
+    VALUES ( $1, $2, TRUE, $3, -1 )
+                "#,
+            )
+            .bind(migration.version)
+            .bind(&*migration.description)
+            .bind(&*migration.checksum)
+            .execute(&mut tx)
+            .await?;
+
             tx.commit().await?;
 
+            // Update `elapsed_time`.
+            // NOTE: The process may disconnect/die at this point, so the elapsed time value might be lost. We accept
+            // this small risk since this value is not super important.
+
             let elapsed = start.elapsed();
 
             // language=SQL
             let _ = query(
                 r#"
-    INSERT INTO _sqlx_migrations ( version, description, success, checksum, execution_time )
-    VALUES ( $1, $2, TRUE, $3, $4 )
+    UPDATE _sqlx_migrations
+    SET execution_time = $1
+    WHERE version = $2
                 "#,
             )
-            .bind(migration.version)
-            .bind(&*migration.description)
-            .bind(&*migration.checksum)
             .bind(elapsed.as_nanos() as i64)
+            .bind(migration.version)
             .execute(self)
             .await?;
 
@@ -251,21 +272,23 @@ CREATE TABLE IF NOT EXISTS _sqlx_migrations (
         migration: &'m Migration,
     ) -> BoxFuture<'m, Result<Duration, MigrateError>> {
         Box::pin(async move {
+            // Use a single transaction for the actual migration script and the essential bookeeping so we never
+            // execute migrations twice. See https://github.com/launchbadge/sqlx/issues/1966.
             let mut tx = self.begin().await?;
             let start = Instant::now();
 
             let _ = tx.execute(&*migration.sql).await?;
 
-            tx.commit().await?;
-
-            let elapsed = start.elapsed();
-
             // language=SQL
             let _ = query(r#"DELETE FROM _sqlx_migrations WHERE version = $1"#)
                 .bind(migration.version)
-                .execute(self)
+                .execute(&mut tx)
                 .await?;
 
+            tx.commit().await?;
+
+            let elapsed = start.elapsed();
+
             Ok(elapsed)
         })
     }
@@ -173,23 +173,44 @@ CREATE TABLE IF NOT EXISTS _sqlx_migrations (
             let mut tx = self.begin().await?;
             let start = Instant::now();
 
+            // Use a single transaction for the actual migration script and the essential bookeeping so we never
+            // execute migrations twice. See https://github.com/launchbadge/sqlx/issues/1966.
+            // The `execution_time` however can only be measured for the whole transaction. This value _only_ exists for
+            // data lineage and debugging reasons, so it is not super important if it is lost. So we initialize it to -1
+            // and update it once the actual transaction completed.
             let _ = tx.execute(&*migration.sql).await?;
 
+            // language=SQL
+            let _ = query(
+                r#"
+    INSERT INTO _sqlx_migrations ( version, description, success, checksum, execution_time )
+    VALUES ( ?1, ?2, TRUE, ?3, -1 )
+                "#,
+            )
+            .bind(migration.version)
+            .bind(&*migration.description)
+            .bind(&*migration.checksum)
+            .execute(&mut tx)
+            .await?;
+
             tx.commit().await?;
 
+            // Update `elapsed_time`.
+            // NOTE: The process may disconnect/die at this point, so the elapsed time value might be lost. We accept
+            // this small risk since this value is not super important.
+
             let elapsed = start.elapsed();
 
             // language=SQL
             let _ = query(
                 r#"
-    INSERT INTO _sqlx_migrations ( version, description, success, checksum, execution_time )
-    VALUES ( ?1, ?2, TRUE, ?3, ?4 )
+    UPDATE _sqlx_migrations
+    SET execution_time = ?1
+    WHERE version = ?2
                 "#,
             )
-            .bind(migration.version)
-            .bind(&*migration.description)
-            .bind(&*migration.checksum)
            .bind(elapsed.as_nanos() as i64)
+            .bind(migration.version)
            .execute(self)
            .await?;
 
@@ -202,21 +223,23 @@ CREATE TABLE IF NOT EXISTS _sqlx_migrations (
         migration: &'m Migration,
     ) -> BoxFuture<'m, Result<Duration, MigrateError>> {
         Box::pin(async move {
+            // Use a single transaction for the actual migration script and the essential bookeeping so we never
+            // execute migrations twice. See https://github.com/launchbadge/sqlx/issues/1966.
             let mut tx = self.begin().await?;
             let start = Instant::now();
 
             let _ = tx.execute(&*migration.sql).await?;
 
-            tx.commit().await?;
-
-            let elapsed = start.elapsed();
-
             // language=SQL
             let _ = query(r#"DELETE FROM _sqlx_migrations WHERE version = ?1"#)
                 .bind(migration.version)
-                .execute(self)
+                .execute(&mut tx)
                 .await?;
 
+            tx.commit().await?;
+
+            let elapsed = start.elapsed();
+
             Ok(elapsed)
         })
     }
@@ -1,20 +1,29 @@
 use sqlx::migrate::Migrator;
 use std::path::Path;
 
-static EMBEDDED: Migrator = sqlx::migrate!("tests/migrate/migrations");
+static EMBEDDED_SIMPLE: Migrator = sqlx::migrate!("tests/migrate/migrations_simple");
+static EMBEDDED_REVERSIBLE: Migrator = sqlx::migrate!("tests/migrate/migrations_reversible");
 
 #[sqlx_macros::test]
 async fn same_output() -> anyhow::Result<()> {
-    let runtime = Migrator::new(Path::new("tests/migrate/migrations")).await?;
+    let runtime_simple = Migrator::new(Path::new("tests/migrate/migrations_simple")).await?;
+    let runtime_reversible =
+        Migrator::new(Path::new("tests/migrate/migrations_reversible")).await?;
 
-    assert_eq!(runtime.migrations.len(), EMBEDDED.migrations.len());
-
-    for (e, r) in EMBEDDED.iter().zip(runtime.iter()) {
-        assert_eq!(e.version, r.version);
-        assert_eq!(e.description, r.description);
-        assert_eq!(e.sql, r.sql);
-        assert_eq!(e.checksum, r.checksum);
-    }
+    assert_same(&EMBEDDED_SIMPLE, &runtime_simple);
+    assert_same(&EMBEDDED_REVERSIBLE, &runtime_reversible);
 
     Ok(())
 }
+
+fn assert_same(embedded: &Migrator, runtime: &Migrator) {
+    assert_eq!(runtime.migrations.len(), embedded.migrations.len());
+
+    for (e, r) in embedded.iter().zip(runtime.iter()) {
+        assert_eq!(e.version, r.version);
+        assert_eq!(e.description, r.description);
+        assert_eq!(e.migration_type, r.migration_type);
+        assert_eq!(e.sql, r.sql);
+        assert_eq!(e.checksum, r.checksum);
+    }
+}
@@ -1,6 +0,0 @@ (deleted file)
CREATE TABLE tweet (
    id BIGINT NOT NULL PRIMARY KEY,
    text TEXT NOT NULL,
    is_sent BOOLEAN NOT NULL DEFAULT TRUE,
    owner_id BIGINT
);

@@ -1,5 +0,0 @@ (deleted file)
CREATE TABLE accounts (
    id INTEGER NOT NULL PRIMARY KEY,
    name TEXT NOT NULL,
    is_active BOOLEAN
);

@@ -0,0 +1 @@ (new file)
DROP TABLE migrations_reversible_test;

@@ -0,0 +1,7 @@ (new file)
CREATE TABLE migrations_reversible_test (
    some_id BIGINT NOT NULL PRIMARY KEY,
    some_payload BIGINT NOT NUll
);

INSERT INTO migrations_reversible_test (some_id, some_payload)
VALUES (1, 100);

@@ -0,0 +1,2 @@ (new file)
UPDATE migrations_reversible_test
SET some_payload = some_payload - 1;

@@ -0,0 +1,2 @@ (new file)
UPDATE migrations_reversible_test
SET some_payload = some_payload + 1;

@@ -0,0 +1,7 @@ (new file)
CREATE TABLE migrations_simple_test (
    some_id BIGINT NOT NULL PRIMARY KEY,
    some_payload BIGINT NOT NUll
);

INSERT INTO migrations_simple_test (some_id, some_payload)
VALUES (1, 100);

@@ -0,0 +1,34 @@ (new file)
-- Perform a tricky conversion of the payload.
--
-- This script will only succeed once and will fail if executed twice.

-- set up temporary target column
ALTER TABLE migrations_simple_test
ADD some_payload_tmp TEXT;

-- perform conversion
-- This will fail if `some_payload` is already a string column due to the addition.
-- We add a suffix after the addition to ensure that the SQL database does not silently cast the string back to an
-- integer.
UPDATE migrations_simple_test
SET some_payload_tmp = CONCAT(CAST((some_payload + 10) AS TEXT), '_suffix');

-- remove original column including the content
ALTER TABLE migrations_simple_test
DROP COLUMN some_payload;

-- prepare new payload column (nullable, so we can copy over the data)
ALTER TABLE migrations_simple_test
ADD some_payload TEXT;

-- copy new values
UPDATE migrations_simple_test
SET some_payload = some_payload_tmp;

-- "freeze" column
ALTER TABLE migrations_simple_test
ALTER COLUMN some_payload SET NOT NULL;

-- clean up
ALTER TABLE migrations_simple_test
DROP COLUMN some_payload_tmp;
tests/mysql/migrate.rs (new file, +78)

@@ -0,0 +1,78 @@
use sqlx::migrate::Migrator;
use sqlx::mysql::{MySql, MySqlConnection};
use sqlx::pool::PoolConnection;
use sqlx::Executor;
use sqlx::Row;
use std::path::Path;

#[sqlx::test(migrations = false)]
async fn simple(mut conn: PoolConnection<MySql>) -> anyhow::Result<()> {
    clean_up(&mut conn).await?;

    let migrator = Migrator::new(Path::new("tests/mysql/migrations_simple")).await?;

    // run migration
    migrator.run(&mut conn).await?;

    // check outcome
    let res: String = conn
        .fetch_one("SELECT some_payload FROM migrations_simple_test")
        .await?
        .get(0);
    assert_eq!(res, "110_suffix");

    // running it a 2nd time should still work
    migrator.run(&mut conn).await?;

    Ok(())
}

#[sqlx::test(migrations = false)]
async fn reversible(mut conn: PoolConnection<MySql>) -> anyhow::Result<()> {
    clean_up(&mut conn).await?;

    let migrator = Migrator::new(Path::new("tests/mysql/migrations_reversible")).await?;

    // run migration
    migrator.run(&mut conn).await?;

    // check outcome
    let res: i64 = conn
        .fetch_one("SELECT some_payload FROM migrations_reversible_test")
        .await?
        .get(0);
    assert_eq!(res, 101);

    // roll back nothing (last version)
    migrator.undo(&mut conn, 20220721125033).await?;

    // check outcome
    let res: i64 = conn
        .fetch_one("SELECT some_payload FROM migrations_reversible_test")
        .await?
        .get(0);
    assert_eq!(res, 101);

    // roll back one version
    migrator.undo(&mut conn, 20220721124650).await?;

    // check outcome
    let res: i64 = conn
        .fetch_one("SELECT some_payload FROM migrations_reversible_test")
        .await?
        .get(0);
    assert_eq!(res, 100);

    Ok(())
}

/// Ensure that we have a clean initial state.
async fn clean_up(conn: &mut MySqlConnection) -> anyhow::Result<()> {
    conn.execute("DROP TABLE migrations_simple_test").await.ok();
    conn.execute("DROP TABLE migrations_reversible_test")
        .await
        .ok();
    conn.execute("DROP TABLE _sqlx_migrations").await.ok();

    Ok(())
}
@@ -0,0 +1 @@ (new file)
DROP TABLE migrations_reversible_test;

@@ -0,0 +1,7 @@ (new file)
CREATE TABLE migrations_reversible_test (
    some_id BIGINT NOT NULL PRIMARY KEY,
    some_payload BIGINT NOT NUll
);

INSERT INTO migrations_reversible_test (some_id, some_payload)
VALUES (1, 100);

@@ -0,0 +1,2 @@ (new file)
UPDATE migrations_reversible_test
SET some_payload = some_payload - 1;

@@ -0,0 +1,2 @@ (new file)
UPDATE migrations_reversible_test
SET some_payload = some_payload + 1;

@@ -0,0 +1,7 @@ (new file)
CREATE TABLE migrations_simple_test (
    some_id BIGINT NOT NULL PRIMARY KEY,
    some_payload BIGINT NOT NUll
);

INSERT INTO migrations_simple_test (some_id, some_payload)
VALUES (1, 100);

@@ -0,0 +1,34 @@ (new file)
-- Perform a tricky conversion of the payload.
--
-- This script will only succeed once and will fail if executed twice.

-- set up temporary target column
ALTER TABLE migrations_simple_test
ADD some_payload_tmp TEXT;

-- perform conversion
-- This will fail if `some_payload` is already a string column due to the addition.
-- We add a suffix after the addition to ensure that the SQL database does not silently cast the string back to an
-- integer.
UPDATE migrations_simple_test
SET some_payload_tmp = CONCAT(CAST((some_payload + 10) AS CHAR(3)), '_suffix');

-- remove original column including the content
ALTER TABLE migrations_simple_test
DROP COLUMN some_payload;

-- prepare new payload column (nullable, so we can copy over the data)
ALTER TABLE migrations_simple_test
ADD some_payload TEXT;

-- copy new values
UPDATE migrations_simple_test
SET some_payload = some_payload_tmp;

-- "freeze" column
ALTER TABLE migrations_simple_test
MODIFY some_payload TEXT NOT NULL;

-- clean up
ALTER TABLE migrations_simple_test
DROP COLUMN some_payload_tmp;
tests/postgres/migrate.rs (new file, +78)

@@ -0,0 +1,78 @@
use sqlx::migrate::Migrator;
use sqlx::pool::PoolConnection;
use sqlx::postgres::{PgConnection, Postgres};
use sqlx::Executor;
use sqlx::Row;
use std::path::Path;

#[sqlx::test(migrations = false)]
async fn simple(mut conn: PoolConnection<Postgres>) -> anyhow::Result<()> {
    clean_up(&mut conn).await?;

    let migrator = Migrator::new(Path::new("tests/postgres/migrations_simple")).await?;

    // run migration
    migrator.run(&mut conn).await?;

    // check outcome
    let res: String = conn
        .fetch_one("SELECT some_payload FROM migrations_simple_test")
        .await?
        .get(0);
    assert_eq!(res, "110_suffix");

    // running it a 2nd time should still work
    migrator.run(&mut conn).await?;

    Ok(())
}

#[sqlx::test(migrations = false)]
async fn reversible(mut conn: PoolConnection<Postgres>) -> anyhow::Result<()> {
    clean_up(&mut conn).await?;

    let migrator = Migrator::new(Path::new("tests/postgres/migrations_reversible")).await?;

    // run migration
    migrator.run(&mut conn).await?;

    // check outcome
    let res: i64 = conn
        .fetch_one("SELECT some_payload FROM migrations_reversible_test")
        .await?
        .get(0);
    assert_eq!(res, 101);

    // roll back nothing (last version)
    migrator.undo(&mut conn, 20220721125033).await?;

    // check outcome
    let res: i64 = conn
        .fetch_one("SELECT some_payload FROM migrations_reversible_test")
        .await?
        .get(0);
    assert_eq!(res, 101);

    // roll back one version
    migrator.undo(&mut conn, 20220721124650).await?;

    // check outcome
    let res: i64 = conn
        .fetch_one("SELECT some_payload FROM migrations_reversible_test")
        .await?
        .get(0);
    assert_eq!(res, 100);

    Ok(())
}

/// Ensure that we have a clean initial state.
async fn clean_up(conn: &mut PgConnection) -> anyhow::Result<()> {
    conn.execute("DROP TABLE migrations_simple_test").await.ok();
    conn.execute("DROP TABLE migrations_reversible_test")
        .await
        .ok();
    conn.execute("DROP TABLE _sqlx_migrations").await.ok();

    Ok(())
}
@@ -0,0 +1 @@ (new file)
DROP TABLE migrations_reversible_test;

@@ -0,0 +1,7 @@ (new file)
CREATE TABLE migrations_reversible_test (
    some_id BIGINT NOT NULL PRIMARY KEY,
    some_payload BIGINT NOT NUll
);

INSERT INTO migrations_reversible_test (some_id, some_payload)
VALUES (1, 100);

@@ -0,0 +1,2 @@ (new file)
UPDATE migrations_reversible_test
SET some_payload = some_payload - 1;

@@ -0,0 +1,2 @@ (new file)
UPDATE migrations_reversible_test
SET some_payload = some_payload + 1;

@@ -0,0 +1,7 @@ (new file)
CREATE TABLE migrations_simple_test (
    some_id BIGINT NOT NULL PRIMARY KEY,
    some_payload BIGINT NOT NUll
);

INSERT INTO migrations_simple_test (some_id, some_payload)
VALUES (1, 100);

@@ -0,0 +1,34 @@ (new file)
-- Perform a tricky conversion of the payload.
--
-- This script will only succeed once and will fail if executed twice.

-- set up temporary target column
ALTER TABLE migrations_simple_test
ADD some_payload_tmp TEXT;

-- perform conversion
-- This will fail if `some_payload` is already a string column due to the addition.
-- We add a suffix after the addition to ensure that the SQL database does not silently cast the string back to an
-- integer.
UPDATE migrations_simple_test
SET some_payload_tmp = CONCAT(CAST((some_payload + 10) AS TEXT), '_suffix');

-- remove original column including the content
ALTER TABLE migrations_simple_test
DROP COLUMN some_payload;

-- prepare new payload column (nullable, so we can copy over the data)
ALTER TABLE migrations_simple_test
ADD some_payload TEXT;

-- copy new values
UPDATE migrations_simple_test
SET some_payload = some_payload_tmp;

-- "freeze" column
ALTER TABLE migrations_simple_test
ALTER COLUMN some_payload SET NOT NULL;

-- clean up
ALTER TABLE migrations_simple_test
DROP COLUMN some_payload_tmp;
tests/sqlite/migrate.rs (new file, +78)

@@ -0,0 +1,78 @@
use sqlx::migrate::Migrator;
use sqlx::pool::PoolConnection;
use sqlx::sqlite::{Sqlite, SqliteConnection};
use sqlx::Executor;
use sqlx::Row;
use std::path::Path;

#[sqlx::test(migrations = false)]
async fn simple(mut conn: PoolConnection<Sqlite>) -> anyhow::Result<()> {
    clean_up(&mut conn).await?;

    let migrator = Migrator::new(Path::new("tests/sqlite/migrations_simple")).await?;

    // run migration
    migrator.run(&mut conn).await?;

    // check outcome
    let res: String = conn
        .fetch_one("SELECT some_payload FROM migrations_simple_test")
        .await?
        .get(0);
    assert_eq!(res, "110_suffix");

    // running it a 2nd time should still work
    migrator.run(&mut conn).await?;

    Ok(())
}

#[sqlx::test(migrations = false)]
async fn reversible(mut conn: PoolConnection<Sqlite>) -> anyhow::Result<()> {
    clean_up(&mut conn).await?;

    let migrator = Migrator::new(Path::new("tests/sqlite/migrations_reversible")).await?;

    // run migration
    migrator.run(&mut conn).await?;

    // check outcome
    let res: i64 = conn
        .fetch_one("SELECT some_payload FROM migrations_reversible_test")
        .await?
        .get(0);
    assert_eq!(res, 101);

    // roll back nothing (last version)
    migrator.undo(&mut conn, 20220721125033).await?;

    // check outcome
    let res: i64 = conn
        .fetch_one("SELECT some_payload FROM migrations_reversible_test")
        .await?
        .get(0);
    assert_eq!(res, 101);

    // roll back one version
    migrator.undo(&mut conn, 20220721124650).await?;

    // check outcome
    let res: i64 = conn
        .fetch_one("SELECT some_payload FROM migrations_reversible_test")
        .await?
        .get(0);
    assert_eq!(res, 100);

    Ok(())
}

/// Ensure that we have a clean initial state.
async fn clean_up(conn: &mut SqliteConnection) -> anyhow::Result<()> {
    conn.execute("DROP TABLE migrations_simple_test").await.ok();
    conn.execute("DROP TABLE migrations_reversible_test")
        .await
        .ok();
    conn.execute("DROP TABLE _sqlx_migrations").await.ok();

    Ok(())
}
@@ -0,0 +1 @@ (new file)
DROP TABLE migrations_reversible_test;

@@ -0,0 +1,7 @@ (new file)
CREATE TABLE migrations_reversible_test (
    some_id BIGINT NOT NULL PRIMARY KEY,
    some_payload BIGINT NOT NUll
);

INSERT INTO migrations_reversible_test (some_id, some_payload)
VALUES (1, 100);

@@ -0,0 +1,2 @@ (new file)
UPDATE migrations_reversible_test
SET some_payload = some_payload - 1;

@@ -0,0 +1,2 @@ (new file)
UPDATE migrations_reversible_test
SET some_payload = some_payload + 1;

@@ -0,0 +1,7 @@ (new file)
CREATE TABLE migrations_simple_test (
    some_id BIGINT NOT NULL PRIMARY KEY,
    some_payload BIGINT NOT NUll
);

INSERT INTO migrations_simple_test (some_id, some_payload)
VALUES (1, 100);

@@ -0,0 +1,30 @@ (new file)
-- Perform a tricky conversion of the payload.
--
-- This script will only succeed once and will fail if executed twice.

-- set up temporary target column
ALTER TABLE migrations_simple_test
ADD some_payload_tmp TEXT;

-- perform conversion
-- This will fail if `some_payload` is already a string column due to the addition.
-- We add a suffix after the addition to ensure that the SQL database does not silently cast the string back to an
-- integer.
UPDATE migrations_simple_test
SET some_payload_tmp = CAST((some_payload + 10) AS TEXT) || '_suffix';

-- remove original column including the content
ALTER TABLE migrations_simple_test
DROP COLUMN some_payload;

-- prepare new payload column (nullable, so we can copy over the data)
ALTER TABLE migrations_simple_test
ADD some_payload TEXT;

-- copy new values
UPDATE migrations_simple_test
SET some_payload = some_payload_tmp;

-- clean up
ALTER TABLE migrations_simple_test
DROP COLUMN some_payload_tmp;