mirror of
https://github.com/launchbadge/sqlx
synced 2024-11-10 06:24:16 +00:00
fix(prepare): store temporary query files inside the workspace
This commit is contained in:
parent
70934d7cd2
commit
cf3ce13d27
4 changed files with 39 additions and 4 deletions
8
FAQ.md
8
FAQ.md
|
@ -200,11 +200,17 @@ as an ergonomic choice it does _not_ block committing if `cargo sqlx prepare` fa
|
||||||
|
|
||||||
We're working on a way for the macros to save their data to the filesystem automatically which should be part of SQLx 0.7,
|
We're working on a way for the macros to save their data to the filesystem automatically which should be part of SQLx 0.7,
|
||||||
so your pre-commit hook would then just need to stage the changed files. This can be enabled by creating a directory
|
so your pre-commit hook would then just need to stage the changed files. This can be enabled by creating a directory
|
||||||
and setting the `SQLX_OFFLINE_DIR` environment variable to it before compiling, e.g.
|
and setting the `SQLX_OFFLINE_DIR` environment variable to it before compiling.
|
||||||
|
Additionally, if you're not using Cargo or have a nonstandard setup, you may want to set the `SQLX_TMP`
|
||||||
|
variable in order to store temporary query files somewhere that isn't picked up by git.
|
||||||
|
These files should get cleaned up automatically, but they may not if there's a failure. For example:
|
||||||
|
|
||||||
```shell
|
```shell
|
||||||
$ mkdir .sqlx
|
$ mkdir .sqlx
|
||||||
$ export SQLX_OFFLINE_DIR="./.sqlx"
|
$ export SQLX_OFFLINE_DIR="./.sqlx"
|
||||||
|
$ # Optional and only useful if using a nonstandard setup, ensures temp files won't get picked up by git on failure
|
||||||
|
$ mkdir ./my-custom-target/sqlx-tmp
|
||||||
|
$ export SQLX_TMP="./my-custom-target/sqlx-tmp"
|
||||||
$ cargo check
|
$ cargo check
|
||||||
```
|
```
|
||||||
|
|
||||||
|
|
|
@ -147,6 +147,14 @@ fn run_prepare_step(ctx: &PrepareCtx, cache_dir: &Path) -> anyhow::Result<()> {
|
||||||
"Failed to create query cache directory: {:?}",
|
"Failed to create query cache directory: {:?}",
|
||||||
cache_dir
|
cache_dir
|
||||||
))?;
|
))?;
|
||||||
|
|
||||||
|
// Create directory to hold temporary query files before they get persisted to SQLX_OFFLINE_DIR
|
||||||
|
let tmp_dir = ctx.metadata.target_directory().join("sqlx-tmp");
|
||||||
|
fs::create_dir_all(&tmp_dir).context(format!(
|
||||||
|
"Failed to create temporary query cache directory: {:?}",
|
||||||
|
tmp_dir
|
||||||
|
))?;
|
||||||
|
|
||||||
// Only delete sqlx-*.json files to avoid accidentally deleting any user data.
|
// Only delete sqlx-*.json files to avoid accidentally deleting any user data.
|
||||||
for query_file in glob_query_files(cache_dir).context("Failed to read query cache files")? {
|
for query_file in glob_query_files(cache_dir).context("Failed to read query cache files")? {
|
||||||
fs::remove_file(&query_file)
|
fs::remove_file(&query_file)
|
||||||
|
@ -163,6 +171,7 @@ fn run_prepare_step(ctx: &PrepareCtx, cache_dir: &Path) -> anyhow::Result<()> {
|
||||||
check_command
|
check_command
|
||||||
.arg("check")
|
.arg("check")
|
||||||
.args(&ctx.cargo_args)
|
.args(&ctx.cargo_args)
|
||||||
|
.env("SQLX_TMP", tmp_dir)
|
||||||
.env("DATABASE_URL", &ctx.connect_opts.database_url)
|
.env("DATABASE_URL", &ctx.connect_opts.database_url)
|
||||||
.env("SQLX_OFFLINE", "false")
|
.env("SQLX_OFFLINE", "false")
|
||||||
.env("SQLX_OFFLINE_DIR", cache_dir);
|
.env("SQLX_OFFLINE_DIR", cache_dir);
|
||||||
|
|
|
@ -157,10 +157,17 @@ where
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
pub(super) fn save_in(&self, dir: impl AsRef<Path>) -> crate::Result<()> {
|
pub(super) fn save_in(
|
||||||
|
&self,
|
||||||
|
dir: impl AsRef<Path>,
|
||||||
|
tmp_dir: impl AsRef<Path>,
|
||||||
|
) -> crate::Result<()> {
|
||||||
// Output to a temporary file first, then move it atomically to avoid clobbering
|
// Output to a temporary file first, then move it atomically to avoid clobbering
|
||||||
// other invocations trying to write to the same path.
|
// other invocations trying to write to the same path.
|
||||||
let mut tmp_file = tempfile::NamedTempFile::new()
|
|
||||||
|
// Use a temp directory inside the workspace to avoid potential issues
|
||||||
|
// with persisting the file across filesystems.
|
||||||
|
let mut tmp_file = tempfile::NamedTempFile::new_in(tmp_dir)
|
||||||
.map_err(|err| format!("failed to create query file: {:?}", err))?;
|
.map_err(|err| format!("failed to create query file: {:?}", err))?;
|
||||||
serde_json::to_writer_pretty(tmp_file.as_file_mut(), self)
|
serde_json::to_writer_pretty(tmp_file.as_file_mut(), self)
|
||||||
.map_err(|err| format!("failed to serialize query data to file: {:?}", err))?;
|
.map_err(|err| format!("failed to serialize query data to file: {:?}", err))?;
|
||||||
|
|
|
@ -357,6 +357,19 @@ where
|
||||||
if let Ok(dir) = env("SQLX_OFFLINE_DIR") {
|
if let Ok(dir) = env("SQLX_OFFLINE_DIR") {
|
||||||
let path = PathBuf::from(&dir);
|
let path = PathBuf::from(&dir);
|
||||||
|
|
||||||
|
// Prefer SQLX_TMP if set explicitly.
|
||||||
|
// Otherwise fallback to CARGO_TARGET_DIR and then the standard target directory.
|
||||||
|
let tmp_dir = if let Ok(tmp_dir) = env("SQLX_TMP") {
|
||||||
|
PathBuf::from(tmp_dir)
|
||||||
|
} else if let Ok(target_dir) = env("CARGO_TARGET_DIR") {
|
||||||
|
PathBuf::from(target_dir)
|
||||||
|
} else {
|
||||||
|
let tmp_target = PathBuf::from("./target/sqlx");
|
||||||
|
fs::create_dir_all(&tmp_target)
|
||||||
|
.map_err(|e| format!("Error creating cache directory: {e:?}"))?;
|
||||||
|
tmp_target
|
||||||
|
};
|
||||||
|
|
||||||
match fs::metadata(&path) {
|
match fs::metadata(&path) {
|
||||||
Err(e) => {
|
Err(e) => {
|
||||||
if e.kind() != io::ErrorKind::NotFound {
|
if e.kind() != io::ErrorKind::NotFound {
|
||||||
|
@ -376,7 +389,7 @@ where
|
||||||
}
|
}
|
||||||
|
|
||||||
// .sqlx exists and is a directory, store data.
|
// .sqlx exists and is a directory, store data.
|
||||||
data.save_in(path)?;
|
data.save_in(path, tmp_dir)?;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
Loading…
Reference in a new issue