Break drivers out into separate crates, clean up some technical debt (#2039)

* WIP rt refactors

* refactor: break drivers out into separate crates

also cleans up significant technical debt
This commit is contained in:
Austin Bonander 2023-02-01 16:47:29 -08:00
parent f0d6924f92
commit b5312c3b6f
400 changed files with 7104 additions and 11058 deletions

View file

@ -33,8 +33,8 @@ jobs:
runs-on: ubuntu-20.04
strategy:
matrix:
runtime: [async-std, tokio, actix]
tls: [native-tls, rustls]
runtime: [async-std, tokio]
tls: [native-tls, rustls, none]
steps:
- uses: actions/checkout@v2
@ -54,22 +54,22 @@ jobs:
args: >
--manifest-path sqlx-core/Cargo.toml
--no-default-features
--features offline,all-databases,all-types,migrate,runtime-${{ matrix.runtime }}-${{ matrix.tls }}
--features json,offline,migrate,_rt-${{ matrix.runtime }},_tls-${{ matrix.tls }}
- uses: actions-rs/cargo@v1
with:
command: check
args: >
--no-default-features
--features offline,all-databases,all-types,migrate,runtime-${{ matrix.runtime }}-${{ matrix.tls }},macros
--features all-databases,_unstable-all-types,runtime-${{ matrix.runtime }},tls-${{ matrix.tls }},macros
test:
name: Unit Test
runs-on: ubuntu-20.04
strategy:
matrix:
runtime: [async-std, tokio, actix]
tls: [native-tls, rustls]
runtime: [async-std, tokio]
tls: [native-tls, rustls, none]
steps:
- uses: actions/checkout@v2
@ -88,7 +88,7 @@ jobs:
command: test
args: >
--manifest-path sqlx-core/Cargo.toml
--features offline,all-databases,all-types,runtime-${{ matrix.runtime }}-${{ matrix.tls }}
--features json,_rt-${{ matrix.runtime }},_tls-${{ matrix.tls }}
cli:
name: CLI Binaries
@ -139,8 +139,7 @@ jobs:
runs-on: ubuntu-20.04
strategy:
matrix:
runtime: [async-std, tokio, actix]
tls: [native-tls, rustls]
runtime: [async-std, tokio]
needs: check
steps:
- uses: actions/checkout@v2
@ -157,16 +156,18 @@ jobs:
with:
key: ${{ runner.os }}-sqlite-${{ matrix.runtime }}-${{ matrix.tls }}
- run: echo "using ${DATABASE_URL}"
- uses: actions-rs/cargo@v1
with:
command: test
args: >
--no-default-features
--features any,macros,migrate,sqlite,all-types,runtime-${{ matrix.runtime }}-${{ matrix.tls }}
--features any,macros,sqlite,_unstable-all-types,runtime-${{ matrix.runtime }}
--
--test-threads=1
env:
DATABASE_URL: sqlite://tests/sqlite/sqlite.db
DATABASE_URL: sqlite:tests/sqlite/sqlite.db
RUSTFLAGS: --cfg sqlite_ipaddr
LD_LIBRARY_PATH: /tmp/sqlite3-lib
@ -176,8 +177,8 @@ jobs:
strategy:
matrix:
postgres: [14, 10]
runtime: [async-std, tokio, actix]
tls: [native-tls, rustls]
runtime: [async-std, tokio]
tls: [native-tls, rustls, none]
needs: check
steps:
- uses: actions/checkout@v2
@ -200,7 +201,7 @@ jobs:
with:
command: build
args: >
--features postgres,all-types,runtime-${{ matrix.runtime }}-${{ matrix.tls }}
--features postgres,_unstable-all-types,runtime-${{ matrix.runtime }},tls-${{ matrix.tls }}
- run: |
docker-compose -f tests/docker-compose.yml run -d -p 5432:5432 --name postgres_${{ matrix.postgres }} postgres_${{ matrix.postgres }}
@ -211,7 +212,7 @@ jobs:
command: test
args: >
--no-default-features
--features any,postgres,macros,all-types,runtime-${{ matrix.runtime }}-${{ matrix.tls }}
--features any,postgres,macros,_unstable-all-types,runtime-${{ matrix.runtime }},tls-${{ matrix.tls }}
env:
DATABASE_URL: postgres://postgres:password@localhost:5432/sqlx
# FIXME: needed to disable `ltree` tests in Postgres 9.6
@ -219,11 +220,12 @@ jobs:
RUSTFLAGS: --cfg postgres_${{ matrix.postgres }}
- uses: actions-rs/cargo@v1
if: matrix.tls != 'none'
with:
command: test
args: >
--no-default-features
--features any,postgres,macros,migrate,all-types,runtime-${{ matrix.runtime }}-${{ matrix.tls }}
--features any,postgres,macros,_unstable-all-types,runtime-${{ matrix.runtime }},tls-${{ matrix.tls }}
env:
DATABASE_URL: postgres://postgres:password@localhost:5432/sqlx?sslmode=verify-ca&sslrootcert=.%2Ftests%2Fcerts%2Fca.crt
# FIXME: needed to disable `ltree` tests in Postgres 9.6
@ -236,8 +238,8 @@ jobs:
strategy:
matrix:
mysql: [8, 5_7]
runtime: [async-std, tokio, actix]
tls: [native-tls, rustls]
runtime: [async-std, tokio]
tls: [native-tls, rustls, none]
needs: check
steps:
- uses: actions/checkout@v2
@ -256,7 +258,7 @@ jobs:
with:
command: build
args: >
--features mysql,all-types,runtime-${{ matrix.runtime }}-${{ matrix.tls }}
--features mysql,_unstable-all-types,runtime-${{ matrix.runtime }},tls-${{ matrix.tls }}
- run: docker-compose -f tests/docker-compose.yml run -d -p 3306:3306 mysql_${{ matrix.mysql }}
- run: sleep 60
@ -266,7 +268,7 @@ jobs:
command: test
args: >
--no-default-features
--features any,mysql,macros,migrate,all-types,runtime-${{ matrix.runtime }}-${{ matrix.tls }}
--features any,mysql,macros,_unstable-all-types,runtime-${{ matrix.runtime }},tls-${{ matrix.tls }}
env:
DATABASE_URL: mysql://root:password@localhost:3306/sqlx?ssl-mode=disabled
@ -277,7 +279,7 @@ jobs:
command: test
args: >
--no-default-features
--features any,mysql,macros,migrate,all-types,runtime-${{ matrix.runtime }}-${{ matrix.tls }}
--features any,mysql,macros,_unstable-all-types,runtime-${{ matrix.runtime }},tls-${{ matrix.tls }}
env:
DATABASE_URL: mysql://root:password@localhost:3306/sqlx
@ -287,8 +289,8 @@ jobs:
strategy:
matrix:
mariadb: [10_6, 10_3]
runtime: [async-std, tokio, actix]
tls: [native-tls, rustls]
runtime: [async-std, tokio]
tls: [native-tls, rustls, none]
needs: check
steps:
- uses: actions/checkout@v2
@ -307,7 +309,7 @@ jobs:
with:
command: build
args: >
--features mysql,all-types,runtime-${{ matrix.runtime }}-${{ matrix.tls }}
--features mysql,_unstable-all-types,runtime-${{ matrix.runtime }},tls-${{ matrix.tls }}
- run: docker-compose -f tests/docker-compose.yml run -d -p 3306:3306 mariadb_${{ matrix.mariadb }}
- run: sleep 30
@ -317,46 +319,6 @@ jobs:
command: test
args: >
--no-default-features
--features any,mysql,macros,migrate,all-types,runtime-${{ matrix.runtime }}-${{ matrix.tls }}
--features any,mysql,macros,_unstable-all-types,runtime-${{ matrix.runtime }},tls-${{ matrix.tls }}
env:
DATABASE_URL: mysql://root:password@localhost:3306/sqlx
mssql:
name: MSSQL
runs-on: ubuntu-20.04
strategy:
matrix:
mssql: [2019, 2017]
runtime: [async-std, tokio, actix]
tls: [native-tls, rustls]
needs: check
steps:
- uses: actions/checkout@v2
- uses: actions-rs/toolchain@v1
with:
profile: minimal
toolchain: stable
override: true
- uses: Swatinem/rust-cache@v1
with:
key: ${{ runner.os }}-mssql-${{ matrix.runtime }}-${{ matrix.tls }}-${{ hashFiles('**/Cargo.lock') }}
- uses: actions-rs/cargo@v1
with:
command: build
args: >
--features mssql,all-types,runtime-${{ matrix.runtime }}-${{ matrix.tls }}
- run: docker-compose -f tests/docker-compose.yml run -d -p 1433:1433 mssql_${{ matrix.mssql }}
- run: sleep 80 # MSSQL takes a "bit" to startup
- uses: actions-rs/cargo@v1
with:
command: test
args: >
--no-default-features
--features any,mssql,macros,migrate,all-types,runtime-${{ matrix.runtime }}-${{ matrix.tls }}
env:
DATABASE_URL: mssql://sa:Password123!@localhost/sqlx

1481
Cargo.lock generated

File diff suppressed because it is too large Load diff

View file

@ -2,11 +2,14 @@
members = [
".",
"sqlx-core",
"sqlx-rt",
"sqlx-macros",
"sqlx-macros-core",
"sqlx-test",
"sqlx-cli",
"sqlx-bench",
"sqlx-mysql",
"sqlx-postgres",
"sqlx-sqlite",
"examples/mysql/todos",
"examples/postgres/axum-social-with-tests",
"examples/postgres/files",
@ -18,15 +21,11 @@ members = [
"examples/sqlite/todos",
]
[package]
name = "sqlx"
[workspace.package]
version = "0.6.2"
license = "MIT OR Apache-2.0"
readme = "README.md"
repository = "https://github.com/launchbadge/sqlx"
documentation = "https://docs.rs/sqlx"
description = "🧰 The Rust SQL Toolkit. An async, pure Rust SQL crate featuring compile-time checked queries without a DSL. Supports PostgreSQL, MySQL, and SQLite."
edition = "2021"
repository = "https://github.com/launchbadge/sqlx"
keywords = ["database", "async", "postgres", "mysql", "sqlite"]
categories = ["database", "asynchronous"]
authors = [
@ -36,28 +35,31 @@ authors = [
"Daniel Akhterov <akhterovd@gmail.com>",
]
[package]
name = "sqlx"
readme = "README.md"
documentation = "https://docs.rs/sqlx"
description = "🧰 The Rust SQL Toolkit. An async, pure Rust SQL crate featuring compile-time checked queries without a DSL. Supports PostgreSQL, MySQL, and SQLite."
version.workspace = true
license.workspace = true
edition.workspace = true
authors.workspace = true
repository.workspace = true
[package.metadata.docs.rs]
features = ["all", "runtime-tokio-native-tls"]
rustdoc-args = ["--cfg", "docsrs"]
[features]
default = ["macros", "migrate"]
default = ["any", "macros", "migrate", "json"]
macros = ["sqlx-macros"]
migrate = ["sqlx-macros/migrate", "sqlx-core/migrate"]
# [deprecated] TLS is not possible to disable due to it being conditional on multiple features
# Hopefully Cargo can handle this in the future
tls = []
# offline building support in `sqlx-macros`
offline = ["sqlx-macros/offline", "sqlx-core/offline"]
migrate = ["sqlx-core/migrate", "sqlx-macros?/migrate", "sqlx-mysql?/migrate", "sqlx-postgres?/migrate", "sqlx-sqlite?/migrate"]
# intended mainly for CI and docs
all = ["tls", "all-databases", "all-types"]
all-databases = ["mysql", "sqlite", "postgres", "mssql", "any"]
all-types = [
all-databases = ["mysql", "sqlite", "postgres", "any"]
_unstable-all-types = [
"bigdecimal",
"decimal",
"rust_decimal",
"json",
"time",
"chrono",
@ -65,69 +67,89 @@ all-types = [
"mac_address",
"uuid",
"bit-vec",
"bstr",
"git2",
]
# previous runtimes, available as features for error messages better than just
# "feature doesn't exist"
runtime-actix = []
runtime-async-std = []
runtime-tokio = []
# Base runtime features without TLS
runtime-async-std = ["_rt-async-std", "sqlx-core/_rt-async-std", "sqlx-macros/_rt-async-std"]
runtime-tokio = ["_rt-tokio", "sqlx-core/_rt-tokio", "sqlx-macros/_rt-tokio"]
# actual runtimes
runtime-actix-native-tls = ["runtime-tokio-native-tls"]
runtime-async-std-native-tls = [
"sqlx-core/runtime-async-std-native-tls",
"sqlx-macros/runtime-async-std-native-tls",
"_rt-async-std",
]
runtime-tokio-native-tls = [
"sqlx-core/runtime-tokio-native-tls",
"sqlx-macros/runtime-tokio-native-tls",
"_rt-tokio",
]
# TLS features
tls-native-tls = ["sqlx-core/_tls-native-tls", "sqlx-macros/_tls-native-tls"]
tls-rustls = ["sqlx-core/_tls-rustls", "sqlx-macros/_tls-rustls"]
runtime-actix-rustls = ["runtime-tokio-rustls"]
runtime-async-std-rustls = [
"sqlx-core/runtime-async-std-rustls",
"sqlx-macros/runtime-async-std-rustls",
"_rt-async-std",
]
runtime-tokio-rustls = [
"sqlx-core/runtime-tokio-rustls",
"sqlx-macros/runtime-tokio-rustls",
"_rt-tokio",
]
# No-op feature used by the workflows to compile without TLS enabled. Not meant for general use.
tls-none = []
# Legacy Runtime + TLS features
runtime-async-std-native-tls = ["runtime-async-std", "tls-native-tls"]
runtime-async-std-rustls = ["runtime-async-std", "tls-rustls"]
runtime-tokio-native-tls = ["runtime-tokio", "tls-native-tls"]
runtime-tokio-rustls = ["runtime-tokio", "tls-rustls"]
# for conditional compilation
_rt-async-std = []
_rt-tokio = []
# database
any = ["sqlx-core/any"]
postgres = ["sqlx-core/postgres", "sqlx-macros/postgres"]
mysql = ["sqlx-core/mysql", "sqlx-macros/mysql"]
sqlite = ["sqlx-core/sqlite", "sqlx-macros/sqlite"]
mssql = ["sqlx-core/mssql", "sqlx-macros/mssql"]
any = ["sqlx-core/any", "sqlx-mysql?/any", "sqlx-postgres?/any", "sqlx-sqlite?/any"]
postgres = ["sqlx-postgres", "sqlx-macros?/postgres"]
mysql = ["sqlx-mysql", "sqlx-macros?/mysql"]
sqlite = ["sqlx-sqlite", "sqlx-macros?/sqlite"]
# types
bigdecimal = ["sqlx-core/bigdecimal", "sqlx-macros/bigdecimal"]
decimal = ["sqlx-core/decimal", "sqlx-macros/decimal"]
chrono = ["sqlx-core/chrono", "sqlx-macros/chrono"]
ipnetwork = ["sqlx-core/ipnetwork", "sqlx-macros/ipnetwork"]
mac_address = ["sqlx-core/mac_address", "sqlx-macros/mac_address"]
uuid = ["sqlx-core/uuid", "sqlx-macros/uuid"]
json = ["sqlx-core/json", "sqlx-macros/json"]
time = ["sqlx-core/time", "sqlx-macros/time"]
bit-vec = ["sqlx-core/bit-vec", "sqlx-macros/bit-vec"]
bstr = ["sqlx-core/bstr"]
git2 = ["sqlx-core/git2"]
json = ["sqlx-macros?/json", "sqlx-mysql?/json", "sqlx-postgres?/json", "sqlx-sqlite?/json"]
bigdecimal = ["sqlx-core/bigdecimal", "sqlx-macros?/bigdecimal", "sqlx-mysql?/bigdecimal", "sqlx-postgres?/bigdecimal"]
bit-vec = ["sqlx-core/bit-vec", "sqlx-macros?/bit-vec", "sqlx-postgres?/bit-vec"]
chrono = ["sqlx-core/chrono", "sqlx-macros?/chrono", "sqlx-mysql?/chrono", "sqlx-postgres?/chrono", "sqlx-sqlite?/chrono"]
ipnetwork = ["sqlx-core/ipnetwork", "sqlx-macros?/ipnetwork", "sqlx-postgres?/ipnetwork"]
mac_address = ["sqlx-core/mac_address", "sqlx-macros?/mac_address", "sqlx-postgres?/mac_address"]
rust_decimal = ["sqlx-core/rust_decimal", "sqlx-macros?/rust_decimal", "sqlx-mysql?/rust_decimal", "sqlx-postgres?/rust_decimal"]
time = ["sqlx-core/time", "sqlx-macros?/time", "sqlx-mysql?/time", "sqlx-postgres?/time", "sqlx-sqlite?/time"]
uuid = ["sqlx-core/uuid", "sqlx-macros?/uuid", "sqlx-mysql?/uuid", "sqlx-postgres?/uuid", "sqlx-sqlite?/uuid"]
[workspace.dependencies]
# Driver crates
sqlx-mysql = { version = "=0.6.2", path = "sqlx-mysql" }
sqlx-postgres = { version = "=0.6.2", path = "sqlx-postgres" }
sqlx-sqlite = { version = "=0.6.2", path = "sqlx-sqlite" }
# Facade crate (for reference from sqlx-cli)
sqlx = {version = "=0.6.2", path = "." }
# Common type integrations shared by multiple driver crates.
# These are optional unless enabled in a workspace crate.
bigdecimal = "0.3.0"
bit-vec = "0.6.3"
chrono = "0.4.22"
ipnetwork = "0.20.0"
mac_address = "1.1.3"
rust_decimal = "1.26.1"
time = { version = "0.3.14", features = ["formatting", "parsing", "macros"] }
uuid = "1.1.2"
# Common utility crates
dotenvy = { version = "0.15.0", default-features = false }
# Runtimes
[workspace.dependencies.async-std]
version = "1"
[workspace.dependencies.tokio]
version = "1"
features = ["time", "net", "sync", "fs", "io-util", "rt"]
default-features = false
[dependencies]
sqlx-core = { version = "0.6.2", path = "sqlx-core", default-features = false }
sqlx-core = { version = "0.6.2", path = "sqlx-core", features = ["offline", "migrate"], default-features = false }
sqlx-macros = { version = "0.6.2", path = "sqlx-macros", default-features = false, optional = true }
sqlx-mysql = { workspace = true, optional = true }
sqlx-postgres = { workspace = true, optional = true }
sqlx-sqlite = { workspace = true, optional = true }
[dev-dependencies]
anyhow = "1.0.52"
time_ = { version = "0.3.2", package = "time" }
@ -137,7 +159,6 @@ async-std = { version = "1.10.0", features = ["attributes"] }
tokio = { version = "1.15.0", features = ["full"] }
dotenvy = "0.15.0"
trybuild = "1.0.53"
sqlx-rt = { path = "./sqlx-rt" }
sqlx-test = { path = "./sqlx-test" }
paste = "1.0.6"
serde = { version = "1.0.132", features = ["derive"] }
@ -203,7 +224,7 @@ path = "tests/sqlite/derives.rs"
required-features = ["sqlite", "macros"]
[[test]]
name = "sqlcipher"
name = "sqlite-sqlcipher"
path = "tests/sqlite/sqlcipher.rs"
required-features = ["sqlite"]
@ -289,27 +310,3 @@ required-features = ["postgres", "macros", "migrate"]
name = "postgres-migrate"
path = "tests/postgres/migrate.rs"
required-features = ["postgres", "macros", "migrate"]
#
# Microsoft SQL Server (MSSQL)
#
[[test]]
name = "mssql"
path = "tests/mssql/mssql.rs"
required-features = ["mssql"]
[[test]]
name = "mssql-types"
path = "tests/mssql/types.rs"
required-features = ["mssql"]
[[test]]
name = "mssql-describe"
path = "tests/mssql/describe.rs"
required-features = ["mssql"]
[[test]]
name = "mssql-macros"
path = "tests/mssql/macros.rs"
required-features = ["mssql", "macros"]

View file

@ -123,28 +123,57 @@ SQLx is compatible with the [`async-std`], [`tokio`] and [`actix`] runtimes; and
```toml
# Cargo.toml
[dependencies]
# PICK ONE OF THE FOLLOWING:
# tokio (no TLS)
sqlx = { version = "0.6", features = [ "runtime-tokio" ] }
# tokio + native-tls
sqlx = { version = "0.6", features = [ "runtime-tokio", "tls-native" ] }
# tokio + rustls
sqlx = { version = "0.6", features = [ "runtime-tokio-rustls" ] }
sqlx = { version = "0.6", features = [ "runtime-tokio", "tls-rustls" ] }
# async-std (no TLS)
sqlx = { version = "0.6", features = [ "runtime-async-std" ] }
# async-std + native-tls
sqlx = { version = "0.6", features = [ "runtime-async-std-native-tls" ] }
sqlx = { version = "0.6", features = [ "runtime-async-std", "tls-native" ] }
# async-std + rustls
sqlx = { version = "0.6", features = [ "runtime-async-std", "tls-rustls" ] }
```
<small><small>The runtime and TLS backend not being separate feature sets to select is a workaround for a [Cargo issue](https://github.com/rust-lang/cargo/issues/3494).</small></small>
#### Cargo Feature Flags
For backwards-compatibility reasons, the runtime and TLS features can either be chosen together as a single feature,
or separately.
For forward-compatibility, you should use the separate runtime and TLS features as the combination features may
be removed in the future.
- `runtime-async-std`: Use the `async-std` runtime without enabling a TLS backend.
- `runtime-async-std-native-tls`: Use the `async-std` runtime and `native-tls` TLS backend.
- `runtime-async-std-rustls`: Use the `async-std` runtime and `rustls` TLS backend.
- `runtime-tokio`: Use the `tokio` runtime without enabling a TLS backend.
- `runtime-tokio-native-tls`: Use the `tokio` runtime and `native-tls` TLS backend.
- `runtime-tokio-rustls`: Use the `tokio` runtime and `rustls` TLS backend.
- `runtime-actix`: Use the `actix` runtime without enabling a TLS backend.
- `runtime-actix-native-tls`: Use the `actix` runtime and `native-tls` TLS backend.
- `runtime-actix-rustls`: Use the `actix` runtime and `rustls` TLS backend.
- Actix-web is fully compatible with Tokio and so a separate runtime feature is no longer needed.
The above three features exist only for backwards compatibility, and are in fact merely aliases to their
`runtime-tokio` counterparts.
- `tls-native`: Use the `native-tls` TLS backend (OpenSSL on *nix, SChannel on Windows, Secure Transport on macOS).
- `tls-rustls`: Use the `rustls` TLS backend (crossplatform backend, only supports TLS 1.2 and 1.3).
- `postgres`: Add support for the Postgres database server.
- `mysql`: Add support for the MySQL/MariaDB database server.
@ -177,8 +206,6 @@ sqlx = { version = "0.6", features = [ "runtime-async-std-native-tls" ] }
- `json`: Add support for `JSON` and `JSONB` (in postgres) using the `serde_json` crate.
- `tls`: Add support for TLS connections.
- `offline`: Enables building the macros in offline mode when a live database is not available (such as CI).
- Requires `sqlx-cli` installed to use. See [sqlx-cli/README.md][readme-offline].

View file

@ -7,6 +7,6 @@ edition = "2021"
[dependencies]
anyhow = "1.0"
sqlx = { path = "../../../", features = ["postgres", "offline", "runtime-tokio-native-tls"] }
sqlx = { path = "../../../", features = ["postgres", "runtime-tokio-native-tls"] }
tokio = { version = "1.20.0", features = ["macros"]}
dotenvy = "0.15.0"

View file

@ -5,6 +5,6 @@ edition = "2021"
workspace = "../../../"
[dependencies]
sqlx = { path = "../../../", features = [ "postgres", "tls" ] }
sqlx = { path = "../../../", features = [ "postgres" ] }
futures = "0.3.1"
tokio = { version = "1.20.0", features = ["macros"]}

View file

@ -7,7 +7,7 @@ workspace = "../../../"
[dependencies]
anyhow = "1.0"
futures = "0.3"
sqlx = { path = "../../../", features = ["postgres", "offline", "runtime-tokio-native-tls"] }
sqlx = { path = "../../../", features = ["postgres", "runtime-tokio-native-tls"] }
structopt = "0.3"
tokio = { version = "1.20.0", features = ["macros"]}
dotenvy = "0.15.0"

View file

@ -7,7 +7,7 @@ workspace = "../../../"
[dependencies]
anyhow = "1.0"
futures = "0.3"
sqlx = { path = "../../../", features = ["postgres", "offline", "runtime-tokio-native-tls"] }
sqlx = { path = "../../../", features = ["postgres", "runtime-tokio-native-tls"] }
structopt = "0.3"
tokio = { version = "1.20.0", features = ["macros"]}
dotenvy = "0.15.0"

View file

@ -5,6 +5,6 @@ edition = "2021"
workspace = "../../../"
[dependencies]
sqlx = { path = "../../../", features = [ "postgres", "tls", "runtime-tokio-native-tls" ] }
sqlx = { path = "../../../", features = [ "postgres", "runtime-tokio-native-tls" ] }
futures = "0.3.1"
tokio = { version = "1.20.0", features = ["macros"]}

View file

@ -9,21 +9,17 @@ publish = false
runtime-actix-native-tls = ["runtime-tokio-native-tls"]
runtime-async-std-native-tls = [
"sqlx/runtime-async-std-native-tls",
"sqlx-rt/runtime-async-std-native-tls",
]
runtime-tokio-native-tls = [
"sqlx/runtime-tokio-native-tls",
"sqlx-rt/runtime-tokio-native-tls",
]
runtime-actix-rustls = ["runtime-tokio-rustls"]
runtime-async-std-rustls = [
"sqlx/runtime-async-std-rustls",
"sqlx-rt/runtime-async-std-rustls",
]
runtime-tokio-rustls = [
"sqlx/runtime-tokio-rustls",
"sqlx-rt/runtime-tokio-rustls",
]
postgres = ["sqlx/postgres"]
@ -34,7 +30,6 @@ criterion = "0.3.3"
dotenvy = "0.15.0"
once_cell = "1.4"
sqlx = { version = "0.6", path = "../", default-features = false, features = ["macros"] }
sqlx-rt = { version = "0.6", path = "../sqlx-rt", default-features = false }
chrono = "0.4.19"

View file

@ -23,7 +23,7 @@ fn bench_pgpool_acquire(c: &mut Criterion) {
}
fn do_bench_acquire(b: &mut Bencher, concurrent: u32, fair: bool) {
let pool = sqlx_rt::block_on(
let pool = sqlx::__rt::block_on(
PgPoolOptions::new()
// we don't want timeouts because we want to see how the pool degrades
.acquire_timeout(Duration::from_secs(3600))
@ -41,8 +41,8 @@ fn do_bench_acquire(b: &mut Bencher, concurrent: u32, fair: bool) {
for _ in 0..concurrent {
let pool = pool.clone();
sqlx_rt::enter_runtime(|| {
sqlx_rt::spawn(async move {
sqlx::__rt::enter_runtime(|| {
sqlx::__rt::spawn(async move {
while !pool.is_closed() {
let conn = match pool.acquire().await {
Ok(conn) => conn,
@ -51,7 +51,7 @@ fn do_bench_acquire(b: &mut Bencher, concurrent: u32, fair: bool) {
};
// pretend we're using the connection
sqlx_rt::sleep(Duration::from_micros(500)).await;
sqlx::__rt::sleep(Duration::from_micros(500)).await;
drop(criterion::black_box(conn));
}
})
@ -59,7 +59,7 @@ fn do_bench_acquire(b: &mut Bencher, concurrent: u32, fair: bool) {
}
b.iter_custom(|iters| {
sqlx_rt::block_on(async {
sqlx::__rt::block_on(async {
// take the start time inside the future to make sure we only count once it's running
let start = Instant::now();
for _ in 0..iters {
@ -73,7 +73,7 @@ fn do_bench_acquire(b: &mut Bencher, concurrent: u32, fair: bool) {
})
});
sqlx_rt::block_on(pool.close());
sqlx::__rt::block_on(pool.close());
}
criterion_group!(pg_pool, bench_pgpool_acquire);

View file

@ -8,7 +8,7 @@ struct Test {
}
fn main() -> sqlx::Result<()> {
sqlx_rt::block_on(async {
sqlx::__rt::block_on(async {
let mut conn = sqlx::SqliteConnection::connect("sqlite://test.db?mode=rwc").await?;
let delete_sql = "DROP TABLE IF EXISTS test";
conn.execute(delete_sql).await?;

View file

@ -27,10 +27,9 @@ path = "src/bin/cargo-sqlx.rs"
[dependencies]
dotenvy = "0.15.0"
tokio = { version = "1.15.0", features = ["macros", "rt", "rt-multi-thread"] }
sqlx = { version = "0.6.2", path = "..", default-features = false, features = [
sqlx = { workspace = true, default-features = false, features = [
"migrate",
"any",
"offline",
] }
futures = "0.3.19"
clap = { version = "3.1.0", features = ["derive", "env"] }
@ -52,14 +51,14 @@ filetime = "0.2"
backoff = { version = "0.4.0", features = ["futures", "tokio"] }
[features]
default = ["postgres", "sqlite", "mysql", "native-tls"]
#default = ["postgres", "sqlite", "mysql", "native-tls"]
rustls = ["sqlx/runtime-tokio-rustls"]
native-tls = ["sqlx/runtime-tokio-native-tls"]
# databases
mysql = ["sqlx/mysql"]
postgres = ["sqlx/postgres"]
sqlite = ["sqlx/sqlite"]
#mysql = ["sqlx/mysql"]
#postgres = ["sqlx/postgres"]
#sqlite = ["sqlx/sqlite"]
# workaround for musl + openssl issues
openssl-vendored = ["openssl/vendored"]

View file

@ -23,42 +23,24 @@ pub async fn run(opt: Opt) -> Result<()> {
source,
description,
reversible,
} => migrate::add(source.resolve(&migrate.source), &description, reversible).await?,
} => migrate::add(&source, &description, reversible).await?,
MigrateCommand::Run {
source,
dry_run,
ignore_missing,
connect_opts,
} => {
migrate::run(
source.resolve(&migrate.source),
&connect_opts,
dry_run,
*ignore_missing,
)
.await?
}
} => migrate::run(&source, &connect_opts, dry_run, *ignore_missing).await?,
MigrateCommand::Revert {
source,
dry_run,
ignore_missing,
connect_opts,
} => {
migrate::revert(
source.resolve(&migrate.source),
&connect_opts,
dry_run,
*ignore_missing,
)
.await?
}
} => migrate::revert(&source, &connect_opts, dry_run, *ignore_missing).await?,
MigrateCommand::Info {
source,
connect_opts,
} => migrate::info(source.resolve(&migrate.source), &connect_opts).await?,
MigrateCommand::BuildScript { source, force } => {
migrate::build_script(source.resolve(&migrate.source), force)?
}
} => migrate::info(&source, &connect_opts).await?,
MigrateCommand::BuildScript { source, force } => migrate::build_script(&source, force)?,
},
Command::Database(database) => match database.command {
@ -98,6 +80,8 @@ pub async fn run(opt: Opt) -> Result<()> {
/// Attempt to connect to the database server, retrying up to `ops.connect_timeout`.
async fn connect(opts: &ConnectOpts) -> sqlx::Result<AnyConnection> {
sqlx::any::install_default_drivers();
retry_connect_errors(opts, AnyConnection::connect).await
}

View file

@ -94,11 +94,6 @@ pub enum DatabaseCommand {
/// Group of commands for creating and running migrations.
#[derive(Parser, Debug)]
pub struct MigrateOpt {
/// Path to folder containing migrations.
/// Warning: deprecated, use <SUBCOMMAND> --source <SOURCE>
#[clap(long, default_value = "migrations")]
pub source: String,
#[clap(subcommand)]
pub command: MigrateCommand,
}
@ -111,7 +106,7 @@ pub enum MigrateCommand {
description: String,
#[clap(flatten)]
source: SourceOverride,
source: Source,
/// If true, creates a pair of up and down migration files with same version
/// else creates a single sql file
@ -122,7 +117,7 @@ pub enum MigrateCommand {
/// Run all pending migrations.
Run {
#[clap(flatten)]
source: SourceOverride,
source: Source,
/// List all the migrations to be run without applying
#[clap(long)]
@ -138,7 +133,7 @@ pub enum MigrateCommand {
/// Revert the latest migration with a down file.
Revert {
#[clap(flatten)]
source: SourceOverride,
source: Source,
/// List the migration to be reverted without applying
#[clap(long)]
@ -154,7 +149,7 @@ pub enum MigrateCommand {
/// List all available migrations.
Info {
#[clap(flatten)]
source: SourceOverride,
source: Source,
#[clap(flatten)]
connect_opts: ConnectOpts,
@ -165,7 +160,7 @@ pub enum MigrateCommand {
/// Must be run in a Cargo project root.
BuildScript {
#[clap(flatten)]
source: SourceOverride,
source: Source,
/// Overwrite the build script if it already exists.
#[clap(long)]
@ -189,27 +184,6 @@ impl Deref for Source {
}
}
/// Argument for overriding migration scripts source.
// Note: once `MigrateOpt.source` is removed, usage can be replaced with `Source`.
#[derive(Args, Debug)]
pub struct SourceOverride {
/// Path to folder containing migrations [default: migrations]
#[clap(long)]
source: Option<String>,
}
impl SourceOverride {
/// Override command's `source` flag value with subcommand's
/// `source` flag value when provided.
#[inline]
pub(super) fn resolve<'a>(&'a self, source: &'a str) -> &'a str {
match self.source {
Some(ref source) => source,
None => source,
}
}
}
/// Argument for the database URL.
#[derive(Args, Debug)]
pub struct ConnectOpts {

View file

@ -29,12 +29,10 @@ pub async fn run(
merge: bool,
cargo_args: Vec<String>,
) -> anyhow::Result<()> {
// Ensure the database server is available.
crate::connect(connect_opts).await?.close().await?;
let db = check_backend_and_get_name(connect_opts).await?;
let url = &connect_opts.database_url;
let db_kind = get_db_kind(url)?;
let data = run_prepare_step(url, merge, cargo_args)?;
if data.is_empty() {
@ -48,10 +46,7 @@ pub async fn run(
BufWriter::new(
File::create("sqlx-data.json").context("failed to create/open `sqlx-data.json`")?,
),
&DataFile {
db: db_kind.to_owned(),
data,
},
&DataFile { db, data },
)
.context("failed to write to `sqlx-data.json`")?;
@ -68,12 +63,10 @@ pub async fn check(
merge: bool,
cargo_args: Vec<String>,
) -> anyhow::Result<()> {
// Ensure the database server is available.
crate::connect(connect_opts).await?.close().await?;
let db = check_backend_and_get_name(connect_opts).await?;
let url = &connect_opts.database_url;
let db_kind = get_db_kind(url)?;
let data = run_prepare_step(url, merge, cargo_args)?;
let data_file = File::open("sqlx-data.json").context(
@ -85,11 +78,11 @@ pub async fn check(
data: saved_data,
} = serde_json::from_reader(BufReader::new(data_file))?;
if db_kind != expected_db {
if db != expected_db {
bail!(
"saved prepare data is for {}, not {} (inferred from `DATABASE_URL`)",
expected_db,
db_kind
db
)
}
@ -317,23 +310,14 @@ fn minimal_project_recompile_action(metadata: &Metadata) -> anyhow::Result<Proje
})
}
fn get_db_kind(url: &str) -> anyhow::Result<&'static str> {
let options = AnyConnectOptions::from_str(&url)?;
// these should match the values of `DatabaseExt::NAME` in `sqlx-macros`
match options.kind() {
#[cfg(feature = "postgres")]
AnyKind::Postgres => Ok("PostgreSQL"),
#[cfg(feature = "mysql")]
AnyKind::MySql => Ok("MySQL"),
#[cfg(feature = "sqlite")]
AnyKind::Sqlite => Ok("SQLite"),
#[cfg(feature = "mssql")]
AnyKind::Mssql => Ok("MSSQL"),
}
/// Ensure the database server is available.
///
/// Returns the `Database::NAME` of the backend on success.
async fn check_backend_and_get_name(opts: &ConnectOpts) -> anyhow::Result<String> {
let conn = crate::connect(opts).await?;
let db = conn.backend_name().to_string();
conn.close().await?;
Ok(db)
}
#[cfg(test)]

View file

@ -1,116 +1,59 @@
[package]
name = "sqlx-core"
version = "0.6.2"
repository = "https://github.com/launchbadge/sqlx"
description = "Core of SQLx, the rust SQL toolkit. Not intended to be used directly."
license = "MIT OR Apache-2.0"
edition = "2021"
authors = [
"Ryan Leckey <leckey.ryan@gmail.com>",
"Austin Bonander <austin.bonander@gmail.com>",
"Chloe Ross <orangesnowfox@gmail.com>",
"Daniel Akhterov <akhterovd@gmail.com>",
]
version.workspace = true
license.workspace = true
edition.workspace = true
authors.workspace = true
repository.workspace = true
[package.metadata.docs.rs]
features = ["all-databases", "all-types", "offline", "runtime-tokio-native-tls"]
features = ["offline", "runtime-tokio-native-tls"]
[features]
default = ["migrate"]
default = []
migrate = ["sha2", "crc"]
# databases
all-databases = ["postgres", "mysql", "sqlite", "mssql", "any"]
postgres = [
"md-5",
"sha2",
"base64",
"sha1",
"rand",
"hmac",
"futures-channel/sink",
"futures-util/sink",
"json",
"dirs",
"whoami",
"hkdf"
]
mysql = [
"sha1",
"sha2",
"generic-array",
"num-bigint",
"digest",
"rand",
"rsa",
]
sqlite = ["libsqlite3-sys", "futures-executor", "flume"]
mssql = ["uuid", "encoding_rs", "regex"]
any = []
# types
all-types = [
"chrono",
"time",
"bigdecimal",
"decimal",
"ipnetwork",
"mac_address",
"json",
"uuid",
"bit-vec",
]
bigdecimal = ["bigdecimal_", "num-bigint"]
decimal = ["rust_decimal", "num-bigint"]
json = ["serde", "serde_json"]
# runtimes
runtime-actix-native-tls = ["runtime-tokio-native-tls"]
runtime-async-std-native-tls = [
"sqlx-rt/runtime-async-std-native-tls",
"sqlx/runtime-async-std-native-tls",
"_tls-native-tls",
"_rt-async-std",
]
runtime-tokio-native-tls = [
"sqlx-rt/runtime-tokio-native-tls",
"sqlx/runtime-tokio-native-tls",
"_tls-native-tls",
"_rt-tokio",
]
runtime-actix-rustls = ['runtime-tokio-rustls']
runtime-async-std-rustls = [
"sqlx-rt/runtime-async-std-rustls",
"sqlx/runtime-async-std-rustls",
"_tls-rustls",
"_rt-async-std",
]
runtime-tokio-rustls = [
"sqlx-rt/runtime-tokio-rustls",
"sqlx/runtime-tokio-rustls",
"_tls-rustls",
"_rt-tokio"
]
# for conditional compilation
_rt-async-std = []
_rt-tokio = ["tokio-stream"]
_tls-native-tls = []
_rt-async-std = ["async-std", "async-io"]
_rt-tokio = ["tokio", "tokio-stream"]
_tls-native-tls = ["native-tls"]
_tls-rustls = ["rustls", "rustls-pemfile", "webpki-roots"]
_tls-none = []
# support offline/decoupled building (enables serialization of `Describe`)
offline = ["serde", "either/serde"]
[dependencies]
# Runtimes
async-std = { workspace = true, optional = true }
tokio = { workspace = true, optional = true }
# TLS
native-tls = { version = "0.2.10", optional = true }
rustls = { version = "0.20.6", features = ["dangerous_configuration"], optional = true }
rustls-pemfile = { version = "1.0", optional = true }
webpki-roots = { version = "0.22.0", optional = true }
# Type Integrations
bit-vec = { workspace = true, optional = true }
bigdecimal = { workspace = true, optional = true }
rust_decimal = { workspace = true, optional = true }
time = { workspace = true, optional = true }
ipnetwork = { workspace = true, optional = true }
mac_address = { workspace = true, optional = true }
uuid = { workspace = true, optional = true }
async-io = { version = "1.9.0", optional = true }
paste = "1.0.6"
ahash = "0.7.6"
atoi = "1.0"
sqlx-rt = { path = "../sqlx-rt", version = "0.6.2" }
base64 = { version = "0.13.0", default-features = false, optional = true, features = ["std"] }
bigdecimal_ = { version = "0.3.0", optional = true, package = "bigdecimal" }
rust_decimal = { version = "1.19.0", optional = true }
bit-vec = { version = "0.6.3", optional = true }
bitflags = { version = "1.3.2", default-features = false }
bytes = "1.1.0"
byteorder = { version = "1.4.3", default-features = false, features = ["std"] }
@ -118,65 +61,41 @@ chrono = { version = "0.4.19", default-features = false, features = ["clock"], o
crc = { version = "3", optional = true }
crossbeam-queue = "0.3.2"
digest = { version = "0.10.0", default-features = false, optional = true, features = ["std"] }
dirs = { version = "4.0.0", optional = true }
encoding_rs = { version = "0.8.30", optional = true }
either = "1.6.1"
futures-channel = { version = "0.3.19", default-features = false, features = ["sink", "alloc", "std"] }
futures-core = { version = "0.3.19", default-features = false }
futures-io = "0.3.24"
futures-intrusive = "0.4.0"
futures-util = { version = "0.3.19", default-features = false, features = ["alloc", "sink"] }
# used by the SQLite worker thread to block on the async mutex that locks the database handle
futures-executor = { version = "0.3.19", optional = true }
flume = { version = "0.10.9", optional = true, default-features = false, features = ["async"] }
futures-util = { version = "0.3.19", default-features = false, features = ["alloc", "sink", "io"] }
generic-array = { version = "0.14.4", default-features = false, optional = true }
hex = "0.4.3"
hmac = { version = "0.12.0", default-features = false, optional = true }
itoa = "1.0.1"
ipnetwork = { version = "0.19.0", default-features = false, optional = true }
mac_address = { version = "1.1.2", default-features = false, optional = true }
libc = "0.2.112"
libsqlite3-sys = { version = "0.25.1", optional = true, default-features = false, features = [
"pkg-config",
"vcpkg",
"bundled",
"unlock_notify"
] }
log = { version = "0.4.14", default-features = false }
md-5 = { version = "0.10.0", default-features = false, optional = true }
memchr = { version = "2.4.1", default-features = false }
num-bigint = { version = "0.4.0", default-features = false, optional = true, features = ["std"] }
once_cell = "1.9.0"
percent-encoding = "2.1.0"
rand = { version = "0.8.4", default-features = false, optional = true, features = ["std", "std_rng"] }
regex = { version = "1.5.5", optional = true }
rsa = { version = "0.6.0", optional = true }
rustls = { version = "0.20.1", features = ["dangerous_configuration"], optional = true }
rustls-pemfile = { version = "1.0", optional = true }
serde = { version = "1.0.132", features = ["derive", "rc"], optional = true }
serde_json = { version = "1.0.73", features = ["raw_value"], optional = true }
sha1 = { version = "0.10.1", default-features = false, optional = true }
sha2 = { version = "0.10.0", default-features = false, optional = true }
sqlformat = "0.2.0"
thiserror = "1.0.30"
time = { version = "0.3.2", features = ["macros", "formatting", "parsing"], optional = true }
tokio-stream = { version = "0.1.8", features = ["fs"], optional = true }
smallvec = "1.7.0"
url = { version = "2.2.2", default-features = false }
uuid = { version = "1.0", default-features = false, optional = true, features = ["std"] }
webpki-roots = { version = "0.22.0", optional = true }
whoami = { version = "1.2.1", optional = true }
stringprep = "0.1.2"
bstr = { version = "0.2.17", default-features = false, features = ["std"], optional = true }
git2 = { version = "0.14", default-features = false, optional = true }
hashlink = "0.8.0"
# NOTE: *must* remain below 1.7.0 to allow users to avoid the `ahash` cyclic dependency problem by pinning the version
# https://github.com/tkaitchuck/aHash/issues/95#issuecomment-874150078
indexmap = "1.6.0"
hkdf = { version = "0.12.0", optional = true }
event-listener = "2.5.2"
dotenvy = "0.15"
[dev-dependencies]
sqlx = { version = "0.6.2", path = "..", features = ["postgres", "sqlite", "mysql"] }
sqlx = { version = "0.6.2", path = "..", features = ["postgres", "sqlite", "mysql", "migrate"] }
tokio = { version = "1", features = ["rt"] }

View file

@ -1,6 +1,7 @@
use crate::database::Database;
use crate::error::Error;
use crate::pool::{MaybePoolConnection, Pool, PoolConnection};
use crate::transaction::Transaction;
use futures_core::future::BoxFuture;
use std::ops::{Deref, DerefMut};
@ -97,18 +98,18 @@ impl<'a, DB: Database> Acquire<'a> for &'_ Pool<DB> {
}
}
#[allow(unused_macros)]
#[macro_export]
macro_rules! impl_acquire {
($DB:ident, $C:ident) => {
impl<'c> crate::acquire::Acquire<'c> for &'c mut $C {
impl<'c> $crate::acquire::Acquire<'c> for &'c mut $C {
type Database = $DB;
type Connection = &'c mut <$DB as crate::database::Database>::Connection;
type Connection = &'c mut <$DB as $crate::database::Database>::Connection;
#[inline]
fn acquire(
self,
) -> futures_core::future::BoxFuture<'c, Result<Self::Connection, crate::error::Error>>
) -> futures_core::future::BoxFuture<'c, Result<Self::Connection, $crate::error::Error>>
{
Box::pin(futures_util::future::ok(self))
}
@ -118,59 +119,9 @@ macro_rules! impl_acquire {
self,
) -> futures_core::future::BoxFuture<
'c,
Result<crate::transaction::Transaction<'c, $DB>, crate::error::Error>,
Result<$crate::transaction::Transaction<'c, $DB>, $crate::error::Error>,
> {
crate::transaction::Transaction::begin(self)
}
}
impl<'c> crate::acquire::Acquire<'c> for &'c mut crate::pool::PoolConnection<$DB> {
type Database = $DB;
type Connection = &'c mut <$DB as crate::database::Database>::Connection;
#[inline]
fn acquire(
self,
) -> futures_core::future::BoxFuture<'c, Result<Self::Connection, crate::error::Error>>
{
Box::pin(futures_util::future::ok(&mut **self))
}
#[inline]
fn begin(
self,
) -> futures_core::future::BoxFuture<
'c,
Result<crate::transaction::Transaction<'c, $DB>, crate::error::Error>,
> {
crate::transaction::Transaction::begin(&mut **self)
}
}
impl<'c, 't> crate::acquire::Acquire<'t>
for &'t mut crate::transaction::Transaction<'c, $DB>
{
type Database = $DB;
type Connection = &'t mut <$DB as crate::database::Database>::Connection;
#[inline]
fn acquire(
self,
) -> futures_core::future::BoxFuture<'t, Result<Self::Connection, crate::error::Error>>
{
Box::pin(futures_util::future::ok(&mut **self))
}
#[inline]
fn begin(
self,
) -> futures_core::future::BoxFuture<
't,
Result<crate::transaction::Transaction<'t, $DB>, crate::error::Error>,
> {
crate::transaction::Transaction::begin(&mut **self)
$crate::transaction::Transaction::begin(self)
}
}
};

View file

@ -1,133 +1,72 @@
use crate::any::Any;
use crate::any::value::AnyValueKind;
use crate::any::{Any, AnyValueRef};
use crate::arguments::Arguments;
use crate::encode::Encode;
use crate::types::Type;
use std::borrow::Cow;
use std::marker::PhantomData;
#[derive(Default)]
pub struct AnyArguments<'q> {
values: Vec<Box<dyn Encode<'q, Any> + Send + 'q>>,
#[doc(hidden)]
pub values: AnyArgumentBuffer<'q>,
}
impl<'q> Arguments<'q> for AnyArguments<'q> {
type Database = Any;
fn reserve(&mut self, additional: usize, _size: usize) {
self.values.reserve(additional);
self.values.0.reserve(additional);
}
fn add<T>(&mut self, value: T)
where
T: 'q + Send + Encode<'q, Self::Database> + Type<Self::Database>,
{
self.values.push(Box::new(value));
value.encode(&mut self.values);
}
}
pub struct AnyArgumentBuffer<'q>(pub(crate) AnyArgumentBufferKind<'q>);
pub struct AnyArgumentBuffer<'q>(#[doc(hidden)] pub Vec<AnyValueKind<'q>>);
pub(crate) enum AnyArgumentBufferKind<'q> {
#[cfg(feature = "postgres")]
Postgres(
crate::postgres::PgArguments,
std::marker::PhantomData<&'q ()>,
),
#[cfg(feature = "mysql")]
MySql(
crate::mysql::MySqlArguments,
std::marker::PhantomData<&'q ()>,
),
#[cfg(feature = "sqlite")]
Sqlite(crate::sqlite::SqliteArguments<'q>),
#[cfg(feature = "mssql")]
Mssql(
crate::mssql::MssqlArguments,
std::marker::PhantomData<&'q ()>,
),
}
// control flow inferred type bounds would be fun
// the compiler should know the branch is totally unreachable
#[cfg(feature = "sqlite")]
#[allow(irrefutable_let_patterns)]
impl<'q> From<AnyArguments<'q>> for crate::sqlite::SqliteArguments<'q> {
fn from(args: AnyArguments<'q>) -> Self {
let mut buf = AnyArgumentBuffer(AnyArgumentBufferKind::Sqlite(Default::default()));
for value in args.values {
let _ = value.encode_by_ref(&mut buf);
}
if let AnyArgumentBufferKind::Sqlite(args) = buf.0 {
args
} else {
unreachable!()
impl<'q> Default for AnyArguments<'q> {
fn default() -> Self {
AnyArguments {
values: AnyArgumentBuffer(vec![]),
}
}
}
#[cfg(feature = "mysql")]
#[allow(irrefutable_let_patterns)]
impl<'q> From<AnyArguments<'q>> for crate::mysql::MySqlArguments {
fn from(args: AnyArguments<'q>) -> Self {
let mut buf = AnyArgumentBuffer(AnyArgumentBufferKind::MySql(
Default::default(),
std::marker::PhantomData,
));
impl<'q> AnyArguments<'q> {
#[doc(hidden)]
pub fn convert_to<'a, A: Arguments<'a>>(&'a self) -> A
where
'q: 'a,
Option<i32>: Type<A::Database> + Encode<'a, A::Database>,
bool: Type<A::Database> + Encode<'a, A::Database>,
i16: Type<A::Database> + Encode<'a, A::Database>,
i32: Type<A::Database> + Encode<'a, A::Database>,
i64: Type<A::Database> + Encode<'a, A::Database>,
f32: Type<A::Database> + Encode<'a, A::Database>,
f64: Type<A::Database> + Encode<'a, A::Database>,
&'a str: Type<A::Database> + Encode<'a, A::Database>,
&'a [u8]: Type<A::Database> + Encode<'a, A::Database>,
{
let mut out = A::default();
for value in args.values {
let _ = value.encode_by_ref(&mut buf);
for arg in &self.values.0 {
match arg {
AnyValueKind::Null => out.add(Option::<i32>::None),
AnyValueKind::Bool(b) => out.add(b),
AnyValueKind::SmallInt(i) => out.add(i),
AnyValueKind::Integer(i) => out.add(i),
AnyValueKind::BigInt(i) => out.add(i),
AnyValueKind::Real(r) => out.add(r),
AnyValueKind::Double(d) => out.add(d),
AnyValueKind::Text(t) => out.add(&**t),
AnyValueKind::Blob(b) => out.add(&**b),
}
}
if let AnyArgumentBufferKind::MySql(args, _) = buf.0 {
args
} else {
unreachable!()
}
}
}
#[cfg(feature = "mssql")]
#[allow(irrefutable_let_patterns)]
impl<'q> From<AnyArguments<'q>> for crate::mssql::MssqlArguments {
fn from(args: AnyArguments<'q>) -> Self {
let mut buf = AnyArgumentBuffer(AnyArgumentBufferKind::Mssql(
Default::default(),
std::marker::PhantomData,
));
for value in args.values {
let _ = value.encode_by_ref(&mut buf);
}
if let AnyArgumentBufferKind::Mssql(args, _) = buf.0 {
args
} else {
unreachable!()
}
}
}
#[cfg(feature = "postgres")]
#[allow(irrefutable_let_patterns)]
impl<'q> From<AnyArguments<'q>> for crate::postgres::PgArguments {
fn from(args: AnyArguments<'q>) -> Self {
let mut buf = AnyArgumentBuffer(AnyArgumentBufferKind::Postgres(
Default::default(),
std::marker::PhantomData,
));
for value in args.values {
let _ = value.encode_by_ref(&mut buf);
}
if let AnyArgumentBufferKind::Postgres(args, _) = buf.0 {
args
} else {
unreachable!()
}
out
}
}

View file

@ -1,443 +1,31 @@
use crate::any::{Any, AnyTypeInfo};
use crate::any::{Any, AnyTypeInfo, AnyValue};
use crate::column::{Column, ColumnIndex};
#[cfg(feature = "postgres")]
use crate::postgres::{PgColumn, PgRow, PgStatement};
#[cfg(feature = "mysql")]
use crate::mysql::{MySqlColumn, MySqlRow, MySqlStatement};
#[cfg(feature = "sqlite")]
use crate::sqlite::{SqliteColumn, SqliteRow, SqliteStatement};
#[cfg(feature = "mssql")]
use crate::mssql::{MssqlColumn, MssqlRow, MssqlStatement};
use crate::ext::ustr::UStr;
#[derive(Debug, Clone)]
pub struct AnyColumn {
pub(crate) kind: AnyColumnKind,
pub(crate) type_info: AnyTypeInfo,
// NOTE: these fields are semver-exempt. See crate root docs for details.
#[doc(hidden)]
pub ordinal: usize,
#[doc(hidden)]
pub name: UStr,
#[doc(hidden)]
pub type_info: AnyTypeInfo,
}
impl crate::column::private_column::Sealed for AnyColumn {}
#[derive(Debug, Clone)]
pub(crate) enum AnyColumnKind {
#[cfg(feature = "postgres")]
Postgres(PgColumn),
#[cfg(feature = "mysql")]
MySql(MySqlColumn),
#[cfg(feature = "sqlite")]
Sqlite(SqliteColumn),
#[cfg(feature = "mssql")]
Mssql(MssqlColumn),
}
impl Column for AnyColumn {
type Database = Any;
fn ordinal(&self) -> usize {
match &self.kind {
#[cfg(feature = "postgres")]
AnyColumnKind::Postgres(row) => row.ordinal(),
#[cfg(feature = "mysql")]
AnyColumnKind::MySql(row) => row.ordinal(),
#[cfg(feature = "sqlite")]
AnyColumnKind::Sqlite(row) => row.ordinal(),
#[cfg(feature = "mssql")]
AnyColumnKind::Mssql(row) => row.ordinal(),
}
self.ordinal
}
fn name(&self) -> &str {
match &self.kind {
#[cfg(feature = "postgres")]
AnyColumnKind::Postgres(row) => row.name(),
#[cfg(feature = "mysql")]
AnyColumnKind::MySql(row) => row.name(),
#[cfg(feature = "sqlite")]
AnyColumnKind::Sqlite(row) => row.name(),
#[cfg(feature = "mssql")]
AnyColumnKind::Mssql(row) => row.name(),
}
&self.name
}
fn type_info(&self) -> &AnyTypeInfo {
&self.type_info
}
}
// FIXME: Find a nice way to auto-generate the below or petition Rust to add support for #[cfg]
// to trait bounds
// all 4
#[cfg(all(
feature = "postgres",
feature = "mysql",
feature = "mssql",
feature = "sqlite"
))]
pub trait AnyColumnIndex:
ColumnIndex<PgRow>
+ for<'q> ColumnIndex<PgStatement<'q>>
+ ColumnIndex<MySqlRow>
+ for<'q> ColumnIndex<MySqlStatement<'q>>
+ ColumnIndex<MssqlRow>
+ for<'q> ColumnIndex<MssqlStatement<'q>>
+ ColumnIndex<SqliteRow>
+ for<'q> ColumnIndex<SqliteStatement<'q>>
{
}
#[cfg(all(
feature = "postgres",
feature = "mysql",
feature = "mssql",
feature = "sqlite"
))]
impl<I: ?Sized> AnyColumnIndex for I where
I: ColumnIndex<PgRow>
+ for<'q> ColumnIndex<PgStatement<'q>>
+ ColumnIndex<MySqlRow>
+ for<'q> ColumnIndex<MySqlStatement<'q>>
+ ColumnIndex<MssqlRow>
+ for<'q> ColumnIndex<MssqlStatement<'q>>
+ ColumnIndex<SqliteRow>
+ for<'q> ColumnIndex<SqliteStatement<'q>>
{
}
// only 3 (4)
#[cfg(all(
not(feature = "mssql"),
all(feature = "postgres", feature = "mysql", feature = "sqlite")
))]
pub trait AnyColumnIndex:
ColumnIndex<PgRow>
+ for<'q> ColumnIndex<PgStatement<'q>>
+ ColumnIndex<MySqlRow>
+ for<'q> ColumnIndex<MySqlStatement<'q>>
+ ColumnIndex<SqliteRow>
+ for<'q> ColumnIndex<SqliteStatement<'q>>
{
}
#[cfg(all(
not(feature = "mssql"),
all(feature = "postgres", feature = "mysql", feature = "sqlite")
))]
impl<I: ?Sized> AnyColumnIndex for I where
I: ColumnIndex<PgRow>
+ for<'q> ColumnIndex<PgStatement<'q>>
+ ColumnIndex<MySqlRow>
+ for<'q> ColumnIndex<MySqlStatement<'q>>
+ ColumnIndex<SqliteRow>
+ for<'q> ColumnIndex<SqliteStatement<'q>>
{
}
#[cfg(all(
not(feature = "mysql"),
all(feature = "postgres", feature = "mssql", feature = "sqlite")
))]
pub trait AnyColumnIndex:
ColumnIndex<PgRow>
+ for<'q> ColumnIndex<PgStatement<'q>>
+ ColumnIndex<MssqlRow>
+ for<'q> ColumnIndex<MssqlStatement<'q>>
+ ColumnIndex<SqliteRow>
+ for<'q> ColumnIndex<SqliteStatement<'q>>
{
}
#[cfg(all(
not(feature = "mysql"),
all(feature = "postgres", feature = "mssql", feature = "sqlite")
))]
impl<I: ?Sized> AnyColumnIndex for I where
I: ColumnIndex<PgRow>
+ for<'q> ColumnIndex<PgStatement<'q>>
+ ColumnIndex<MssqlRow>
+ for<'q> ColumnIndex<MssqlStatement<'q>>
+ ColumnIndex<SqliteRow>
+ for<'q> ColumnIndex<SqliteStatement<'q>>
{
}
#[cfg(all(
not(feature = "sqlite"),
all(feature = "postgres", feature = "mysql", feature = "mssql")
))]
pub trait AnyColumnIndex:
ColumnIndex<PgRow>
+ for<'q> ColumnIndex<PgStatement<'q>>
+ ColumnIndex<MySqlRow>
+ for<'q> ColumnIndex<MySqlStatement<'q>>
+ ColumnIndex<MssqlRow>
+ for<'q> ColumnIndex<MssqlStatement<'q>>
{
}
#[cfg(all(
not(feature = "sqlite"),
all(feature = "postgres", feature = "mysql", feature = "mssql")
))]
impl<I: ?Sized> AnyColumnIndex for I where
I: ColumnIndex<PgRow>
+ for<'q> ColumnIndex<PgStatement<'q>>
+ ColumnIndex<MySqlRow>
+ for<'q> ColumnIndex<MySqlStatement<'q>>
+ ColumnIndex<MssqlRow>
+ for<'q> ColumnIndex<MssqlStatement<'q>>
{
}
#[cfg(all(
not(feature = "postgres"),
all(feature = "sqlite", feature = "mysql", feature = "mssql")
))]
pub trait AnyColumnIndex:
ColumnIndex<SqliteRow>
+ for<'q> ColumnIndex<SqliteStatement<'q>>
+ ColumnIndex<MySqlRow>
+ for<'q> ColumnIndex<MySqlStatement<'q>>
+ ColumnIndex<MssqlRow>
+ for<'q> ColumnIndex<MssqlStatement<'q>>
{
}
#[cfg(all(
not(feature = "postgres"),
all(feature = "sqlite", feature = "mysql", feature = "mssql")
))]
impl<I: ?Sized> AnyColumnIndex for I where
I: ColumnIndex<SqliteRow>
+ for<'q> ColumnIndex<SqliteStatement<'q>>
+ ColumnIndex<MySqlRow>
+ for<'q> ColumnIndex<MySqlStatement<'q>>
+ ColumnIndex<MssqlRow>
+ for<'q> ColumnIndex<MssqlStatement<'q>>
{
}
// only 2 (6)
#[cfg(all(
not(any(feature = "mssql", feature = "sqlite")),
all(feature = "postgres", feature = "mysql")
))]
pub trait AnyColumnIndex:
ColumnIndex<PgRow>
+ for<'q> ColumnIndex<PgStatement<'q>>
+ ColumnIndex<MySqlRow>
+ for<'q> ColumnIndex<MySqlStatement<'q>>
{
}
#[cfg(all(
not(any(feature = "mssql", feature = "sqlite")),
all(feature = "postgres", feature = "mysql")
))]
impl<I: ?Sized> AnyColumnIndex for I where
I: ColumnIndex<PgRow>
+ for<'q> ColumnIndex<PgStatement<'q>>
+ ColumnIndex<MySqlRow>
+ for<'q> ColumnIndex<MySqlStatement<'q>>
{
}
#[cfg(all(
not(any(feature = "mysql", feature = "sqlite")),
all(feature = "postgres", feature = "mssql")
))]
pub trait AnyColumnIndex:
ColumnIndex<PgRow>
+ for<'q> ColumnIndex<PgStatement<'q>>
+ ColumnIndex<MssqlRow>
+ for<'q> ColumnIndex<MssqlStatement<'q>>
{
}
#[cfg(all(
not(any(feature = "mysql", feature = "sqlite")),
all(feature = "postgres", feature = "mssql")
))]
impl<I: ?Sized> AnyColumnIndex for I where
I: ColumnIndex<PgRow>
+ for<'q> ColumnIndex<PgStatement<'q>>
+ ColumnIndex<MssqlRow>
+ for<'q> ColumnIndex<MssqlStatement<'q>>
{
}
#[cfg(all(
not(any(feature = "mysql", feature = "mssql")),
all(feature = "postgres", feature = "sqlite")
))]
pub trait AnyColumnIndex:
ColumnIndex<PgRow>
+ for<'q> ColumnIndex<PgStatement<'q>>
+ ColumnIndex<SqliteRow>
+ for<'q> ColumnIndex<SqliteStatement<'q>>
{
}
#[cfg(all(
not(any(feature = "mysql", feature = "mssql")),
all(feature = "postgres", feature = "sqlite")
))]
impl<I: ?Sized> AnyColumnIndex for I where
I: ColumnIndex<PgRow>
+ for<'q> ColumnIndex<PgStatement<'q>>
+ ColumnIndex<SqliteRow>
+ for<'q> ColumnIndex<SqliteStatement<'q>>
{
}
#[cfg(all(
not(any(feature = "postgres", feature = "sqlite")),
all(feature = "mssql", feature = "mysql")
))]
pub trait AnyColumnIndex:
ColumnIndex<MssqlRow>
+ for<'q> ColumnIndex<MssqlStatement<'q>>
+ ColumnIndex<MySqlRow>
+ for<'q> ColumnIndex<MySqlStatement<'q>>
{
}
#[cfg(all(
not(any(feature = "postgres", feature = "sqlite")),
all(feature = "mssql", feature = "mysql")
))]
impl<I: ?Sized> AnyColumnIndex for I where
I: ColumnIndex<MssqlRow>
+ for<'q> ColumnIndex<MssqlStatement<'q>>
+ ColumnIndex<MySqlRow>
+ for<'q> ColumnIndex<MySqlStatement<'q>>
{
}
#[cfg(all(
not(any(feature = "postgres", feature = "mysql")),
all(feature = "mssql", feature = "sqlite")
))]
pub trait AnyColumnIndex:
ColumnIndex<MssqlRow>
+ for<'q> ColumnIndex<MssqlStatement<'q>>
+ ColumnIndex<SqliteRow>
+ for<'q> ColumnIndex<SqliteStatement<'q>>
{
}
#[cfg(all(
not(any(feature = "postgres", feature = "mysql")),
all(feature = "mssql", feature = "sqlite")
))]
impl<I: ?Sized> AnyColumnIndex for I where
I: ColumnIndex<MssqlRow>
+ for<'q> ColumnIndex<MssqlStatement<'q>>
+ ColumnIndex<SqliteRow>
+ for<'q> ColumnIndex<SqliteStatement<'q>>
{
}
#[cfg(all(
not(any(feature = "postgres", feature = "mssql")),
all(feature = "mysql", feature = "sqlite")
))]
pub trait AnyColumnIndex:
ColumnIndex<MySqlRow>
+ for<'q> ColumnIndex<MySqlStatement<'q>>
+ ColumnIndex<SqliteRow>
+ for<'q> ColumnIndex<SqliteStatement<'q>>
{
}
#[cfg(all(
not(any(feature = "postgres", feature = "mssql")),
all(feature = "mysql", feature = "sqlite")
))]
impl<I: ?Sized> AnyColumnIndex for I where
I: ColumnIndex<MySqlRow>
+ for<'q> ColumnIndex<MySqlStatement<'q>>
+ ColumnIndex<SqliteRow>
+ for<'q> ColumnIndex<SqliteStatement<'q>>
{
}
// only 1 (4)
#[cfg(all(
not(any(feature = "mysql", feature = "mssql", feature = "sqlite")),
feature = "postgres"
))]
pub trait AnyColumnIndex: ColumnIndex<PgRow> + for<'q> ColumnIndex<PgStatement<'q>> {}
#[cfg(all(
not(any(feature = "mysql", feature = "mssql", feature = "sqlite")),
feature = "postgres"
))]
impl<I: ?Sized> AnyColumnIndex for I where
I: ColumnIndex<PgRow> + for<'q> ColumnIndex<PgStatement<'q>>
{
}
#[cfg(all(
not(any(feature = "postgres", feature = "mssql", feature = "sqlite")),
feature = "mysql"
))]
pub trait AnyColumnIndex: ColumnIndex<MySqlRow> + for<'q> ColumnIndex<MySqlStatement<'q>> {}
#[cfg(all(
not(any(feature = "postgres", feature = "mssql", feature = "sqlite")),
feature = "mysql"
))]
impl<I: ?Sized> AnyColumnIndex for I where
I: ColumnIndex<MySqlRow> + for<'q> ColumnIndex<MySqlStatement<'q>>
{
}
#[cfg(all(
not(any(feature = "mysql", feature = "postgres", feature = "sqlite")),
feature = "mssql"
))]
pub trait AnyColumnIndex: ColumnIndex<MssqlRow> + for<'q> ColumnIndex<MssqlStatement<'q>> {}
#[cfg(all(
not(any(feature = "mysql", feature = "postgres", feature = "sqlite")),
feature = "mssql"
))]
impl<I: ?Sized> AnyColumnIndex for I where
I: ColumnIndex<MssqlRow> + for<'q> ColumnIndex<MssqlStatement<'q>>
{
}
#[cfg(all(
not(any(feature = "mysql", feature = "mssql", feature = "postgres")),
feature = "sqlite"
))]
pub trait AnyColumnIndex:
ColumnIndex<SqliteRow> + for<'q> ColumnIndex<SqliteStatement<'q>>
{
}
#[cfg(all(
not(any(feature = "mysql", feature = "mssql", feature = "postgres")),
feature = "sqlite"
))]
impl<I: ?Sized> AnyColumnIndex for I where
I: ColumnIndex<SqliteRow> + for<'q> ColumnIndex<SqliteStatement<'q>>
{
}

View file

@ -0,0 +1,87 @@
use crate::any::{Any, AnyArguments, AnyQueryResult, AnyRow, AnyStatement, AnyTypeInfo};
use crate::describe::Describe;
use crate::transaction::Transaction;
use either::Either;
use futures_core::future::BoxFuture;
use futures_core::stream::BoxStream;
use std::fmt::Debug;
pub trait AnyConnectionBackend: std::any::Any + Debug + Send + 'static {
/// The backend name.
fn name(&self) -> &str;
/// Explicitly close this database connection.
///
/// This method is **not required** for safe and consistent operation. However, it is
/// recommended to call it instead of letting a connection `drop` as the database backend
/// will be faster at cleaning up resources.
fn close(self: Box<Self>) -> BoxFuture<'static, crate::Result<()>>;
/// Immediately close the connection without sending a graceful shutdown.
///
/// This should still at least send a TCP `FIN` frame to let the server know we're dying.
#[doc(hidden)]
fn close_hard(self: Box<Self>) -> BoxFuture<'static, crate::Result<()>>;
/// Checks if a connection to the database is still valid.
fn ping(&mut self) -> BoxFuture<'_, crate::Result<()>>;
/// Begin a new transaction or establish a savepoint within the active transaction.
///
/// Returns a [`Transaction`] for controlling and tracking the new transaction.
fn begin(&mut self) -> BoxFuture<'_, crate::Result<()>>;
fn commit(&mut self) -> BoxFuture<'_, crate::Result<()>>;
fn rollback(&mut self) -> BoxFuture<'_, crate::Result<()>>;
fn start_rollback(&mut self);
/// The number of statements currently cached in the connection.
fn cached_statements_size(&self) -> usize {
0
}
/// Removes all statements from the cache, closing them on the server if
/// needed.
fn clear_cached_statements(&mut self) -> BoxFuture<'_, crate::Result<()>> {
Box::pin(async move { Ok(()) })
}
#[doc(hidden)]
fn flush(&mut self) -> BoxFuture<'_, crate::Result<()>>;
#[doc(hidden)]
fn should_flush(&self) -> bool;
#[cfg(feature = "migrate")]
fn as_migrate(&mut self) -> crate::Result<&mut (dyn crate::migrate::Migrate + Send + 'static)> {
Err(crate::Error::Configuration(
format!(
"{} driver does not support migrations or `migrate` feature was not enabled",
self.name()
)
.into(),
))
}
fn fetch_many<'q>(
&'q mut self,
query: &'q str,
arguments: Option<AnyArguments<'q>>,
) -> BoxStream<'q, crate::Result<Either<AnyQueryResult, AnyRow>>>;
fn fetch_optional<'q>(
&'q mut self,
query: &'q str,
arguments: Option<AnyArguments<'q>>,
) -> BoxFuture<'q, crate::Result<Option<AnyRow>>>;
fn prepare_with<'c, 'q: 'c>(
&'c mut self,
sql: &'q str,
parameters: &[AnyTypeInfo],
) -> BoxFuture<'c, crate::Result<AnyStatement<'q>>>;
fn describe<'q>(&'q mut self, sql: &'q str) -> BoxFuture<'q, crate::Result<Describe<Any>>>;
}

View file

@ -1,40 +1,11 @@
use crate::any::connection::AnyConnectionKind;
use crate::any::options::{AnyConnectOptions, AnyConnectOptionsKind};
use crate::any::options::AnyConnectOptions;
use crate::any::AnyConnection;
use crate::connection::Connection;
use crate::error::Error;
impl AnyConnection {
pub(crate) async fn establish(options: &AnyConnectOptions) -> Result<Self, Error> {
match &options.0 {
#[cfg(feature = "mysql")]
AnyConnectOptionsKind::MySql(options) => {
crate::mysql::MySqlConnection::connect_with(options)
.await
.map(AnyConnectionKind::MySql)
}
#[cfg(feature = "postgres")]
AnyConnectOptionsKind::Postgres(options) => {
crate::postgres::PgConnection::connect_with(options)
.await
.map(AnyConnectionKind::Postgres)
}
#[cfg(feature = "sqlite")]
AnyConnectOptionsKind::Sqlite(options) => {
crate::sqlite::SqliteConnection::connect_with(options)
.await
.map(AnyConnectionKind::Sqlite)
}
#[cfg(feature = "mssql")]
AnyConnectOptionsKind::Mssql(options) => {
crate::mssql::MssqlConnection::connect_with(options)
.await
.map(AnyConnectionKind::Mssql)
}
}
.map(AnyConnection)
let driver = crate::any::driver::from_url(&options.database_url)?;
(driver.connect)(options).await
}
}

View file

@ -1,4 +1,3 @@
use crate::any::connection::AnyConnectionKind;
use crate::any::{
Any, AnyColumn, AnyConnection, AnyQueryResult, AnyRow, AnyStatement, AnyTypeInfo,
};
@ -20,36 +19,10 @@ impl<'c> Executor<'c> for &'c mut AnyConnection {
) -> BoxStream<'e, Result<Either<AnyQueryResult, AnyRow>, Error>>
where
'c: 'e,
E: Execute<'q, Self::Database>,
E: Execute<'q, Any>,
{
let arguments = query.take_arguments();
let query = query.sql();
match &mut self.0 {
#[cfg(feature = "postgres")]
AnyConnectionKind::Postgres(conn) => conn
.fetch_many((query, arguments.map(Into::into)))
.map_ok(|v| v.map_right(Into::into).map_left(Into::into))
.boxed(),
#[cfg(feature = "mysql")]
AnyConnectionKind::MySql(conn) => conn
.fetch_many((query, arguments.map(Into::into)))
.map_ok(|v| v.map_right(Into::into).map_left(Into::into))
.boxed(),
#[cfg(feature = "sqlite")]
AnyConnectionKind::Sqlite(conn) => conn
.fetch_many((query, arguments.map(Into::into)))
.map_ok(|v| v.map_right(Into::into).map_left(Into::into))
.boxed(),
#[cfg(feature = "mssql")]
AnyConnectionKind::Mssql(conn) => conn
.fetch_many((query, arguments.map(Into::into)))
.map_ok(|v| v.map_right(Into::into).map_left(Into::into))
.boxed(),
}
self.backend.fetch_many(query.sql(), arguments)
}
fn fetch_optional<'e, 'q: 'e, E: 'q>(
@ -61,61 +34,18 @@ impl<'c> Executor<'c> for &'c mut AnyConnection {
E: Execute<'q, Self::Database>,
{
let arguments = query.take_arguments();
let query = query.sql();
Box::pin(async move {
Ok(match &mut self.0 {
#[cfg(feature = "postgres")]
AnyConnectionKind::Postgres(conn) => conn
.fetch_optional((query, arguments.map(Into::into)))
.await?
.map(Into::into),
#[cfg(feature = "mysql")]
AnyConnectionKind::MySql(conn) => conn
.fetch_optional((query, arguments.map(Into::into)))
.await?
.map(Into::into),
#[cfg(feature = "sqlite")]
AnyConnectionKind::Sqlite(conn) => conn
.fetch_optional((query, arguments.map(Into::into)))
.await?
.map(Into::into),
#[cfg(feature = "mssql")]
AnyConnectionKind::Mssql(conn) => conn
.fetch_optional((query, arguments.map(Into::into)))
.await?
.map(Into::into),
})
})
self.backend.fetch_optional(query.sql(), arguments)
}
fn prepare_with<'e, 'q: 'e>(
self,
sql: &'q str,
_parameters: &[AnyTypeInfo],
parameters: &[AnyTypeInfo],
) -> BoxFuture<'e, Result<AnyStatement<'q>, Error>>
where
'c: 'e,
{
Box::pin(async move {
Ok(match &mut self.0 {
// To match other databases here, we explicitly ignore the parameter types
#[cfg(feature = "postgres")]
AnyConnectionKind::Postgres(conn) => conn.prepare(sql).await.map(Into::into)?,
#[cfg(feature = "mysql")]
AnyConnectionKind::MySql(conn) => conn.prepare(sql).await.map(Into::into)?,
#[cfg(feature = "sqlite")]
AnyConnectionKind::Sqlite(conn) => conn.prepare(sql).await.map(Into::into)?,
#[cfg(feature = "mssql")]
AnyConnectionKind::Mssql(conn) => conn.prepare(sql).await.map(Into::into)?,
})
})
self.backend.prepare_with(sql, parameters)
}
fn describe<'e, 'q: 'e>(
@ -125,21 +55,7 @@ impl<'c> Executor<'c> for &'c mut AnyConnection {
where
'c: 'e,
{
Box::pin(async move {
Ok(match &mut self.0 {
#[cfg(feature = "postgres")]
AnyConnectionKind::Postgres(conn) => conn.describe(sql).await.map(map_describe)?,
#[cfg(feature = "mysql")]
AnyConnectionKind::MySql(conn) => conn.describe(sql).await.map(map_describe)?,
#[cfg(feature = "sqlite")]
AnyConnectionKind::Sqlite(conn) => conn.describe(sql).await.map(map_describe)?,
#[cfg(feature = "mssql")]
AnyConnectionKind::Mssql(conn) => conn.describe(sql).await.map(map_describe)?,
})
})
self.backend.describe(sql)
}
}

View file

@ -1,22 +1,17 @@
use futures_core::future::BoxFuture;
use std::marker::PhantomData;
use url::Url;
use crate::any::{Any, AnyConnectOptions, AnyKind};
use crate::connection::Connection;
use crate::connection::{ConnectOptions, Connection};
use crate::error::Error;
#[cfg(feature = "postgres")]
use crate::postgres;
use crate::database::Database;
pub use backend::AnyConnectionBackend;
#[cfg(feature = "sqlite")]
use crate::sqlite;
#[cfg(feature = "mssql")]
use crate::mssql;
#[cfg(feature = "mysql")]
use crate::mysql;
use crate::transaction::Transaction;
mod backend;
mod establish;
mod executor;
@ -30,89 +25,41 @@ mod executor;
/// sqlite://a.sqlite
/// ```
#[derive(Debug)]
pub struct AnyConnection(pub(super) AnyConnectionKind);
#[derive(Debug)]
// Used internally in `sqlx-macros`
#[doc(hidden)]
pub enum AnyConnectionKind {
#[cfg(feature = "postgres")]
Postgres(postgres::PgConnection),
#[cfg(feature = "mssql")]
Mssql(mssql::MssqlConnection),
#[cfg(feature = "mysql")]
MySql(mysql::MySqlConnection),
#[cfg(feature = "sqlite")]
Sqlite(sqlite::SqliteConnection),
}
impl AnyConnectionKind {
pub fn kind(&self) -> AnyKind {
match self {
#[cfg(feature = "postgres")]
AnyConnectionKind::Postgres(_) => AnyKind::Postgres,
#[cfg(feature = "mysql")]
AnyConnectionKind::MySql(_) => AnyKind::MySql,
#[cfg(feature = "sqlite")]
AnyConnectionKind::Sqlite(_) => AnyKind::Sqlite,
#[cfg(feature = "mssql")]
AnyConnectionKind::Mssql(_) => AnyKind::Mssql,
}
}
pub struct AnyConnection {
pub(crate) backend: Box<dyn AnyConnectionBackend>,
}
impl AnyConnection {
pub fn kind(&self) -> AnyKind {
self.0.kind()
/// Returns the name of the database backend in use (e.g. PostgreSQL, MySQL, SQLite, etc.)
pub fn backend_name(&self) -> &str {
self.backend.name()
}
// Used internally in `sqlx-macros`
#[doc(hidden)]
pub fn private_get_mut(&mut self) -> &mut AnyConnectionKind {
&mut self.0
pub(crate) fn connect<DB: Database>(
options: &AnyConnectOptions,
) -> BoxFuture<'_, crate::Result<Self>>
where
DB::Connection: AnyConnectionBackend,
<DB::Connection as Connection>::Options:
for<'a> TryFrom<&'a AnyConnectOptions, Error = Error>,
{
let res = TryFrom::try_from(options);
Box::pin(async {
let options: <DB::Connection as Connection>::Options = res?;
Ok(AnyConnection {
backend: Box::new(options.connect().await?),
})
})
}
}
macro_rules! delegate_to {
($self:ident.$method:ident($($arg:ident),*)) => {
match &$self.0 {
#[cfg(feature = "postgres")]
AnyConnectionKind::Postgres(conn) => conn.$method($($arg),*),
#[cfg(feature = "mysql")]
AnyConnectionKind::MySql(conn) => conn.$method($($arg),*),
#[cfg(feature = "sqlite")]
AnyConnectionKind::Sqlite(conn) => conn.$method($($arg),*),
#[cfg(feature = "mssql")]
AnyConnectionKind::Mssql(conn) => conn.$method($($arg),*),
}
};
}
macro_rules! delegate_to_mut {
($self:ident.$method:ident($($arg:ident),*)) => {
match &mut $self.0 {
#[cfg(feature = "postgres")]
AnyConnectionKind::Postgres(conn) => conn.$method($($arg),*),
#[cfg(feature = "mysql")]
AnyConnectionKind::MySql(conn) => conn.$method($($arg),*),
#[cfg(feature = "sqlite")]
AnyConnectionKind::Sqlite(conn) => conn.$method($($arg),*),
#[cfg(feature = "mssql")]
AnyConnectionKind::Mssql(conn) => conn.$method($($arg),*),
}
};
#[cfg(feature = "migrate")]
pub(crate) fn get_migrate(
&mut self,
) -> crate::Result<&mut (dyn crate::migrate::Migrate + Send + 'static)> {
self.backend.as_migrate()
}
}
impl Connection for AnyConnection {
@ -121,39 +68,15 @@ impl Connection for AnyConnection {
type Options = AnyConnectOptions;
fn close(self) -> BoxFuture<'static, Result<(), Error>> {
match self.0 {
#[cfg(feature = "postgres")]
AnyConnectionKind::Postgres(conn) => conn.close(),
#[cfg(feature = "mysql")]
AnyConnectionKind::MySql(conn) => conn.close(),
#[cfg(feature = "sqlite")]
AnyConnectionKind::Sqlite(conn) => conn.close(),
#[cfg(feature = "mssql")]
AnyConnectionKind::Mssql(conn) => conn.close(),
}
self.backend.close()
}
fn close_hard(self) -> BoxFuture<'static, Result<(), Error>> {
match self.0 {
#[cfg(feature = "postgres")]
AnyConnectionKind::Postgres(conn) => conn.close_hard(),
#[cfg(feature = "mysql")]
AnyConnectionKind::MySql(conn) => conn.close_hard(),
#[cfg(feature = "sqlite")]
AnyConnectionKind::Sqlite(conn) => conn.close_hard(),
#[cfg(feature = "mssql")]
AnyConnectionKind::Mssql(conn) => conn.close_hard(),
}
self.backend.close()
}
fn ping(&mut self) -> BoxFuture<'_, Result<(), Error>> {
delegate_to_mut!(self.ping())
self.backend.ping()
}
fn begin(&mut self) -> BoxFuture<'_, Result<Transaction<'_, Self::Database>, Error>>
@ -164,74 +87,20 @@ impl Connection for AnyConnection {
}
fn cached_statements_size(&self) -> usize {
match &self.0 {
#[cfg(feature = "postgres")]
AnyConnectionKind::Postgres(conn) => conn.cached_statements_size(),
#[cfg(feature = "mysql")]
AnyConnectionKind::MySql(conn) => conn.cached_statements_size(),
#[cfg(feature = "sqlite")]
AnyConnectionKind::Sqlite(conn) => conn.cached_statements_size(),
// no cache
#[cfg(feature = "mssql")]
AnyConnectionKind::Mssql(_) => 0,
}
self.backend.cached_statements_size()
}
fn clear_cached_statements(&mut self) -> BoxFuture<'_, Result<(), Error>> {
match &mut self.0 {
#[cfg(feature = "postgres")]
AnyConnectionKind::Postgres(conn) => conn.clear_cached_statements(),
#[cfg(feature = "mysql")]
AnyConnectionKind::MySql(conn) => conn.clear_cached_statements(),
#[cfg(feature = "sqlite")]
AnyConnectionKind::Sqlite(conn) => conn.clear_cached_statements(),
// no cache
#[cfg(feature = "mssql")]
AnyConnectionKind::Mssql(_) => Box::pin(futures_util::future::ok(())),
}
fn clear_cached_statements(&mut self) -> BoxFuture<'_, crate::Result<()>> {
self.backend.clear_cached_statements()
}
#[doc(hidden)]
fn flush(&mut self) -> BoxFuture<'_, Result<(), Error>> {
delegate_to_mut!(self.flush())
self.backend.flush()
}
#[doc(hidden)]
fn should_flush(&self) -> bool {
delegate_to!(self.should_flush())
}
}
#[cfg(feature = "postgres")]
impl From<postgres::PgConnection> for AnyConnection {
fn from(conn: postgres::PgConnection) -> Self {
AnyConnection(AnyConnectionKind::Postgres(conn))
}
}
#[cfg(feature = "mssql")]
impl From<mssql::MssqlConnection> for AnyConnection {
fn from(conn: mssql::MssqlConnection) -> Self {
AnyConnection(AnyConnectionKind::Mssql(conn))
}
}
#[cfg(feature = "mysql")]
impl From<mysql::MySqlConnection> for AnyConnection {
fn from(conn: mysql::MySqlConnection) -> Self {
AnyConnection(AnyConnectionKind::MySql(conn))
}
}
#[cfg(feature = "sqlite")]
impl From<sqlite::SqliteConnection> for AnyConnection {
fn from(conn: sqlite::SqliteConnection) -> Self {
AnyConnection(AnyConnectionKind::Sqlite(conn))
self.backend.should_flush()
}
}

View file

@ -3,6 +3,7 @@ use crate::any::{
AnyStatement, AnyTransactionManager, AnyTypeInfo, AnyValue, AnyValueRef,
};
use crate::database::{Database, HasArguments, HasStatement, HasStatementCache, HasValueRef};
use std::marker::PhantomData;
/// Opaque database driver. Capable of being used in place of any SQLx database driver. The actual
/// driver used will be selected at runtime, from the connection url.
@ -23,6 +24,9 @@ impl Database for Any {
type TypeInfo = AnyTypeInfo;
type Value = AnyValue;
const NAME: &'static str = "Any";
const URL_SCHEMES: &'static [&'static str] = &[];
}
impl<'r> HasValueRef<'r> for Any {

View file

@ -1,363 +0,0 @@
use crate::decode::Decode;
use crate::types::Type;
#[cfg(feature = "postgres")]
use crate::postgres::Postgres;
#[cfg(feature = "mysql")]
use crate::mysql::MySql;
#[cfg(feature = "mssql")]
use crate::mssql::Mssql;
#[cfg(feature = "sqlite")]
use crate::sqlite::Sqlite;
// Implements Decode for any T where T supports Decode for any database that has support currently
// compiled into SQLx
// Implements `Decode<'r, Any>` for `$ty` by dispatching on the runtime
// variant of `AnyValueRef` and delegating to the matching driver's own
// `Decode` impl. Arms are feature-gated to mirror the compiled-in drivers.
macro_rules! impl_any_decode {
    ($ty:ty) => {
        impl<'r> crate::decode::Decode<'r, crate::any::Any> for $ty
        where
            // the bound collects `Decode` + `Type` for every enabled driver
            $ty: crate::any::AnyDecode<'r>,
        {
            fn decode(
                value: crate::any::AnyValueRef<'r>,
            ) -> Result<Self, crate::error::BoxDynError> {
                match value.kind {
                    #[cfg(feature = "mysql")]
                    crate::any::value::AnyValueRefKind::MySql(value) => {
                        <$ty as crate::decode::Decode<'r, crate::mysql::MySql>>::decode(value)
                    }
                    #[cfg(feature = "sqlite")]
                    crate::any::value::AnyValueRefKind::Sqlite(value) => {
                        <$ty as crate::decode::Decode<'r, crate::sqlite::Sqlite>>::decode(value)
                    }
                    #[cfg(feature = "mssql")]
                    crate::any::value::AnyValueRefKind::Mssql(value) => {
                        <$ty as crate::decode::Decode<'r, crate::mssql::Mssql>>::decode(value)
                    }
                    #[cfg(feature = "postgres")]
                    crate::any::value::AnyValueRefKind::Postgres(value) => {
                        <$ty as crate::decode::Decode<'r, crate::postgres::Postgres>>::decode(value)
                    }
                }
            }
        }
    };
}
// FIXME: Find a nice way to auto-generate the below or petition Rust to add support for #[cfg]
// to trait bounds
// all 4
// Variant for the case where all four drivers are compiled in: `AnyDecode`
// then requires `Decode` + `Type` for each of Postgres, MySQL, MSSQL and
// SQLite. (Hand-written per feature combination because `#[cfg]` cannot be
// attached to individual trait bounds — see the FIXME above.)
#[cfg(all(
    feature = "postgres",
    feature = "mysql",
    feature = "mssql",
    feature = "sqlite"
))]
pub trait AnyDecode<'r>:
    Decode<'r, Postgres>
    + Type<Postgres>
    + Decode<'r, MySql>
    + Type<MySql>
    + Decode<'r, Mssql>
    + Type<Mssql>
    + Decode<'r, Sqlite>
    + Type<Sqlite>
{
}

// Blanket impl: any type satisfying every per-driver bound is `AnyDecode`.
#[cfg(all(
    feature = "postgres",
    feature = "mysql",
    feature = "mssql",
    feature = "sqlite"
))]
impl<'r, T> AnyDecode<'r> for T where
    T: Decode<'r, Postgres>
        + Type<Postgres>
        + Decode<'r, MySql>
        + Type<MySql>
        + Decode<'r, Mssql>
        + Type<Mssql>
        + Decode<'r, Sqlite>
        + Type<Sqlite>
{
}
// only 3 (4)
#[cfg(all(
not(feature = "mssql"),
all(feature = "postgres", feature = "mysql", feature = "sqlite")
))]
pub trait AnyDecode<'r>:
Decode<'r, Postgres>
+ Type<Postgres>
+ Decode<'r, MySql>
+ Type<MySql>
+ Decode<'r, Sqlite>
+ Type<Sqlite>
{
}
#[cfg(all(
not(feature = "mssql"),
all(feature = "postgres", feature = "mysql", feature = "sqlite")
))]
impl<'r, T> AnyDecode<'r> for T where
T: Decode<'r, Postgres>
+ Type<Postgres>
+ Decode<'r, MySql>
+ Type<MySql>
+ Decode<'r, Sqlite>
+ Type<Sqlite>
{
}
#[cfg(all(
not(feature = "mysql"),
all(feature = "postgres", feature = "mssql", feature = "sqlite")
))]
pub trait AnyDecode<'r>:
Decode<'r, Postgres>
+ Type<Postgres>
+ Decode<'r, Mssql>
+ Type<Mssql>
+ Decode<'r, Sqlite>
+ Type<Sqlite>
{
}
#[cfg(all(
not(feature = "mysql"),
all(feature = "postgres", feature = "mssql", feature = "sqlite")
))]
impl<'r, T> AnyDecode<'r> for T where
T: Decode<'r, Postgres>
+ Type<Postgres>
+ Decode<'r, Mssql>
+ Type<Mssql>
+ Decode<'r, Sqlite>
+ Type<Sqlite>
{
}
#[cfg(all(
not(feature = "sqlite"),
all(feature = "postgres", feature = "mysql", feature = "mssql")
))]
pub trait AnyDecode<'r>:
Decode<'r, Postgres>
+ Type<Postgres>
+ Decode<'r, MySql>
+ Type<MySql>
+ Decode<'r, Mssql>
+ Type<Mssql>
{
}
#[cfg(all(
not(feature = "sqlite"),
all(feature = "postgres", feature = "mysql", feature = "mssql")
))]
impl<'r, T> AnyDecode<'r> for T where
T: Decode<'r, Postgres>
+ Type<Postgres>
+ Decode<'r, MySql>
+ Type<MySql>
+ Decode<'r, Mssql>
+ Type<Mssql>
{
}
#[cfg(all(
not(feature = "postgres"),
all(feature = "sqlite", feature = "mysql", feature = "mssql")
))]
pub trait AnyDecode<'r>:
Decode<'r, Sqlite>
+ Type<Sqlite>
+ Decode<'r, MySql>
+ Type<MySql>
+ Decode<'r, Mssql>
+ Type<Mssql>
{
}
#[cfg(all(
not(feature = "postgres"),
all(feature = "sqlite", feature = "mysql", feature = "mssql")
))]
impl<'r, T> AnyDecode<'r> for T where
T: Decode<'r, Sqlite>
+ Type<Sqlite>
+ Decode<'r, MySql>
+ Type<MySql>
+ Decode<'r, Mssql>
+ Type<Mssql>
{
}
// only 2 (6)
#[cfg(all(
not(any(feature = "mssql", feature = "sqlite")),
all(feature = "postgres", feature = "mysql")
))]
pub trait AnyDecode<'r>:
Decode<'r, Postgres> + Type<Postgres> + Decode<'r, MySql> + Type<MySql>
{
}
#[cfg(all(
not(any(feature = "mssql", feature = "sqlite")),
all(feature = "postgres", feature = "mysql")
))]
impl<'r, T> AnyDecode<'r> for T where
T: Decode<'r, Postgres> + Type<Postgres> + Decode<'r, MySql> + Type<MySql>
{
}
#[cfg(all(
not(any(feature = "mysql", feature = "sqlite")),
all(feature = "postgres", feature = "mssql")
))]
pub trait AnyDecode<'r>:
Decode<'r, Postgres> + Type<Postgres> + Decode<'r, Mssql> + Type<Mssql>
{
}
#[cfg(all(
not(any(feature = "mysql", feature = "sqlite")),
all(feature = "postgres", feature = "mssql")
))]
impl<'r, T> AnyDecode<'r> for T where
T: Decode<'r, Postgres> + Type<Postgres> + Decode<'r, Mssql> + Type<Mssql>
{
}
#[cfg(all(
not(any(feature = "mysql", feature = "mssql")),
all(feature = "postgres", feature = "sqlite")
))]
pub trait AnyDecode<'r>:
Decode<'r, Postgres> + Type<Postgres> + Decode<'r, Sqlite> + Type<Sqlite>
{
}
#[cfg(all(
not(any(feature = "mysql", feature = "mssql")),
all(feature = "postgres", feature = "sqlite")
))]
impl<'r, T> AnyDecode<'r> for T where
T: Decode<'r, Postgres> + Type<Postgres> + Decode<'r, Sqlite> + Type<Sqlite>
{
}
#[cfg(all(
not(any(feature = "postgres", feature = "sqlite")),
all(feature = "mssql", feature = "mysql")
))]
pub trait AnyDecode<'r>: Decode<'r, Mssql> + Type<Mssql> + Decode<'r, MySql> + Type<MySql> {}
#[cfg(all(
not(any(feature = "postgres", feature = "sqlite")),
all(feature = "mssql", feature = "mysql")
))]
impl<'r, T> AnyDecode<'r> for T where
T: Decode<'r, Mssql> + Type<Mssql> + Decode<'r, MySql> + Type<MySql>
{
}
#[cfg(all(
not(any(feature = "postgres", feature = "mysql")),
all(feature = "mssql", feature = "sqlite")
))]
pub trait AnyDecode<'r>:
Decode<'r, Mssql> + Type<Mssql> + Decode<'r, Sqlite> + Type<Sqlite>
{
}
#[cfg(all(
not(any(feature = "postgres", feature = "mysql")),
all(feature = "mssql", feature = "sqlite")
))]
impl<'r, T> AnyDecode<'r> for T where
T: Decode<'r, Mssql> + Type<Mssql> + Decode<'r, Sqlite> + Type<Sqlite>
{
}
#[cfg(all(
not(any(feature = "postgres", feature = "mssql")),
all(feature = "mysql", feature = "sqlite")
))]
pub trait AnyDecode<'r>:
Decode<'r, MySql> + Type<MySql> + Decode<'r, Sqlite> + Type<Sqlite>
{
}
#[cfg(all(
not(any(feature = "postgres", feature = "mssql")),
all(feature = "mysql", feature = "sqlite")
))]
impl<'r, T> AnyDecode<'r> for T where
T: Decode<'r, MySql> + Type<MySql> + Decode<'r, Sqlite> + Type<Sqlite>
{
}
// only 1 (4)
#[cfg(all(
not(any(feature = "mysql", feature = "mssql", feature = "sqlite")),
feature = "postgres"
))]
pub trait AnyDecode<'r>: Decode<'r, Postgres> + Type<Postgres> {}
#[cfg(all(
not(any(feature = "mysql", feature = "mssql", feature = "sqlite")),
feature = "postgres"
))]
impl<'r, T> AnyDecode<'r> for T where T: Decode<'r, Postgres> + Type<Postgres> {}
#[cfg(all(
not(any(feature = "postgres", feature = "mssql", feature = "sqlite")),
feature = "mysql"
))]
pub trait AnyDecode<'r>: Decode<'r, MySql> + Type<MySql> {}
#[cfg(all(
not(any(feature = "postgres", feature = "mssql", feature = "sqlite")),
feature = "mysql"
))]
impl<'r, T> AnyDecode<'r> for T where T: Decode<'r, MySql> + Type<MySql> {}
#[cfg(all(
not(any(feature = "mysql", feature = "postgres", feature = "sqlite")),
feature = "mssql"
))]
pub trait AnyDecode<'r>: Decode<'r, Mssql> + Type<Mssql> {}
#[cfg(all(
not(any(feature = "mysql", feature = "postgres", feature = "sqlite")),
feature = "mssql"
))]
impl<'r, T> AnyDecode<'r> for T where T: Decode<'r, Mssql> + Type<Mssql> {}
#[cfg(all(
not(any(feature = "mysql", feature = "mssql", feature = "postgres")),
feature = "sqlite"
))]
pub trait AnyDecode<'r>: Decode<'r, Sqlite> + Type<Sqlite> {}
#[cfg(all(
not(any(feature = "mysql", feature = "mssql", feature = "postgres")),
feature = "sqlite"
))]
impl<'r, T> AnyDecode<'r> for T where T: Decode<'r, Sqlite> + Type<Sqlite> {}

153
sqlx-core/src/any/driver.rs Normal file
View file

@ -0,0 +1,153 @@
use crate::any::connection::AnyConnectionBackend;
use crate::any::{
Any, AnyArguments, AnyConnectOptions, AnyConnection, AnyQueryResult, AnyRow, AnyStatement,
AnyTypeInfo,
};
use crate::common::DebugFn;
use crate::connection::Connection;
use crate::database::Database;
use crate::describe::Describe;
use crate::error::BoxDynError;
use crate::transaction::Transaction;
use crate::Error;
use either::Either;
use futures_core::future::BoxFuture;
use futures_core::stream::BoxStream;
use once_cell::sync::OnceCell;
use std::fmt::{Debug, Formatter};
use std::marker::PhantomData;
use url::Url;
// Global registry of installed drivers; written exactly once via
// [`install_drivers`] and read by [`from_url`] at connect time.
static DRIVERS: OnceCell<&'static [AnyDriver]> = OnceCell::new();
/// Declares a `pub const` [`AnyDriver`] named `$name` for database `$db`,
/// including the `MigrateDatabase` hooks only when the `migrate` feature is
/// enabled — so driver crates can use a single declaration unconditionally.
#[macro_export]
macro_rules! declare_driver_with_optional_migrate {
    ($name:ident = $db:path) => {
        #[cfg(feature = "migrate")]
        pub const $name: $crate::any::driver::AnyDriver =
            $crate::any::driver::AnyDriver::with_migrate::<$db>();

        #[cfg(not(feature = "migrate"))]
        pub const $name: $crate::any::driver::AnyDriver =
            $crate::any::driver::AnyDriver::without_migrate::<$db>();
    };
}
/// Runtime description of one database driver usable through `Any`.
///
/// A set of these is registered via [`install_drivers`]; `AnyConnection`
/// selects among them by URL scheme at connect time.
#[non_exhaustive]
pub struct AnyDriver {
    // display name of the backing database (`DB::NAME`)
    pub(crate) name: &'static str,
    // URL schemes this driver claims (`DB::URL_SCHEMES`); matched in `from_url`
    pub(crate) url_schemes: &'static [&'static str],
    // opens a connection from parsed options; set to `AnyConnection::connect::<DB>`
    pub(crate) connect:
        DebugFn<fn(&AnyConnectOptions) -> BoxFuture<'_, crate::Result<AnyConnection>>>,
    // create/exists/drop hooks; `None` when the driver has no `MigrateDatabase`
    // support or the `migrate` feature is disabled
    pub(crate) migrate_database: Option<AnyMigrateDatabase>,
}
impl AnyDriver {
    /// Builds the driver entry for `DB` without database-management hooks.
    ///
    /// The connect function is the monomorphized `AnyConnection::connect::<DB>`;
    /// `DB`'s options type must be derivable from an [`AnyConnectOptions`].
    pub const fn without_migrate<DB: Database>() -> Self
    where
        DB::Connection: AnyConnectionBackend,
        <DB::Connection as Connection>::Options:
            for<'a> TryFrom<&'a AnyConnectOptions, Error = Error>,
    {
        Self {
            name: DB::NAME,
            url_schemes: DB::URL_SCHEMES,
            connect: DebugFn(AnyConnection::connect::<DB>),
            migrate_database: None,
        }
    }

    /// Fallback when the `migrate` feature is off: identical to
    /// [`Self::without_migrate`], so callers may use one name unconditionally
    /// (see `declare_driver_with_optional_migrate!`).
    #[cfg(not(feature = "migrate"))]
    pub const fn with_migrate<DB: Database>() -> Self
    where
        DB::Connection: AnyConnectionBackend,
        <DB::Connection as Connection>::Options:
            for<'a> TryFrom<&'a AnyConnectOptions, Error = Error>,
    {
        Self::without_migrate::<DB>()
    }

    /// Builds the driver entry for `DB` including the create/exists/drop
    /// database hooks taken from its `MigrateDatabase` impl.
    #[cfg(feature = "migrate")]
    pub const fn with_migrate<DB: Database + crate::migrate::MigrateDatabase>() -> Self
    where
        DB::Connection: AnyConnectionBackend,
        <DB::Connection as Connection>::Options:
            for<'a> TryFrom<&'a AnyConnectOptions, Error = Error>,
    {
        Self {
            migrate_database: Some(AnyMigrateDatabase {
                create_database: DebugFn(DB::create_database),
                database_exists: DebugFn(DB::database_exists),
                drop_database: DebugFn(DB::drop_database),
            }),
            // everything else is identical to the non-migrate constructor
            ..Self::without_migrate::<DB>()
        }
    }

    /// Returns the migrate hooks, or a `Configuration` error naming this
    /// driver when it has none.
    pub fn get_migrate_database(&self) -> crate::Result<&AnyMigrateDatabase> {
        self.migrate_database.as_ref()
            .ok_or_else(|| Error::Configuration(format!("{} driver does not support migrations or the `migrate` feature was not enabled for it", self.name).into()))
    }
}
impl Debug for AnyDriver {
    // Shows only the identifying metadata; the connect function pointer and
    // migrate hooks carry no useful `Debug` output and are omitted.
    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
        let mut out = f.debug_struct("AnyDriver");
        out.field("name", &self.name);
        out.field("url_schemes", &self.url_schemes);
        out.finish()
    }
}
/// Type-erased database-management entry points for one driver, captured as
/// plain function pointers from its `MigrateDatabase` impl by
/// `AnyDriver::with_migrate`.
pub struct AnyMigrateDatabase {
    create_database: DebugFn<fn(&str) -> BoxFuture<'_, crate::Result<()>>>,
    database_exists: DebugFn<fn(&str) -> BoxFuture<'_, crate::Result<bool>>>,
    drop_database: DebugFn<fn(&str) -> BoxFuture<'_, crate::Result<()>>>,
}
impl AnyMigrateDatabase {
    /// Creates the database named by `url`, delegating to the stored
    /// driver function pointer.
    pub fn create_database<'a>(&self, url: &'a str) -> BoxFuture<'a, crate::Result<()>> {
        (self.create_database)(url)
    }

    /// Checks whether the database named by `url` exists.
    pub fn database_exists<'a>(&self, url: &'a str) -> BoxFuture<'a, crate::Result<bool>> {
        (self.database_exists)(url)
    }

    /// Drops the database named by `url`.
    pub fn drop_database<'a>(&self, url: &'a str) -> BoxFuture<'a, crate::Result<()>> {
        (self.drop_database)(url)
    }
}
/// Install the list of drivers for [`AnyConnection`] to use.
///
/// Must be called before an `AnyConnection` or `AnyPool` can be connected.
///
/// ### Errors
/// If called more than once.
pub fn install_drivers(
    drivers: &'static [AnyDriver],
) -> Result<(), Box<dyn std::error::Error + Send + Sync + 'static>> {
    // `OnceCell::set` succeeds exactly once; any later call reports the
    // registry as already populated.
    match DRIVERS.set(drivers) {
        Ok(()) => Ok(()),
        Err(_) => Err("drivers already installed".into()),
    }
}
pub(crate) fn from_url_str(url: &str) -> crate::Result<&'static AnyDriver> {
from_url(&url.parse().map_err(Error::config)?)
}
/// Looks up the installed [`AnyDriver`] whose URL schemes include `url`'s scheme.
///
/// ### Panics
/// If [`install_drivers`] has not been called yet — deliberate, per the
/// `sqlx::any` module documentation.
///
/// ### Errors
/// `Error::Configuration` when no installed driver claims the scheme.
pub(crate) fn from_url(url: &Url) -> crate::Result<&'static AnyDriver> {
    let scheme = url.scheme();

    let drivers: &[AnyDriver] = DRIVERS
        .get()
        .expect("No drivers installed. Please see the documentation in `sqlx::any` for details.");

    drivers
        .iter()
        // fix: use the scheme bound above instead of calling `url.scheme()`
        // again inside the closure for every driver
        .find(|driver| driver.url_schemes.contains(&scheme))
        .ok_or_else(|| {
            Error::Configuration(format!("no driver found for URL scheme {:?}", scheme).into())
        })
}

View file

@ -1,361 +0,0 @@
use crate::encode::Encode;
use crate::types::Type;
#[cfg(feature = "postgres")]
use crate::postgres::Postgres;
#[cfg(feature = "mysql")]
use crate::mysql::MySql;
#[cfg(feature = "mssql")]
use crate::mssql::Mssql;
#[cfg(feature = "sqlite")]
use crate::sqlite::Sqlite;
// Implements Encode for any T where T supports Encode for any database that has support currently
// compiled into SQLx
// Implements `Encode<'q, Any>` for `$ty` by dispatching on the runtime
// variant of the argument buffer and delegating to that driver's
// `Arguments::add`. Arms are feature-gated to mirror the compiled-in drivers.
macro_rules! impl_any_encode {
    ($ty:ty) => {
        impl<'q> crate::encode::Encode<'q, crate::any::Any> for $ty
        where
            // the bound collects `Encode` + `Type` for every enabled driver
            $ty: crate::any::AnyEncode<'q>,
        {
            fn encode_by_ref(
                &self,
                buf: &mut crate::any::AnyArgumentBuffer<'q>,
            ) -> crate::encode::IsNull {
                match &mut buf.0 {
                    #[cfg(feature = "postgres")]
                    crate::any::arguments::AnyArgumentBufferKind::Postgres(args, _) => {
                        args.add(self)
                    }
                    #[cfg(feature = "mysql")]
                    crate::any::arguments::AnyArgumentBufferKind::MySql(args, _) => args.add(self),
                    #[cfg(feature = "mssql")]
                    crate::any::arguments::AnyArgumentBufferKind::Mssql(args, _) => args.add(self),
                    #[cfg(feature = "sqlite")]
                    crate::any::arguments::AnyArgumentBufferKind::Sqlite(args) => args.add(self),
                }

                // unused: the per-driver buffer has already recorded the
                // value; `Any` callers do not inspect this `IsNull`.
                crate::encode::IsNull::No
            }
        }
    };
}
// FIXME: Find a nice way to auto-generate the below or petition Rust to add support for #[cfg]
// to trait bounds
// all 4
// Variant for the case where all four drivers are compiled in: `AnyEncode`
// then requires `Encode` + `Type` for each of Postgres, MySQL, MSSQL and
// SQLite. (Hand-written per feature combination because `#[cfg]` cannot be
// attached to individual trait bounds — see the FIXME above.)
#[cfg(all(
    feature = "postgres",
    feature = "mysql",
    feature = "mssql",
    feature = "sqlite"
))]
pub trait AnyEncode<'q>:
    Encode<'q, Postgres>
    + Type<Postgres>
    + Encode<'q, MySql>
    + Type<MySql>
    + Encode<'q, Mssql>
    + Type<Mssql>
    + Encode<'q, Sqlite>
    + Type<Sqlite>
{
}

// Blanket impl: any type satisfying every per-driver bound is `AnyEncode`.
#[cfg(all(
    feature = "postgres",
    feature = "mysql",
    feature = "mssql",
    feature = "sqlite"
))]
impl<'q, T> AnyEncode<'q> for T where
    T: Encode<'q, Postgres>
        + Type<Postgres>
        + Encode<'q, MySql>
        + Type<MySql>
        + Encode<'q, Mssql>
        + Type<Mssql>
        + Encode<'q, Sqlite>
        + Type<Sqlite>
{
}
// only 3 (4)
#[cfg(all(
not(feature = "mssql"),
all(feature = "postgres", feature = "mysql", feature = "sqlite")
))]
pub trait AnyEncode<'q>:
Encode<'q, Postgres>
+ Type<Postgres>
+ Encode<'q, MySql>
+ Type<MySql>
+ Encode<'q, Sqlite>
+ Type<Sqlite>
{
}
#[cfg(all(
not(feature = "mssql"),
all(feature = "postgres", feature = "mysql", feature = "sqlite")
))]
impl<'q, T> AnyEncode<'q> for T where
T: Encode<'q, Postgres>
+ Type<Postgres>
+ Encode<'q, MySql>
+ Type<MySql>
+ Encode<'q, Sqlite>
+ Type<Sqlite>
{
}
#[cfg(all(
not(feature = "mysql"),
all(feature = "postgres", feature = "mssql", feature = "sqlite")
))]
pub trait AnyEncode<'q>:
Encode<'q, Postgres>
+ Type<Postgres>
+ Encode<'q, Mssql>
+ Type<Mssql>
+ Encode<'q, Sqlite>
+ Type<Sqlite>
{
}
#[cfg(all(
not(feature = "mysql"),
all(feature = "postgres", feature = "mssql", feature = "sqlite")
))]
impl<'q, T> AnyEncode<'q> for T where
T: Encode<'q, Postgres>
+ Type<Postgres>
+ Encode<'q, Mssql>
+ Type<Mssql>
+ Encode<'q, Sqlite>
+ Type<Sqlite>
{
}
#[cfg(all(
not(feature = "sqlite"),
all(feature = "postgres", feature = "mysql", feature = "mssql")
))]
pub trait AnyEncode<'q>:
Encode<'q, Postgres>
+ Type<Postgres>
+ Encode<'q, MySql>
+ Type<MySql>
+ Encode<'q, Mssql>
+ Type<Mssql>
{
}
#[cfg(all(
not(feature = "sqlite"),
all(feature = "postgres", feature = "mysql", feature = "mssql")
))]
impl<'q, T> AnyEncode<'q> for T where
T: Encode<'q, Postgres>
+ Type<Postgres>
+ Encode<'q, MySql>
+ Type<MySql>
+ Encode<'q, Mssql>
+ Type<Mssql>
{
}
#[cfg(all(
not(feature = "postgres"),
all(feature = "sqlite", feature = "mysql", feature = "mssql")
))]
pub trait AnyEncode<'q>:
Encode<'q, Sqlite>
+ Type<Sqlite>
+ Encode<'q, MySql>
+ Type<MySql>
+ Encode<'q, Mssql>
+ Type<Mssql>
{
}
#[cfg(all(
not(feature = "postgres"),
all(feature = "sqlite", feature = "mysql", feature = "mssql")
))]
impl<'q, T> AnyEncode<'q> for T where
T: Encode<'q, Sqlite>
+ Type<Sqlite>
+ Encode<'q, MySql>
+ Type<MySql>
+ Encode<'q, Mssql>
+ Type<Mssql>
{
}
// only 2 (6)
#[cfg(all(
not(any(feature = "mssql", feature = "sqlite")),
all(feature = "postgres", feature = "mysql")
))]
pub trait AnyEncode<'q>:
Encode<'q, Postgres> + Type<Postgres> + Encode<'q, MySql> + Type<MySql>
{
}
#[cfg(all(
not(any(feature = "mssql", feature = "sqlite")),
all(feature = "postgres", feature = "mysql")
))]
impl<'q, T> AnyEncode<'q> for T where
T: Encode<'q, Postgres> + Type<Postgres> + Encode<'q, MySql> + Type<MySql>
{
}
#[cfg(all(
not(any(feature = "mysql", feature = "sqlite")),
all(feature = "postgres", feature = "mssql")
))]
pub trait AnyEncode<'q>:
Encode<'q, Postgres> + Type<Postgres> + Encode<'q, Mssql> + Type<Mssql>
{
}
#[cfg(all(
not(any(feature = "mysql", feature = "sqlite")),
all(feature = "postgres", feature = "mssql")
))]
impl<'q, T> AnyEncode<'q> for T where
T: Encode<'q, Postgres> + Type<Postgres> + Encode<'q, Mssql> + Type<Mssql>
{
}
#[cfg(all(
not(any(feature = "mysql", feature = "mssql")),
all(feature = "postgres", feature = "sqlite")
))]
pub trait AnyEncode<'q>:
Encode<'q, Postgres> + Type<Postgres> + Encode<'q, Sqlite> + Type<Sqlite>
{
}
#[cfg(all(
not(any(feature = "mysql", feature = "mssql")),
all(feature = "postgres", feature = "sqlite")
))]
impl<'q, T> AnyEncode<'q> for T where
T: Encode<'q, Postgres> + Type<Postgres> + Encode<'q, Sqlite> + Type<Sqlite>
{
}
#[cfg(all(
not(any(feature = "postgres", feature = "sqlite")),
all(feature = "mssql", feature = "mysql")
))]
pub trait AnyEncode<'q>: Encode<'q, Mssql> + Type<Mssql> + Encode<'q, MySql> + Type<MySql> {}
#[cfg(all(
not(any(feature = "postgres", feature = "sqlite")),
all(feature = "mssql", feature = "mysql")
))]
impl<'q, T> AnyEncode<'q> for T where
T: Encode<'q, Mssql> + Type<Mssql> + Encode<'q, MySql> + Type<MySql>
{
}
#[cfg(all(
not(any(feature = "postgres", feature = "mysql")),
all(feature = "mssql", feature = "sqlite")
))]
pub trait AnyEncode<'q>:
Encode<'q, Mssql> + Type<Mssql> + Encode<'q, Sqlite> + Type<Sqlite>
{
}
#[cfg(all(
not(any(feature = "postgres", feature = "mysql")),
all(feature = "mssql", feature = "sqlite")
))]
impl<'q, T> AnyEncode<'q> for T where
T: Encode<'q, Mssql> + Type<Mssql> + Encode<'q, Sqlite> + Type<Sqlite>
{
}
#[cfg(all(
not(any(feature = "postgres", feature = "mssql")),
all(feature = "mysql", feature = "sqlite")
))]
pub trait AnyEncode<'q>:
Encode<'q, MySql> + Type<MySql> + Encode<'q, Sqlite> + Type<Sqlite>
{
}
#[cfg(all(
not(any(feature = "postgres", feature = "mssql")),
all(feature = "mysql", feature = "sqlite")
))]
impl<'q, T> AnyEncode<'q> for T where
T: Encode<'q, MySql> + Type<MySql> + Encode<'q, Sqlite> + Type<Sqlite>
{
}
// only 1 (4)
#[cfg(all(
not(any(feature = "mysql", feature = "mssql", feature = "sqlite")),
feature = "postgres"
))]
pub trait AnyEncode<'q>: Encode<'q, Postgres> + Type<Postgres> {}
#[cfg(all(
not(any(feature = "mysql", feature = "mssql", feature = "sqlite")),
feature = "postgres"
))]
impl<'q, T> AnyEncode<'q> for T where T: Encode<'q, Postgres> + Type<Postgres> {}
#[cfg(all(
not(any(feature = "postgres", feature = "mssql", feature = "sqlite")),
feature = "mysql"
))]
pub trait AnyEncode<'q>: Encode<'q, MySql> + Type<MySql> {}
#[cfg(all(
not(any(feature = "postgres", feature = "mssql", feature = "sqlite")),
feature = "mysql"
))]
impl<'q, T> AnyEncode<'q> for T where T: Encode<'q, MySql> + Type<MySql> {}
#[cfg(all(
not(any(feature = "mysql", feature = "postgres", feature = "sqlite")),
feature = "mssql"
))]
pub trait AnyEncode<'q>: Encode<'q, Mssql> + Type<Mssql> {}
#[cfg(all(
not(any(feature = "mysql", feature = "postgres", feature = "sqlite")),
feature = "mssql"
))]
impl<'q, T> AnyEncode<'q> for T where T: Encode<'q, Mssql> + Type<Mssql> {}
#[cfg(all(
not(any(feature = "mysql", feature = "mssql", feature = "postgres")),
feature = "sqlite"
))]
pub trait AnyEncode<'q>: Encode<'q, Sqlite> + Type<Sqlite> {}
#[cfg(all(
not(any(feature = "mysql", feature = "mssql", feature = "postgres")),
feature = "sqlite"
))]
impl<'q, T> AnyEncode<'q> for T where T: Encode<'q, Sqlite> + Type<Sqlite> {}

View file

@ -1,4 +1,4 @@
use crate::any::connection::AnyConnectionKind;
use crate::any::driver;
use crate::any::kind::AnyKind;
use crate::any::{Any, AnyConnection};
use crate::error::Error;
@ -9,224 +9,67 @@ use std::time::Duration;
impl MigrateDatabase for Any {
fn create_database(url: &str) -> BoxFuture<'_, Result<(), Error>> {
Box::pin(async move {
match AnyKind::from_str(url)? {
#[cfg(feature = "postgres")]
AnyKind::Postgres => crate::postgres::Postgres::create_database(url).await,
#[cfg(feature = "sqlite")]
AnyKind::Sqlite => crate::sqlite::Sqlite::create_database(url).await,
#[cfg(feature = "mysql")]
AnyKind::MySql => crate::mysql::MySql::create_database(url).await,
#[cfg(feature = "mssql")]
AnyKind::Mssql => unimplemented!(),
}
Box::pin(async {
driver::from_url_str(url)?
.get_migrate_database()?
.create_database(url)
.await
})
}
fn database_exists(url: &str) -> BoxFuture<'_, Result<bool, Error>> {
Box::pin(async move {
match AnyKind::from_str(url)? {
#[cfg(feature = "postgres")]
AnyKind::Postgres => crate::postgres::Postgres::database_exists(url).await,
#[cfg(feature = "sqlite")]
AnyKind::Sqlite => crate::sqlite::Sqlite::database_exists(url).await,
#[cfg(feature = "mysql")]
AnyKind::MySql => crate::mysql::MySql::database_exists(url).await,
#[cfg(feature = "mssql")]
AnyKind::Mssql => unimplemented!(),
}
Box::pin(async {
driver::from_url_str(url)?
.get_migrate_database()?
.database_exists(url)
.await
})
}
fn drop_database(url: &str) -> BoxFuture<'_, Result<(), Error>> {
Box::pin(async move {
match AnyKind::from_str(url)? {
#[cfg(feature = "postgres")]
AnyKind::Postgres => crate::postgres::Postgres::drop_database(url).await,
#[cfg(feature = "sqlite")]
AnyKind::Sqlite => crate::sqlite::Sqlite::drop_database(url).await,
#[cfg(feature = "mysql")]
AnyKind::MySql => crate::mysql::MySql::drop_database(url).await,
#[cfg(feature = "mssql")]
AnyKind::Mssql => unimplemented!(),
}
Box::pin(async {
driver::from_url_str(url)?
.get_migrate_database()?
.drop_database(url)
.await
})
}
}
impl Migrate for AnyConnection {
fn ensure_migrations_table(&mut self) -> BoxFuture<'_, Result<(), MigrateError>> {
match &mut self.0 {
#[cfg(feature = "postgres")]
AnyConnectionKind::Postgres(conn) => conn.ensure_migrations_table(),
#[cfg(feature = "sqlite")]
AnyConnectionKind::Sqlite(conn) => conn.ensure_migrations_table(),
#[cfg(feature = "mysql")]
AnyConnectionKind::MySql(conn) => conn.ensure_migrations_table(),
#[cfg(feature = "mssql")]
AnyConnectionKind::Mssql(_conn) => unimplemented!(),
}
}
#[allow(deprecated)]
fn version(&mut self) -> BoxFuture<'_, Result<Option<(i64, bool)>, MigrateError>> {
match &mut self.0 {
#[cfg(feature = "postgres")]
AnyConnectionKind::Postgres(conn) => conn.version(),
#[cfg(feature = "sqlite")]
AnyConnectionKind::Sqlite(conn) => conn.version(),
#[cfg(feature = "mysql")]
AnyConnectionKind::MySql(conn) => conn.version(),
#[cfg(feature = "mssql")]
AnyConnectionKind::Mssql(_conn) => unimplemented!(),
}
Box::pin(async { self.get_migrate()?.ensure_migrations_table().await })
}
fn dirty_version(&mut self) -> BoxFuture<'_, Result<Option<i64>, MigrateError>> {
match &mut self.0 {
#[cfg(feature = "postgres")]
AnyConnectionKind::Postgres(conn) => conn.dirty_version(),
#[cfg(feature = "sqlite")]
AnyConnectionKind::Sqlite(conn) => conn.dirty_version(),
#[cfg(feature = "mysql")]
AnyConnectionKind::MySql(conn) => conn.dirty_version(),
#[cfg(feature = "mssql")]
AnyConnectionKind::Mssql(_conn) => unimplemented!(),
}
}
#[allow(deprecated)]
fn validate<'e: 'm, 'm>(
&'e mut self,
migration: &'m Migration,
) -> BoxFuture<'m, Result<(), MigrateError>> {
match &mut self.0 {
#[cfg(feature = "postgres")]
AnyConnectionKind::Postgres(conn) => conn.validate(migration),
#[cfg(feature = "sqlite")]
AnyConnectionKind::Sqlite(conn) => conn.validate(migration),
#[cfg(feature = "mysql")]
AnyConnectionKind::MySql(conn) => conn.validate(migration),
#[cfg(feature = "mssql")]
AnyConnectionKind::Mssql(_conn) => {
let _ = migration;
unimplemented!()
}
}
Box::pin(async { self.get_migrate()?.dirty_version().await })
}
fn list_applied_migrations(
&mut self,
) -> BoxFuture<'_, Result<Vec<AppliedMigration>, MigrateError>> {
match &mut self.0 {
#[cfg(feature = "postgres")]
AnyConnectionKind::Postgres(conn) => conn.list_applied_migrations(),
#[cfg(feature = "sqlite")]
AnyConnectionKind::Sqlite(conn) => conn.list_applied_migrations(),
#[cfg(feature = "mysql")]
AnyConnectionKind::MySql(conn) => conn.list_applied_migrations(),
#[cfg(feature = "mssql")]
AnyConnectionKind::Mssql(_conn) => unimplemented!(),
}
Box::pin(async { self.get_migrate()?.list_applied_migrations().await })
}
fn lock(&mut self) -> BoxFuture<'_, Result<(), MigrateError>> {
match &mut self.0 {
#[cfg(feature = "postgres")]
AnyConnectionKind::Postgres(conn) => conn.lock(),
#[cfg(feature = "sqlite")]
AnyConnectionKind::Sqlite(conn) => conn.lock(),
#[cfg(feature = "mysql")]
AnyConnectionKind::MySql(conn) => conn.lock(),
#[cfg(feature = "mssql")]
AnyConnectionKind::Mssql(_conn) => unimplemented!(),
}
Box::pin(async { self.get_migrate()?.lock().await })
}
fn unlock(&mut self) -> BoxFuture<'_, Result<(), MigrateError>> {
match &mut self.0 {
#[cfg(feature = "postgres")]
AnyConnectionKind::Postgres(conn) => conn.unlock(),
#[cfg(feature = "sqlite")]
AnyConnectionKind::Sqlite(conn) => conn.unlock(),
#[cfg(feature = "mysql")]
AnyConnectionKind::MySql(conn) => conn.unlock(),
#[cfg(feature = "mssql")]
AnyConnectionKind::Mssql(_conn) => unimplemented!(),
}
Box::pin(async { self.get_migrate()?.unlock().await })
}
fn apply<'e: 'm, 'm>(
&'e mut self,
migration: &'m Migration,
) -> BoxFuture<'m, Result<Duration, MigrateError>> {
match &mut self.0 {
#[cfg(feature = "postgres")]
AnyConnectionKind::Postgres(conn) => conn.apply(migration),
#[cfg(feature = "sqlite")]
AnyConnectionKind::Sqlite(conn) => conn.apply(migration),
#[cfg(feature = "mysql")]
AnyConnectionKind::MySql(conn) => conn.apply(migration),
#[cfg(feature = "mssql")]
AnyConnectionKind::Mssql(_conn) => {
let _ = migration;
unimplemented!()
}
}
Box::pin(async { self.get_migrate()?.apply(migration).await })
}
fn revert<'e: 'm, 'm>(
&'e mut self,
migration: &'m Migration,
) -> BoxFuture<'m, Result<Duration, MigrateError>> {
match &mut self.0 {
#[cfg(feature = "postgres")]
AnyConnectionKind::Postgres(conn) => conn.revert(migration),
#[cfg(feature = "sqlite")]
AnyConnectionKind::Sqlite(conn) => conn.revert(migration),
#[cfg(feature = "mysql")]
AnyConnectionKind::MySql(conn) => conn.revert(migration),
#[cfg(feature = "mssql")]
AnyConnectionKind::Mssql(_conn) => {
let _ = migration;
unimplemented!()
}
}
Box::pin(async { self.get_migrate()?.revert(migration).await })
}
}

View file

@ -1,16 +1,10 @@
//! Generic database driver with the specific driver selected at runtime.
//! **SEE DOCUMENTATION BEFORE USE**. Generic database driver with the specific driver selected at runtime.
//!
//! The underlying database drivers are chosen at runtime from the list set via
//! [`install_drivers`][self::driver::install_drivers). Any use of `AnyConnection` or `AnyPool`
//! without this will panic.
use crate::executor::Executor;
#[macro_use]
mod decode;
#[macro_use]
mod encode;
#[macro_use]
mod r#type;
mod arguments;
pub(crate) mod column;
mod connection;
@ -26,27 +20,31 @@ pub(crate) mod type_info;
pub mod types;
pub(crate) mod value;
pub mod driver;
#[cfg(feature = "migrate")]
mod migrate;
pub use arguments::{AnyArgumentBuffer, AnyArguments};
pub use column::{AnyColumn, AnyColumnIndex};
pub use column::AnyColumn;
pub use connection::AnyConnection;
// Used internally in `sqlx-macros`
#[doc(hidden)]
pub use connection::AnyConnectionKind;
use crate::encode::Encode;
pub use connection::AnyConnectionBackend;
pub use database::Any;
pub use decode::AnyDecode;
pub use encode::AnyEncode;
pub use kind::AnyKind;
pub use options::AnyConnectOptions;
pub use query_result::AnyQueryResult;
pub use row::AnyRow;
pub use statement::AnyStatement;
pub use transaction::AnyTransactionManager;
pub use type_info::AnyTypeInfo;
pub use type_info::{AnyTypeInfo, AnyTypeInfoKind};
pub use value::{AnyValue, AnyValueRef};
#[doc(hidden)]
pub use value::AnyValueKind;
pub type AnyPool = crate::pool::Pool<Any>;
pub type AnyPoolOptions = crate::pool::PoolOptions<Any>;
@ -57,34 +55,25 @@ impl<'c, T: Executor<'c, Database = Any>> AnyExecutor<'c> for T {}
// NOTE: required due to the lack of lazy normalization
impl_into_arguments_for_arguments!(AnyArguments<'q>);
impl_executor_for_pool_connection!(Any, AnyConnection, AnyRow);
impl_executor_for_transaction!(Any, AnyRow);
// impl_executor_for_pool_connection!(Any, AnyConnection, AnyRow);
// impl_executor_for_transaction!(Any, AnyRow);
impl_acquire!(Any, AnyConnection);
impl_column_index_for_row!(AnyRow);
impl_column_index_for_statement!(AnyStatement);
impl_into_maybe_pool!(Any, AnyConnection);
// impl_into_maybe_pool!(Any, AnyConnection);
// required because some databases have a different handling of NULL
impl<'q, T> crate::encode::Encode<'q, Any> for Option<T>
impl<'q, T> Encode<'q, Any> for Option<T>
where
T: AnyEncode<'q> + 'q,
T: Encode<'q, Any> + 'q,
{
fn encode_by_ref(&self, buf: &mut AnyArgumentBuffer<'q>) -> crate::encode::IsNull {
match &mut buf.0 {
#[cfg(feature = "postgres")]
arguments::AnyArgumentBufferKind::Postgres(args, _) => args.add(self),
#[cfg(feature = "mysql")]
arguments::AnyArgumentBufferKind::MySql(args, _) => args.add(self),
#[cfg(feature = "mssql")]
arguments::AnyArgumentBufferKind::Mssql(args, _) => args.add(self),
#[cfg(feature = "sqlite")]
arguments::AnyArgumentBufferKind::Sqlite(args) => args.add(self),
if let Some(value) = self {
value.encode_by_ref(buf);
crate::encode::IsNull::No
} else {
buf.0.push(AnyValueKind::Null);
crate::encode::IsNull::Yes
}
// unused
crate::encode::IsNull::No
}
}

View file

@ -1,23 +1,13 @@
use crate::any::AnyConnection;
use crate::connection::ConnectOptions;
use crate::connection::{ConnectOptions, LogSettings};
use crate::error::Error;
use futures_core::future::BoxFuture;
use log::LevelFilter;
use std::str::FromStr;
use std::time::Duration;
#[cfg(feature = "postgres")]
use crate::postgres::PgConnectOptions;
#[cfg(feature = "mysql")]
use crate::mysql::MySqlConnectOptions;
#[cfg(feature = "sqlite")]
use crate::sqlite::SqliteConnectOptions;
use url::Url;
use crate::any::kind::AnyKind;
#[cfg(feature = "mssql")]
use crate::mssql::MssqlConnectOptions;
/// Opaque options for connecting to a database. These may only be constructed by parsing from
/// a connection url.
@ -27,210 +17,47 @@ use crate::mssql::MssqlConnectOptions;
/// mysql://root:password@localhost/database
/// ```
#[derive(Debug, Clone)]
pub struct AnyConnectOptions(pub(crate) AnyConnectOptionsKind);
impl AnyConnectOptions {
pub fn kind(&self) -> AnyKind {
match &self.0 {
#[cfg(feature = "postgres")]
AnyConnectOptionsKind::Postgres(_) => AnyKind::Postgres,
#[cfg(feature = "mysql")]
AnyConnectOptionsKind::MySql(_) => AnyKind::MySql,
#[cfg(feature = "sqlite")]
AnyConnectOptionsKind::Sqlite(_) => AnyKind::Sqlite,
#[cfg(feature = "mssql")]
AnyConnectOptionsKind::Mssql(_) => AnyKind::Mssql,
}
}
#[non_exhaustive]
pub struct AnyConnectOptions {
pub database_url: Url,
pub log_settings: LogSettings,
}
macro_rules! try_from_any_connect_options_to {
($to:ty, $kind:path, $name:expr) => {
impl TryFrom<AnyConnectOptions> for $to {
type Error = Error;
fn try_from(value: AnyConnectOptions) -> Result<Self, Self::Error> {
#[allow(irrefutable_let_patterns)]
if let $kind(connect_options) = value.0 {
Ok(connect_options)
} else {
Err(Error::Configuration(
format!("Not {} typed AnyConnectOptions", $name).into(),
))
}
}
}
impl AnyConnectOptions {
paste::item! {
pub fn [< as_ $name >] (&self) -> Option<&$to> {
#[allow(irrefutable_let_patterns)]
if let $kind(ref connect_options) = self.0 {
Some(connect_options)
} else {
None
}
}
pub fn [< as_ $name _mut >] (&mut self) -> Option<&mut $to> {
#[allow(irrefutable_let_patterns)]
if let $kind(ref mut connect_options) = self.0 {
Some(connect_options)
} else {
None
}
}
}
}
};
}
#[cfg(feature = "postgres")]
try_from_any_connect_options_to!(
PgConnectOptions,
AnyConnectOptionsKind::Postgres,
"postgres"
);
#[cfg(feature = "mysql")]
try_from_any_connect_options_to!(MySqlConnectOptions, AnyConnectOptionsKind::MySql, "mysql");
#[cfg(feature = "sqlite")]
try_from_any_connect_options_to!(
SqliteConnectOptions,
AnyConnectOptionsKind::Sqlite,
"sqlite"
);
#[cfg(feature = "mssql")]
try_from_any_connect_options_to!(MssqlConnectOptions, AnyConnectOptionsKind::Mssql, "mssql");
#[derive(Debug, Clone)]
pub(crate) enum AnyConnectOptionsKind {
#[cfg(feature = "postgres")]
Postgres(PgConnectOptions),
#[cfg(feature = "mysql")]
MySql(MySqlConnectOptions),
#[cfg(feature = "sqlite")]
Sqlite(SqliteConnectOptions),
#[cfg(feature = "mssql")]
Mssql(MssqlConnectOptions),
}
#[cfg(feature = "postgres")]
impl From<PgConnectOptions> for AnyConnectOptions {
fn from(options: PgConnectOptions) -> Self {
Self(AnyConnectOptionsKind::Postgres(options))
}
}
#[cfg(feature = "mysql")]
impl From<MySqlConnectOptions> for AnyConnectOptions {
fn from(options: MySqlConnectOptions) -> Self {
Self(AnyConnectOptionsKind::MySql(options))
}
}
#[cfg(feature = "sqlite")]
impl From<SqliteConnectOptions> for AnyConnectOptions {
fn from(options: SqliteConnectOptions) -> Self {
Self(AnyConnectOptionsKind::Sqlite(options))
}
}
#[cfg(feature = "mssql")]
impl From<MssqlConnectOptions> for AnyConnectOptions {
fn from(options: MssqlConnectOptions) -> Self {
Self(AnyConnectOptionsKind::Mssql(options))
}
}
impl FromStr for AnyConnectOptions {
type Err = Error;
fn from_str(url: &str) -> Result<Self, Self::Err> {
match AnyKind::from_str(url)? {
#[cfg(feature = "postgres")]
AnyKind::Postgres => {
PgConnectOptions::from_str(url).map(AnyConnectOptionsKind::Postgres)
}
#[cfg(feature = "mysql")]
AnyKind::MySql => MySqlConnectOptions::from_str(url).map(AnyConnectOptionsKind::MySql),
#[cfg(feature = "sqlite")]
AnyKind::Sqlite => {
SqliteConnectOptions::from_str(url).map(AnyConnectOptionsKind::Sqlite)
}
#[cfg(feature = "mssql")]
AnyKind::Mssql => MssqlConnectOptions::from_str(url).map(AnyConnectOptionsKind::Mssql),
}
.map(AnyConnectOptions)
Ok(AnyConnectOptions {
database_url: url
.parse::<Url>()
.map_err(|e| Error::Configuration(e.into()))?,
log_settings: LogSettings::default(),
})
}
}
impl ConnectOptions for AnyConnectOptions {
type Connection = AnyConnection;
fn from_url(url: &Url) -> Result<Self, Error> {
Ok(AnyConnectOptions {
database_url: url.clone(),
log_settings: LogSettings::default(),
})
}
#[inline]
fn connect(&self) -> BoxFuture<'_, Result<AnyConnection, Error>> {
Box::pin(AnyConnection::establish(self))
}
fn log_statements(&mut self, level: LevelFilter) -> &mut Self {
match &mut self.0 {
#[cfg(feature = "postgres")]
AnyConnectOptionsKind::Postgres(o) => {
o.log_statements(level);
}
#[cfg(feature = "mysql")]
AnyConnectOptionsKind::MySql(o) => {
o.log_statements(level);
}
#[cfg(feature = "sqlite")]
AnyConnectOptionsKind::Sqlite(o) => {
o.log_statements(level);
}
#[cfg(feature = "mssql")]
AnyConnectOptionsKind::Mssql(o) => {
o.log_statements(level);
}
};
self.log_settings.statements_level = level;
self
}
fn log_slow_statements(&mut self, level: LevelFilter, duration: Duration) -> &mut Self {
match &mut self.0 {
#[cfg(feature = "postgres")]
AnyConnectOptionsKind::Postgres(o) => {
o.log_slow_statements(level, duration);
}
#[cfg(feature = "mysql")]
AnyConnectOptionsKind::MySql(o) => {
o.log_slow_statements(level, duration);
}
#[cfg(feature = "sqlite")]
AnyConnectOptionsKind::Sqlite(o) => {
o.log_slow_statements(level, duration);
}
#[cfg(feature = "mssql")]
AnyConnectOptionsKind::Mssql(o) => {
o.log_slow_statements(level, duration);
}
};
self.log_settings.slow_statements_level = level;
self.log_settings.slow_statements_duration = duration;
self
}
}

View file

@ -2,8 +2,10 @@ use std::iter::{Extend, IntoIterator};
#[derive(Debug, Default)]
pub struct AnyQueryResult {
pub(crate) rows_affected: u64,
pub(crate) last_insert_id: Option<i64>,
#[doc(hidden)]
pub rows_affected: u64,
#[doc(hidden)]
pub last_insert_id: Option<i64>,
}
impl AnyQueryResult {

View file

@ -1,45 +1,25 @@
use crate::any::error::mismatched_types;
use crate::any::{Any, AnyColumn, AnyColumnIndex};
use crate::column::ColumnIndex;
use crate::database::HasValueRef;
use crate::any::{Any, AnyColumn, AnyTypeInfo, AnyTypeInfoKind, AnyValue, AnyValueKind};
use crate::column::{Column, ColumnIndex};
use crate::database::{Database, HasValueRef};
use crate::decode::Decode;
use crate::error::Error;
use crate::ext::ustr::UStr;
use crate::row::Row;
use crate::type_info::TypeInfo;
use crate::types::Type;
use crate::value::ValueRef;
#[cfg(feature = "postgres")]
use crate::postgres::PgRow;
#[cfg(feature = "mysql")]
use crate::mysql::MySqlRow;
#[cfg(feature = "sqlite")]
use crate::sqlite::SqliteRow;
#[cfg(feature = "mssql")]
use crate::mssql::MssqlRow;
use crate::value::{Value, ValueRef};
use std::borrow::Cow;
use std::sync::Arc;
#[derive(Clone)]
pub struct AnyRow {
pub(crate) kind: AnyRowKind,
pub(crate) columns: Vec<AnyColumn>,
}
impl crate::row::private_row::Sealed for AnyRow {}
pub(crate) enum AnyRowKind {
#[cfg(feature = "postgres")]
Postgres(PgRow),
#[cfg(feature = "mysql")]
MySql(MySqlRow),
#[cfg(feature = "sqlite")]
Sqlite(SqliteRow),
#[cfg(feature = "mssql")]
Mssql(MssqlRow),
#[doc(hidden)]
pub column_names: Arc<crate::HashMap<UStr, usize>>,
#[doc(hidden)]
pub columns: Vec<AnyColumn>,
#[doc(hidden)]
pub values: Vec<AnyValue>,
}
impl Row for AnyRow {
@ -57,20 +37,14 @@ impl Row for AnyRow {
I: ColumnIndex<Self>,
{
let index = index.index(self)?;
match &self.kind {
#[cfg(feature = "postgres")]
AnyRowKind::Postgres(row) => row.try_get_raw(index).map(Into::into),
#[cfg(feature = "mysql")]
AnyRowKind::MySql(row) => row.try_get_raw(index).map(Into::into),
#[cfg(feature = "sqlite")]
AnyRowKind::Sqlite(row) => row.try_get_raw(index).map(Into::into),
#[cfg(feature = "mssql")]
AnyRowKind::Mssql(row) => row.try_get_raw(index).map(Into::into),
}
Ok(self
.values
.get(index)
.ok_or_else(|| Error::ColumnIndexOutOfBounds {
index,
len: self.columns.len(),
})?
.as_ref())
}
fn try_get<'r, T, I>(&'r self, index: I) -> Result<T, Error>
@ -93,23 +67,82 @@ impl Row for AnyRow {
}
}
impl<'i> ColumnIndex<AnyRow> for &'i str
where
&'i str: AnyColumnIndex,
{
impl<'i> ColumnIndex<AnyRow> for &'i str {
fn index(&self, row: &AnyRow) -> Result<usize, Error> {
match &row.kind {
#[cfg(feature = "postgres")]
AnyRowKind::Postgres(row) => self.index(row),
#[cfg(feature = "mysql")]
AnyRowKind::MySql(row) => self.index(row),
#[cfg(feature = "sqlite")]
AnyRowKind::Sqlite(row) => self.index(row),
#[cfg(feature = "mssql")]
AnyRowKind::Mssql(row) => self.index(row),
}
row.column_names
.get(*self)
.copied()
.ok_or_else(|| Error::ColumnNotFound(self.to_string()))
}
}
impl AnyRow {
// This is not a `TryFrom` impl because trait impls are easy for users to accidentally
// become reliant upon, even if hidden, but we want to be able to change the bounds
// on this function as the `Any` driver gains support for more types.
//
// Also `column_names` needs to be passed by the driver to avoid making deep copies.
#[doc(hidden)]
pub fn map_from<'a, R: Row>(
row: &'a R,
column_names: Arc<crate::HashMap<UStr, usize>>,
) -> Result<Self, Error>
where
usize: ColumnIndex<R>,
AnyTypeInfo: for<'b> TryFrom<&'b <R::Database as Database>::TypeInfo, Error = Error>,
AnyColumn: for<'b> TryFrom<&'b <R::Database as Database>::Column, Error = Error>,
bool: Type<R::Database> + Decode<'a, R::Database>,
i16: Type<R::Database> + Decode<'a, R::Database>,
i32: Type<R::Database> + Decode<'a, R::Database>,
i64: Type<R::Database> + Decode<'a, R::Database>,
f32: Type<R::Database> + Decode<'a, R::Database>,
f64: Type<R::Database> + Decode<'a, R::Database>,
String: Type<R::Database> + Decode<'a, R::Database>,
Vec<u8>: Type<R::Database> + Decode<'a, R::Database>,
{
let mut row_out = AnyRow {
column_names,
columns: Vec::with_capacity(row.columns().len()),
values: Vec::with_capacity(row.columns().len()),
};
for col in row.columns() {
let i = col.ordinal();
let any_col = AnyColumn::try_from(col)?;
let value = row.try_get_raw(i)?;
// Map based on the _value_ type info, not the column type info.
let type_info =
AnyTypeInfo::try_from(&value.type_info()).map_err(|e| Error::ColumnDecode {
index: col.ordinal().to_string(),
source: e.into(),
})?;
let value_kind = match type_info.kind {
_ if value.is_null() => AnyValueKind::Null,
AnyTypeInfoKind::Null => AnyValueKind::Null,
AnyTypeInfoKind::Bool => AnyValueKind::Bool(decode(value)?),
AnyTypeInfoKind::SmallInt => AnyValueKind::SmallInt(decode(value)?),
AnyTypeInfoKind::Integer => AnyValueKind::Integer(decode(value)?),
AnyTypeInfoKind::BigInt => AnyValueKind::BigInt(decode(value)?),
AnyTypeInfoKind::Real => AnyValueKind::Real(decode(value)?),
AnyTypeInfoKind::Double => AnyValueKind::Double(decode(value)?),
AnyTypeInfoKind::Blob => AnyValueKind::Blob(decode::<_, Vec<u8>>(value)?.into()),
AnyTypeInfoKind::Text => AnyValueKind::Text(decode::<_, String>(value)?.into()),
};
row_out.columns.push(any_col);
row_out.values.push(AnyValue { kind: value_kind });
}
Ok(row_out)
}
}
fn decode<'r, DB: Database, T: Decode<'r, DB>>(
valueref: <DB as HasValueRef<'r>>::ValueRef,
) -> crate::Result<T> {
Decode::decode(valueref).map_err(Error::decode)
}

View file

@ -1,5 +1,6 @@
use crate::any::{Any, AnyArguments, AnyColumn, AnyColumnIndex, AnyTypeInfo};
use crate::any::{Any, AnyArguments, AnyColumn, AnyTypeInfo};
use crate::column::ColumnIndex;
use crate::database::{Database, HasStatement};
use crate::error::Error;
use crate::ext::ustr::UStr;
use crate::statement::Statement;
@ -9,10 +10,14 @@ use std::borrow::Cow;
use std::sync::Arc;
pub struct AnyStatement<'q> {
pub(crate) sql: Cow<'q, str>,
pub(crate) parameters: Option<Either<Vec<AnyTypeInfo>, usize>>,
pub(crate) column_names: Arc<HashMap<UStr, usize>>,
pub(crate) columns: Vec<AnyColumn>,
#[doc(hidden)]
pub sql: Cow<'q, str>,
#[doc(hidden)]
pub parameters: Option<Either<Vec<AnyTypeInfo>, usize>>,
#[doc(hidden)]
pub column_names: Arc<HashMap<UStr, usize>>,
#[doc(hidden)]
pub columns: Vec<AnyColumn>,
}
impl<'q> Statement<'q> for AnyStatement<'q> {
@ -46,10 +51,7 @@ impl<'q> Statement<'q> for AnyStatement<'q> {
impl_statement_query!(AnyArguments<'_>);
}
impl<'i> ColumnIndex<AnyStatement<'_>> for &'i str
where
&'i str: AnyColumnIndex,
{
impl<'i> ColumnIndex<AnyStatement<'_>> for &'i str {
fn index(&self, statement: &AnyStatement<'_>) -> Result<usize, Error> {
statement
.column_names
@ -58,3 +60,41 @@ where
.map(|v| *v)
}
}
impl<'q> AnyStatement<'q> {
#[doc(hidden)]
pub fn try_from_statement<S>(
query: &'q str,
statement: &S,
column_names: Arc<HashMap<UStr, usize>>,
) -> crate::Result<Self>
where
S: Statement<'q>,
AnyTypeInfo: for<'a> TryFrom<&'a <S::Database as Database>::TypeInfo, Error = Error>,
AnyColumn: for<'a> TryFrom<&'a <S::Database as Database>::Column, Error = Error>,
{
let parameters = match statement.parameters() {
Some(Either::Left(parameters)) => Some(Either::Left(
parameters
.iter()
.map(AnyTypeInfo::try_from)
.collect::<Result<Vec<_>, _>>()?,
)),
Some(Either::Right(count)) => Some(Either::Right(count)),
None => None,
};
let columns = statement
.columns()
.iter()
.map(AnyColumn::try_from)
.collect::<Result<Vec<_>, _>>()?;
Ok(Self {
sql: query.into(),
columns,
column_names,
parameters,
})
}
}

View file

@ -1,6 +1,5 @@
use futures_util::future::BoxFuture;
use crate::any::connection::AnyConnectionKind;
use crate::any::{Any, AnyConnection};
use crate::database::Database;
use crate::error::Error;
@ -12,98 +11,18 @@ impl TransactionManager for AnyTransactionManager {
type Database = Any;
fn begin(conn: &mut AnyConnection) -> BoxFuture<'_, Result<(), Error>> {
match &mut conn.0 {
#[cfg(feature = "postgres")]
AnyConnectionKind::Postgres(conn) => {
<crate::postgres::Postgres as Database>::TransactionManager::begin(conn)
}
#[cfg(feature = "mysql")]
AnyConnectionKind::MySql(conn) => {
<crate::mysql::MySql as Database>::TransactionManager::begin(conn)
}
#[cfg(feature = "sqlite")]
AnyConnectionKind::Sqlite(conn) => {
<crate::sqlite::Sqlite as Database>::TransactionManager::begin(conn)
}
#[cfg(feature = "mssql")]
AnyConnectionKind::Mssql(conn) => {
<crate::mssql::Mssql as Database>::TransactionManager::begin(conn)
}
}
conn.backend.begin()
}
fn commit(conn: &mut AnyConnection) -> BoxFuture<'_, Result<(), Error>> {
match &mut conn.0 {
#[cfg(feature = "postgres")]
AnyConnectionKind::Postgres(conn) => {
<crate::postgres::Postgres as Database>::TransactionManager::commit(conn)
}
#[cfg(feature = "mysql")]
AnyConnectionKind::MySql(conn) => {
<crate::mysql::MySql as Database>::TransactionManager::commit(conn)
}
#[cfg(feature = "sqlite")]
AnyConnectionKind::Sqlite(conn) => {
<crate::sqlite::Sqlite as Database>::TransactionManager::commit(conn)
}
#[cfg(feature = "mssql")]
AnyConnectionKind::Mssql(conn) => {
<crate::mssql::Mssql as Database>::TransactionManager::commit(conn)
}
}
conn.backend.commit()
}
fn rollback(conn: &mut AnyConnection) -> BoxFuture<'_, Result<(), Error>> {
match &mut conn.0 {
#[cfg(feature = "postgres")]
AnyConnectionKind::Postgres(conn) => {
<crate::postgres::Postgres as Database>::TransactionManager::rollback(conn)
}
#[cfg(feature = "mysql")]
AnyConnectionKind::MySql(conn) => {
<crate::mysql::MySql as Database>::TransactionManager::rollback(conn)
}
#[cfg(feature = "sqlite")]
AnyConnectionKind::Sqlite(conn) => {
<crate::sqlite::Sqlite as Database>::TransactionManager::rollback(conn)
}
#[cfg(feature = "mssql")]
AnyConnectionKind::Mssql(conn) => {
<crate::mssql::Mssql as Database>::TransactionManager::rollback(conn)
}
}
conn.backend.rollback()
}
fn start_rollback(conn: &mut AnyConnection) {
match &mut conn.0 {
#[cfg(feature = "postgres")]
AnyConnectionKind::Postgres(conn) => {
<crate::postgres::Postgres as Database>::TransactionManager::start_rollback(conn)
}
#[cfg(feature = "mysql")]
AnyConnectionKind::MySql(conn) => {
<crate::mysql::MySql as Database>::TransactionManager::start_rollback(conn)
}
#[cfg(feature = "sqlite")]
AnyConnectionKind::Sqlite(conn) => {
<crate::sqlite::Sqlite as Database>::TransactionManager::start_rollback(conn)
}
#[cfg(feature = "mssql")]
AnyConnectionKind::Mssql(conn) => {
<crate::mssql::Mssql as Database>::TransactionManager::start_rollback(conn)
}
}
conn.backend.start_rollback()
}
}

View file

@ -1,40 +0,0 @@
// Type is required by the bounds of the [`Row`] and [`Arguments`] trait but its been overridden in
// AnyRow and AnyArguments to not use this implementation; but instead, delegate to the
// database-specific implementation.
//
// The other use of this trait is for compile-time verification which is not feasible to support
// for the [`Any`] driver.
macro_rules! impl_any_type {
($ty:ty) => {
impl crate::types::Type<crate::any::Any> for $ty {
fn type_info() -> crate::any::AnyTypeInfo {
// FIXME: nicer panic explaining why this isn't possible
unimplemented!()
}
fn compatible(ty: &crate::any::AnyTypeInfo) -> bool {
match &ty.0 {
#[cfg(feature = "postgres")]
crate::any::type_info::AnyTypeInfoKind::Postgres(ty) => {
<$ty as crate::types::Type<crate::postgres::Postgres>>::compatible(&ty)
}
#[cfg(feature = "mysql")]
crate::any::type_info::AnyTypeInfoKind::MySql(ty) => {
<$ty as crate::types::Type<crate::mysql::MySql>>::compatible(&ty)
}
#[cfg(feature = "sqlite")]
crate::any::type_info::AnyTypeInfoKind::Sqlite(ty) => {
<$ty as crate::types::Type<crate::sqlite::Sqlite>>::compatible(&ty)
}
#[cfg(feature = "mssql")]
crate::any::type_info::AnyTypeInfoKind::Mssql(ty) => {
<$ty as crate::types::Type<crate::mssql::Mssql>>::compatible(&ty)
}
}
}
}
};
}

View file

@ -2,84 +2,63 @@ use std::fmt::{self, Display, Formatter};
use crate::type_info::TypeInfo;
#[cfg(feature = "postgres")]
use crate::postgres::PgTypeInfo;
#[cfg(feature = "mysql")]
use crate::mysql::MySqlTypeInfo;
#[cfg(feature = "sqlite")]
use crate::sqlite::SqliteTypeInfo;
#[cfg(feature = "mssql")]
use crate::mssql::MssqlTypeInfo;
use AnyTypeInfoKind::*;
#[derive(Debug, Clone, PartialEq)]
pub struct AnyTypeInfo(pub(crate) AnyTypeInfoKind);
pub struct AnyTypeInfo {
#[doc(hidden)]
pub kind: AnyTypeInfoKind,
}
#[derive(Debug, Clone, PartialEq)]
pub(crate) enum AnyTypeInfoKind {
#[cfg(feature = "postgres")]
Postgres(PgTypeInfo),
impl AnyTypeInfo {
pub fn kind(&self) -> AnyTypeInfoKind {
self.kind
}
}
#[cfg(feature = "mysql")]
MySql(MySqlTypeInfo),
#[cfg(feature = "sqlite")]
Sqlite(SqliteTypeInfo),
#[cfg(feature = "mssql")]
Mssql(MssqlTypeInfo),
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum AnyTypeInfoKind {
Null,
Bool,
SmallInt,
Integer,
BigInt,
Real,
Double,
Text,
Blob,
}
impl TypeInfo for AnyTypeInfo {
fn is_null(&self) -> bool {
match &self.0 {
#[cfg(feature = "postgres")]
AnyTypeInfoKind::Postgres(ty) => ty.is_null(),
#[cfg(feature = "mysql")]
AnyTypeInfoKind::MySql(ty) => ty.is_null(),
#[cfg(feature = "sqlite")]
AnyTypeInfoKind::Sqlite(ty) => ty.is_null(),
#[cfg(feature = "mssql")]
AnyTypeInfoKind::Mssql(ty) => ty.is_null(),
}
false
}
fn name(&self) -> &str {
match &self.0 {
#[cfg(feature = "postgres")]
AnyTypeInfoKind::Postgres(ty) => ty.name(),
use AnyTypeInfoKind::*;
#[cfg(feature = "mysql")]
AnyTypeInfoKind::MySql(ty) => ty.name(),
#[cfg(feature = "sqlite")]
AnyTypeInfoKind::Sqlite(ty) => ty.name(),
#[cfg(feature = "mssql")]
AnyTypeInfoKind::Mssql(ty) => ty.name(),
match self.kind {
Bool => "BOOLEAN",
SmallInt => "SMALLINT",
Integer => "INTEGER",
BigInt => "BIGINT",
Real => "REAL",
Double => "DOUBLE",
Text => "TEXT",
Blob => "BLOB",
Null => "NULL",
}
}
}
impl Display for AnyTypeInfo {
fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
match &self.0 {
#[cfg(feature = "postgres")]
AnyTypeInfoKind::Postgres(ty) => ty.fmt(f),
#[cfg(feature = "mysql")]
AnyTypeInfoKind::MySql(ty) => ty.fmt(f),
#[cfg(feature = "sqlite")]
AnyTypeInfoKind::Sqlite(ty) => ty.fmt(f),
#[cfg(feature = "mssql")]
AnyTypeInfoKind::Mssql(ty) => ty.fmt(f),
}
f.write_str(self.name())
}
}
impl AnyTypeInfoKind {
pub fn is_integer(&self) -> bool {
matches!(self, SmallInt | Integer | BigInt)
}
}

View file

@ -1,195 +0,0 @@
//! Conversions between Rust and standard **SQL** types.
//!
//! # Types
//!
//! | Rust type | SQL type(s) |
//! |---------------------------------------|------------------------------------------------------|
//! | `bool` | BOOLEAN |
//! | `i16` | SMALLINT |
//! | `i32` | INT |
//! | `i64` | BIGINT |
//! | `f32` | FLOAT |
//! | `f64` | DOUBLE |
//! | `&str`, [`String`] | VARCHAR, CHAR, TEXT |
//!
//! # Nullable
//!
//! In addition, `Option<T>` is supported where `T` implements `Type`. An `Option<T>` represents
//! a potentially `NULL` value from SQL.
//!
// Type
impl_any_type!(bool);
impl_any_type!(i16);
impl_any_type!(i32);
impl_any_type!(i64);
impl_any_type!(f32);
impl_any_type!(f64);
impl_any_type!(str);
impl_any_type!(String);
// Encode
impl_any_encode!(bool);
impl_any_encode!(i16);
impl_any_encode!(i32);
impl_any_encode!(i64);
impl_any_encode!(f32);
impl_any_encode!(f64);
impl_any_encode!(&'q str);
impl_any_encode!(String);
// Decode
impl_any_decode!(bool);
impl_any_decode!(i16);
impl_any_decode!(i32);
impl_any_decode!(i64);
impl_any_decode!(f32);
impl_any_decode!(f64);
impl_any_decode!(&'r str);
impl_any_decode!(String);
// Conversions for Blob SQL types
// Type
#[cfg(all(
any(feature = "mysql", feature = "sqlite", feature = "postgres"),
not(feature = "mssql")
))]
impl_any_type!([u8]);
#[cfg(all(
any(feature = "mysql", feature = "sqlite", feature = "postgres"),
not(feature = "mssql")
))]
impl_any_type!(Vec<u8>);
// Encode
#[cfg(all(
any(feature = "mysql", feature = "sqlite", feature = "postgres"),
not(feature = "mssql")
))]
impl_any_encode!(&'q [u8]);
#[cfg(all(
any(feature = "mysql", feature = "sqlite", feature = "postgres"),
not(feature = "mssql")
))]
impl_any_encode!(Vec<u8>);
// Decode
#[cfg(all(
any(feature = "mysql", feature = "sqlite", feature = "postgres"),
not(feature = "mssql")
))]
impl_any_decode!(&'r [u8]);
#[cfg(all(
any(feature = "mysql", feature = "sqlite", feature = "postgres"),
not(feature = "mssql")
))]
impl_any_decode!(Vec<u8>);
// Conversions for Time SQL types
// Type
#[cfg(all(
feature = "chrono",
any(feature = "mysql", feature = "sqlite", feature = "postgres"),
not(feature = "mssql")
))]
impl_any_type!(chrono::NaiveDate);
#[cfg(all(
feature = "chrono",
any(feature = "mysql", feature = "sqlite", feature = "postgres"),
not(feature = "mssql")
))]
impl_any_type!(chrono::NaiveTime);
#[cfg(all(
feature = "chrono",
any(feature = "mysql", feature = "sqlite", feature = "postgres"),
not(feature = "mssql")
))]
impl_any_type!(chrono::NaiveDateTime);
#[cfg(all(
feature = "chrono",
any(feature = "mysql", feature = "sqlite", feature = "postgres"),
not(feature = "mssql")
))]
impl_any_type!(chrono::DateTime<chrono::offset::Utc>);
#[cfg(all(
feature = "chrono",
any(feature = "sqlite", feature = "postgres", feature = "mysql"),
not(feature = "mssql")
))]
impl_any_type!(chrono::DateTime<chrono::offset::Local>);
// Encode
#[cfg(all(
feature = "chrono",
any(feature = "mysql", feature = "sqlite", feature = "postgres"),
not(feature = "mssql")
))]
impl_any_encode!(chrono::NaiveDate);
#[cfg(all(
feature = "chrono",
any(feature = "mysql", feature = "sqlite", feature = "postgres"),
not(feature = "mssql")
))]
impl_any_encode!(chrono::NaiveTime);
#[cfg(all(
feature = "chrono",
any(feature = "mysql", feature = "sqlite", feature = "postgres"),
not(feature = "mssql")
))]
impl_any_encode!(chrono::NaiveDateTime);
#[cfg(all(
feature = "chrono",
any(feature = "mysql", feature = "sqlite", feature = "postgres"),
not(feature = "mssql")
))]
impl_any_encode!(chrono::DateTime<chrono::offset::Utc>);
#[cfg(all(
feature = "chrono",
any(feature = "sqlite", feature = "postgres", feature = "mysql"),
not(feature = "mssql")
))]
impl_any_encode!(chrono::DateTime<chrono::offset::Local>);
// Decode
#[cfg(all(
feature = "chrono",
any(feature = "mysql", feature = "sqlite", feature = "postgres"),
not(feature = "mssql")
))]
impl_any_decode!(chrono::NaiveDate);
#[cfg(all(
feature = "chrono",
any(feature = "mysql", feature = "sqlite", feature = "postgres"),
not(feature = "mssql")
))]
impl_any_decode!(chrono::NaiveTime);
#[cfg(all(
feature = "chrono",
any(feature = "mysql", feature = "sqlite", feature = "postgres"),
not(feature = "mssql")
))]
impl_any_decode!(chrono::NaiveDateTime);
#[cfg(all(
feature = "chrono",
any(feature = "mysql", feature = "sqlite", feature = "postgres"),
not(feature = "mssql")
))]
impl_any_decode!(chrono::DateTime<chrono::offset::Utc>);
#[cfg(all(
feature = "chrono",
any(feature = "sqlite", feature = "postgres", feature = "mysql"),
not(feature = "mssql")
))]
impl_any_decode!(chrono::DateTime<chrono::offset::Local>);

View file

@ -0,0 +1,58 @@
use crate::any::{Any, AnyTypeInfo, AnyTypeInfoKind, AnyValueKind};
use crate::database::{HasArguments, HasValueRef};
use crate::decode::Decode;
use crate::encode::{Encode, IsNull};
use crate::error::BoxDynError;
use crate::types::Type;
use std::borrow::Cow;
impl Type<Any> for [u8] {
fn type_info() -> AnyTypeInfo {
AnyTypeInfo {
kind: AnyTypeInfoKind::Blob,
}
}
}
impl<'q> Encode<'q, Any> for &'q [u8] {
fn encode_by_ref(&self, buf: &mut <Any as HasArguments<'q>>::ArgumentBuffer) -> IsNull {
buf.0.push(AnyValueKind::Blob((*self).into()));
IsNull::No
}
}
impl<'r> Decode<'r, Any> for &'r [u8] {
fn decode(value: <Any as HasValueRef<'r>>::ValueRef) -> Result<Self, BoxDynError> {
match value.kind {
AnyValueKind::Blob(Cow::Borrowed(blob)) => Ok(blob),
// This shouldn't happen in practice, it means the user got an `AnyValueRef`
// constructed from an owned `Vec<u8>` which shouldn't be allowed by the API.
AnyValueKind::Blob(Cow::Owned(_text)) => {
panic!("attempting to return a borrow that outlives its buffer")
}
other => other.unexpected(),
}
}
}
impl Type<Any> for Vec<u8> {
fn type_info() -> AnyTypeInfo {
<[u8] as Type<Any>>::type_info()
}
}
impl<'q> Encode<'q, Any> for Vec<u8> {
fn encode_by_ref(&self, buf: &mut <Any as HasArguments<'q>>::ArgumentBuffer) -> IsNull {
buf.0.push(AnyValueKind::Blob(Cow::Owned(self.clone())));
IsNull::No
}
}
impl<'r> Decode<'r, Any> for Vec<u8> {
fn decode(value: <Any as HasValueRef<'r>>::ValueRef) -> Result<Self, BoxDynError> {
match value.kind {
AnyValueKind::Blob(blob) => Ok(blob.into_owned()),
other => other.unexpected(),
}
}
}

View file

@ -0,0 +1,30 @@
use crate::any::{Any, AnyTypeInfo, AnyTypeInfoKind, AnyValueKind};
use crate::database::{HasArguments, HasValueRef};
use crate::decode::Decode;
use crate::encode::{Encode, IsNull};
use crate::error::BoxDynError;
use crate::types::Type;
impl Type<Any> for bool {
fn type_info() -> AnyTypeInfo {
AnyTypeInfo {
kind: AnyTypeInfoKind::Bool,
}
}
}
impl<'q> Encode<'q, Any> for bool {
fn encode_by_ref(&self, buf: &mut <Any as HasArguments<'q>>::ArgumentBuffer) -> IsNull {
buf.0.push(AnyValueKind::Bool(*self));
IsNull::No
}
}
impl<'r> Decode<'r, Any> for bool {
fn decode(value: <Any as HasValueRef<'r>>::ValueRef) -> Result<Self, BoxDynError> {
match value.kind {
AnyValueKind::Bool(b) => Ok(b),
other => other.unexpected(),
}
}
}

View file

@ -0,0 +1,56 @@
use crate::any::{Any, AnyArgumentBuffer, AnyTypeInfo, AnyTypeInfoKind, AnyValueKind, AnyValueRef};
use crate::database::{HasArguments, HasValueRef};
use crate::decode::Decode;
use crate::encode::{Encode, IsNull};
use crate::error::BoxDynError;
use crate::types::Type;
impl Type<Any> for f32 {
fn type_info() -> AnyTypeInfo {
AnyTypeInfo {
kind: AnyTypeInfoKind::Real,
}
}
}
impl<'q> Encode<'q, Any> for f32 {
fn encode_by_ref(&self, buf: &mut AnyArgumentBuffer) -> IsNull {
buf.0.push(AnyValueKind::Real(*self));
IsNull::No
}
}
impl<'r> Decode<'r, Any> for f32 {
fn decode(value: AnyValueRef<'r>) -> Result<Self, BoxDynError> {
match value.kind {
AnyValueKind::Real(r) => Ok(r),
other => other.unexpected(),
}
}
}
impl Type<Any> for f64 {
fn type_info() -> AnyTypeInfo {
AnyTypeInfo {
kind: AnyTypeInfoKind::Double,
}
}
}
impl<'q> Encode<'q, Any> for f64 {
fn encode_by_ref(&self, buf: &mut <Any as HasArguments<'q>>::ArgumentBuffer) -> IsNull {
buf.0.push(AnyValueKind::Double(*self));
IsNull::No
}
}
impl<'r> Decode<'r, Any> for f64 {
fn decode(value: <Any as HasValueRef<'r>>::ValueRef) -> Result<Self, BoxDynError> {
match value.kind {
// Widening is safe
AnyValueKind::Real(r) => Ok(r as f64),
AnyValueKind::Double(d) => Ok(d),
other => other.unexpected(),
}
}
}

View file

@ -0,0 +1,81 @@
use crate::any::{Any, AnyTypeInfo, AnyTypeInfoKind, AnyValueKind};
use crate::database::{HasArguments, HasValueRef};
use crate::decode::Decode;
use crate::encode::{Encode, IsNull};
use crate::error::BoxDynError;
use crate::types::Type;
impl Type<Any> for i16 {
fn type_info() -> AnyTypeInfo {
AnyTypeInfo {
kind: AnyTypeInfoKind::SmallInt,
}
}
fn compatible(ty: &AnyTypeInfo) -> bool {
ty.kind().is_integer()
}
}
impl<'q> Encode<'q, Any> for i16 {
fn encode_by_ref(&self, buf: &mut <Any as HasArguments<'q>>::ArgumentBuffer) -> IsNull {
buf.0.push(AnyValueKind::SmallInt(*self));
IsNull::No
}
}
impl<'r> Decode<'r, Any> for i16 {
fn decode(value: <Any as HasValueRef<'r>>::ValueRef) -> Result<Self, BoxDynError> {
value.kind.try_integer()
}
}
impl Type<Any> for i32 {
fn type_info() -> AnyTypeInfo {
AnyTypeInfo {
kind: AnyTypeInfoKind::Integer,
}
}
fn compatible(ty: &AnyTypeInfo) -> bool {
ty.kind().is_integer()
}
}
impl<'q> Encode<'q, Any> for i32 {
fn encode_by_ref(&self, buf: &mut <Any as HasArguments<'q>>::ArgumentBuffer) -> IsNull {
buf.0.push(AnyValueKind::Integer(*self));
IsNull::No
}
}
impl<'r> Decode<'r, Any> for i32 {
fn decode(value: <Any as HasValueRef<'r>>::ValueRef) -> Result<Self, BoxDynError> {
value.kind.try_integer()
}
}
impl Type<Any> for i64 {
fn type_info() -> AnyTypeInfo {
AnyTypeInfo {
kind: AnyTypeInfoKind::BigInt,
}
}
fn compatible(ty: &AnyTypeInfo) -> bool {
ty.kind().is_integer()
}
}
impl<'q> Encode<'q, Any> for i64 {
fn encode_by_ref(&self, buf: &mut <Any as HasArguments<'q>>::ArgumentBuffer) -> IsNull {
buf.0.push(AnyValueKind::BigInt(*self));
IsNull::No
}
}
impl<'r> Decode<'r, Any> for i64 {
fn decode(value: <Any as HasValueRef<'r>>::ValueRef) -> Result<Self, BoxDynError> {
value.kind.try_integer()
}
}

View file

@ -0,0 +1,58 @@
//! Conversions between Rust and standard **SQL** types.
//!
//! # Types
//!
//! | Rust type | SQL type(s) |
//! |---------------------------------------|------------------------------------------------------|
//! | `bool` | BOOLEAN |
//! | `i16` | SMALLINT |
//! | `i32` | INT |
//! | `i64` | BIGINT |
//! | `f32` | FLOAT |
//! | `f64` | DOUBLE |
//! | `&str`, [`String`] | VARCHAR, CHAR, TEXT |
//!
//! # Nullable
//!
//! In addition, `Option<T>` is supported where `T` implements `Type`. An `Option<T>` represents
//! a potentially `NULL` value from SQL.
use crate::any::type_info::AnyTypeInfoKind;
use crate::any::value::AnyValueKind;
use crate::any::{Any, AnyTypeInfo, AnyValueRef};
use crate::database::{HasArguments, HasValueRef};
use crate::decode::Decode;
use crate::encode::{Encode, IsNull};
use crate::error::BoxDynError;
use crate::types::Type;
use std::borrow::Cow;
mod blob;
mod bool;
mod float;
mod int;
mod str;
/// Compile-time check that the expected `Any` type mappings exist.
///
/// Merely instantiating `has_type::<T>()` forces the compiler to prove
/// `T: Type<Any> + Encode<Any> + Decode<Any>`, so the test needs no
/// runtime assertions — it passes if it compiles.
#[test]
fn test_type_impls() {
    fn has_type<T>()
    where
        T: Type<Any>,
        for<'a> T: Encode<'a, Any>,
        for<'a> T: Decode<'a, Any>,
    {
    }
    has_type::<bool>();
    has_type::<i16>();
    has_type::<i32>();
    has_type::<i64>();
    has_type::<f32>();
    has_type::<f64>();
    // These imply that there are also impls for the equivalent slice types.
    has_type::<Vec<u8>>();
    has_type::<String>();
}

View file

@ -0,0 +1,66 @@
use crate::any::types::str;
use crate::any::{Any, AnyTypeInfo, AnyTypeInfoKind, AnyValueKind};
use crate::database::{HasArguments, HasValueRef};
use crate::decode::Decode;
use crate::encode::{Encode, IsNull};
use crate::error::BoxDynError;
use crate::types::Type;
use std::borrow::Cow;
/// Maps `str` to the `Any` driver's `TEXT` type.
impl Type<Any> for str {
    fn type_info() -> AnyTypeInfo {
        AnyTypeInfo {
            kind: AnyTypeInfoKind::Text,
        }
    }
}
impl<'a> Encode<'a, Any> for &'a str {
    // By-value encode can borrow the string into the buffer without copying.
    fn encode(self, buf: &mut <Any as HasArguments<'a>>::ArgumentBuffer) -> IsNull
    where
        Self: Sized,
    {
        buf.0.push(AnyValueKind::Text(self.into()));
        IsNull::No
    }
    fn encode_by_ref(&self, buf: &mut <Any as HasArguments<'a>>::ArgumentBuffer) -> IsNull {
        // Delegate to the by-value impl; copying a `&str` reference is free.
        (*self).encode(buf)
    }
}
impl<'a> Decode<'a, Any> for &'a str {
    /// Decodes a borrowed string slice; only succeeds for borrowed text.
    fn decode(value: <Any as HasValueRef<'a>>::ValueRef) -> Result<Self, BoxDynError> {
        match value.kind {
            AnyValueKind::Text(Cow::Borrowed(text)) => Ok(text),
            // This shouldn't happen in practice, it means the user got an `AnyValueRef`
            // constructed from an owned `String` which shouldn't be allowed by the API.
            AnyValueKind::Text(Cow::Owned(_text)) => {
                panic!("attempting to return a borrow that outlives its buffer")
            }
            other => other.unexpected(),
        }
    }
}
impl Type<Any> for String {
    // `String` shares `str`'s type info (TEXT).
    fn type_info() -> AnyTypeInfo {
        <str as Type<Any>>::type_info()
    }
}
impl<'q> Encode<'q, Any> for String {
    // Clones the string so the argument buffer owns its data for the
    // `'q` lifetime.
    fn encode_by_ref(&self, buf: &mut <Any as HasArguments<'q>>::ArgumentBuffer) -> IsNull {
        buf.0.push(AnyValueKind::Text(Cow::Owned(self.clone())));
        IsNull::No
    }
}
impl<'r> Decode<'r, Any> for String {
    /// Decodes an owned `String` from either borrowed or owned text
    /// (borrowed text is copied via `into_owned`).
    fn decode(value: <Any as HasValueRef<'r>>::ValueRef) -> Result<Self, BoxDynError> {
        match value.kind {
            AnyValueKind::Text(text) => Ok(text.into_owned()),
            other => other.unexpected(),
        }
    }
}

View file

@ -1,155 +1,131 @@
use std::borrow::Cow;
use std::marker::PhantomData;
use std::num::TryFromIntError;
use crate::any::error::mismatched_types;
use crate::any::{Any, AnyTypeInfo};
use crate::database::HasValueRef;
use crate::any::{Any, AnyTypeInfo, AnyTypeInfoKind};
use crate::database::{Database, HasValueRef};
use crate::decode::Decode;
use crate::error::Error;
use crate::error::{BoxDynError, Error};
use crate::io::Encode;
use crate::type_info::TypeInfo;
use crate::types::Type;
use crate::value::{Value, ValueRef};
#[cfg(feature = "postgres")]
use crate::postgres::{PgValue, PgValueRef};
#[derive(Clone, Debug)]
#[non_exhaustive]
pub enum AnyValueKind<'a> {
Null,
Bool(bool),
SmallInt(i16),
Integer(i32),
BigInt(i64),
Real(f32),
Double(f64),
Text(Cow<'a, str>),
Blob(Cow<'a, [u8]>),
}
#[cfg(feature = "mysql")]
use crate::mysql::{MySqlValue, MySqlValueRef};
impl AnyValueKind<'_> {
fn type_info(&self) -> AnyTypeInfo {
AnyTypeInfo {
kind: match self {
AnyValueKind::Null => AnyTypeInfoKind::Null,
AnyValueKind::Bool(_) => AnyTypeInfoKind::Bool,
AnyValueKind::SmallInt(_) => AnyTypeInfoKind::SmallInt,
AnyValueKind::Integer(_) => AnyTypeInfoKind::Integer,
AnyValueKind::BigInt(_) => AnyTypeInfoKind::BigInt,
AnyValueKind::Real(_) => AnyTypeInfoKind::Real,
AnyValueKind::Double(_) => AnyTypeInfoKind::Double,
AnyValueKind::Text(_) => AnyTypeInfoKind::Text,
AnyValueKind::Blob(_) => AnyTypeInfoKind::Blob,
},
}
}
#[cfg(feature = "sqlite")]
use crate::sqlite::{SqliteValue, SqliteValueRef};
pub(in crate::any) fn unexpected<Expected: Type<Any>>(&self) -> Result<Expected, BoxDynError> {
Err(format!("expected {}, got {:?}", Expected::type_info(), self).into())
}
#[cfg(feature = "mssql")]
use crate::mssql::{MssqlValue, MssqlValueRef};
pub(in crate::any) fn try_integer<T>(&self) -> Result<T, BoxDynError>
where
T: Type<Any> + TryFrom<i16> + TryFrom<i32> + TryFrom<i64>,
BoxDynError: From<<T as TryFrom<i16>>::Error>,
BoxDynError: From<<T as TryFrom<i32>>::Error>,
BoxDynError: From<<T as TryFrom<i64>>::Error>,
{
Ok(match self {
AnyValueKind::SmallInt(i) => (*i).try_into()?,
AnyValueKind::Integer(i) => (*i).try_into()?,
AnyValueKind::BigInt(i) => (*i).try_into()?,
_ => return self.unexpected(),
})
}
}
#[derive(Clone, Debug)]
pub struct AnyValue {
pub(crate) kind: AnyValueKind,
pub(crate) type_info: AnyTypeInfo,
#[doc(hidden)]
pub kind: AnyValueKind<'static>,
}
pub(crate) enum AnyValueKind {
#[cfg(feature = "postgres")]
Postgres(PgValue),
#[cfg(feature = "mysql")]
MySql(MySqlValue),
#[cfg(feature = "sqlite")]
Sqlite(SqliteValue),
#[cfg(feature = "mssql")]
Mssql(MssqlValue),
}
pub struct AnyValueRef<'r> {
pub(crate) kind: AnyValueRefKind<'r>,
pub(crate) type_info: AnyTypeInfo,
}
pub(crate) enum AnyValueRefKind<'r> {
#[cfg(feature = "postgres")]
Postgres(PgValueRef<'r>),
#[cfg(feature = "mysql")]
MySql(MySqlValueRef<'r>),
#[cfg(feature = "sqlite")]
Sqlite(SqliteValueRef<'r>),
#[cfg(feature = "mssql")]
Mssql(MssqlValueRef<'r>),
#[derive(Clone, Debug)]
pub struct AnyValueRef<'a> {
pub(crate) kind: AnyValueKind<'a>,
}
impl Value for AnyValue {
type Database = Any;
fn as_ref(&self) -> <Self::Database as HasValueRef<'_>>::ValueRef {
match &self.kind {
#[cfg(feature = "postgres")]
AnyValueKind::Postgres(value) => value.as_ref().into(),
#[cfg(feature = "mysql")]
AnyValueKind::MySql(value) => value.as_ref().into(),
#[cfg(feature = "sqlite")]
AnyValueKind::Sqlite(value) => value.as_ref().into(),
#[cfg(feature = "mssql")]
AnyValueKind::Mssql(value) => value.as_ref().into(),
AnyValueRef {
kind: match &self.kind {
AnyValueKind::Null => AnyValueKind::Null,
AnyValueKind::Bool(b) => AnyValueKind::Bool(*b),
AnyValueKind::SmallInt(i) => AnyValueKind::SmallInt(*i),
AnyValueKind::Integer(i) => AnyValueKind::Integer(*i),
AnyValueKind::BigInt(i) => AnyValueKind::BigInt(*i),
AnyValueKind::Real(r) => AnyValueKind::Real(*r),
AnyValueKind::Double(d) => AnyValueKind::Double(*d),
AnyValueKind::Text(t) => AnyValueKind::Text(Cow::Borrowed(t)),
AnyValueKind::Blob(b) => AnyValueKind::Blob(Cow::Borrowed(b)),
},
}
}
fn type_info(&self) -> Cow<'_, AnyTypeInfo> {
Cow::Borrowed(&self.type_info)
fn type_info(&self) -> Cow<'_, <Self::Database as Database>::TypeInfo> {
Cow::Owned(self.kind.type_info())
}
fn is_null(&self) -> bool {
match &self.kind {
#[cfg(feature = "postgres")]
AnyValueKind::Postgres(value) => value.is_null(),
#[cfg(feature = "mysql")]
AnyValueKind::MySql(value) => value.is_null(),
#[cfg(feature = "sqlite")]
AnyValueKind::Sqlite(value) => value.is_null(),
#[cfg(feature = "mssql")]
AnyValueKind::Mssql(value) => value.is_null(),
}
}
fn try_decode<'r, T>(&'r self) -> Result<T, Error>
where
T: Decode<'r, Self::Database> + Type<Self::Database>,
{
if !self.is_null() {
let ty = self.type_info();
if !ty.is_null() && !T::compatible(&ty) {
return Err(Error::Decode(mismatched_types::<T>(&ty)));
}
}
self.try_decode_unchecked()
false
}
}
impl<'r> ValueRef<'r> for AnyValueRef<'r> {
impl<'a> ValueRef<'a> for AnyValueRef<'a> {
type Database = Any;
fn to_owned(&self) -> AnyValue {
match &self.kind {
#[cfg(feature = "postgres")]
AnyValueRefKind::Postgres(value) => ValueRef::to_owned(value).into(),
#[cfg(feature = "mysql")]
AnyValueRefKind::MySql(value) => ValueRef::to_owned(value).into(),
#[cfg(feature = "sqlite")]
AnyValueRefKind::Sqlite(value) => ValueRef::to_owned(value).into(),
#[cfg(feature = "mssql")]
AnyValueRefKind::Mssql(value) => ValueRef::to_owned(value).into(),
fn to_owned(&self) -> <Self::Database as Database>::Value {
AnyValue {
kind: match &self.kind {
AnyValueKind::Null => AnyValueKind::Null,
AnyValueKind::Bool(b) => AnyValueKind::Bool(*b),
AnyValueKind::SmallInt(i) => AnyValueKind::SmallInt(*i),
AnyValueKind::Integer(i) => AnyValueKind::Integer(*i),
AnyValueKind::BigInt(i) => AnyValueKind::BigInt(*i),
AnyValueKind::Real(r) => AnyValueKind::Real(*r),
AnyValueKind::Double(d) => AnyValueKind::Double(*d),
AnyValueKind::Text(t) => AnyValueKind::Text(Cow::Owned(t.to_string())),
AnyValueKind::Blob(b) => AnyValueKind::Blob(Cow::Owned(b.to_vec())),
},
}
}
fn type_info(&self) -> Cow<'_, AnyTypeInfo> {
Cow::Borrowed(&self.type_info)
fn type_info(&self) -> Cow<'_, <Self::Database as Database>::TypeInfo> {
Cow::Owned(self.kind.type_info())
}
fn is_null(&self) -> bool {
match &self.kind {
#[cfg(feature = "postgres")]
AnyValueRefKind::Postgres(value) => value.is_null(),
#[cfg(feature = "mysql")]
AnyValueRefKind::MySql(value) => value.is_null(),
#[cfg(feature = "sqlite")]
AnyValueRefKind::Sqlite(value) => value.is_null(),
#[cfg(feature = "mssql")]
AnyValueRefKind::Mssql(value) => value.is_null(),
}
false
}
}

View file

@ -28,13 +28,13 @@ pub trait IntoArguments<'q, DB: HasArguments<'q>>: Sized + Send {
}
// NOTE: required due to lack of lazy normalization
#[allow(unused_macros)]
#[macro_export]
macro_rules! impl_into_arguments_for_arguments {
($Arguments:path) => {
impl<'q>
crate::arguments::IntoArguments<
$crate::arguments::IntoArguments<
'q,
<$Arguments as crate::arguments::Arguments<'q>>::Database,
<$Arguments as $crate::arguments::Arguments<'q>>::Database,
> for $Arguments
{
fn into_arguments(self) -> $Arguments {

View file

@ -1,8 +1,9 @@
use crate::database::Database;
use crate::error::Error;
use std::fmt::Debug;
pub trait Column: private_column::Sealed + 'static + Send + Sync + Debug {
pub trait Column: 'static + Send + Sync + Debug {
type Database: Database;
/// Gets the column ordinal.
@ -21,11 +22,6 @@ pub trait Column: private_column::Sealed + 'static + Send + Sync + Debug {
fn type_info(&self) -> &<Self::Database as Database>::TypeInfo;
}
// Prevent users from implementing the `Row` trait.
pub(crate) mod private_column {
pub trait Sealed {}
}
/// A type that can be used to index into a [`Row`] or [`Statement`].
///
/// The [`get`] and [`try_get`] methods of [`Row`] accept any type that implements `ColumnIndex`.
@ -39,7 +35,7 @@ pub(crate) mod private_column {
/// [`get`]: crate::row::Row::get
/// [`try_get`]: crate::row::Row::try_get
///
pub trait ColumnIndex<T: ?Sized>: private_column_index::Sealed + Debug {
pub trait ColumnIndex<T: ?Sized>: Debug {
/// Returns a valid positional index into the row or statement, [`ColumnIndexOutOfBounds`], or,
/// [`ColumnNotFound`].
///
@ -55,14 +51,15 @@ impl<T: ?Sized, I: ColumnIndex<T> + ?Sized> ColumnIndex<T> for &'_ I {
}
}
#[macro_export]
macro_rules! impl_column_index_for_row {
($R:ident) => {
impl crate::column::ColumnIndex<$R> for usize {
fn index(&self, row: &$R) -> Result<usize, crate::error::Error> {
let len = crate::row::Row::len(row);
impl $crate::column::ColumnIndex<$R> for usize {
fn index(&self, row: &$R) -> Result<usize, $crate::error::Error> {
let len = $crate::row::Row::len(row);
if *self >= len {
return Err(crate::error::Error::ColumnIndexOutOfBounds { len, index: *self });
return Err($crate::error::Error::ColumnIndexOutOfBounds { len, index: *self });
}
Ok(*self)
@ -71,14 +68,15 @@ macro_rules! impl_column_index_for_row {
};
}
#[macro_export]
macro_rules! impl_column_index_for_statement {
($S:ident) => {
impl crate::column::ColumnIndex<$S<'_>> for usize {
fn index(&self, statement: &$S<'_>) -> Result<usize, crate::error::Error> {
let len = crate::statement::Statement::columns(statement).len();
impl $crate::column::ColumnIndex<$S<'_>> for usize {
fn index(&self, statement: &$S<'_>) -> Result<usize, $crate::error::Error> {
let len = $crate::statement::Statement::columns(statement).len();
if *self >= len {
return Err(crate::error::Error::ColumnIndexOutOfBounds { len, index: *self });
return Err($crate::error::Error::ColumnIndexOutOfBounds { len, index: *self });
}
Ok(*self)
@ -86,12 +84,3 @@ macro_rules! impl_column_index_for_statement {
}
};
}
// Prevent users from implementing the `ColumnIndex` trait.
mod private_column_index {
pub trait Sealed {}
impl Sealed for usize {}
impl Sealed for str {}
impl<T> Sealed for &'_ T where T: Sealed + ?Sized {}
}

View file

@ -1,11 +1,11 @@
mod statement_cache;
pub(crate) use statement_cache::StatementCache;
pub use statement_cache::StatementCache;
use std::fmt::{Debug, Formatter};
use std::ops::{Deref, DerefMut};
/// A wrapper for `Fn`s that provides a debug impl that just says "Function"
pub(crate) struct DebugFn<F: ?Sized>(pub F);
pub struct DebugFn<F: ?Sized>(pub F);
impl<F: ?Sized> Deref for DebugFn<F> {
type Target = F;

View file

@ -49,7 +49,6 @@ impl<T> StatementCache<T> {
}
/// Clear all cached statements from the cache.
#[cfg(feature = "sqlite")]
pub fn clear(&mut self) {
self.inner.clear();
}

View file

@ -1,11 +1,13 @@
use crate::database::{Database, HasStatementCache};
use crate::error::Error;
use crate::transaction::Transaction;
use futures_core::future::BoxFuture;
use log::LevelFilter;
use std::fmt::Debug;
use std::str::FromStr;
use std::time::Duration;
use url::Url;
/// Represents a single database connection.
pub trait Connection: Send {
@ -44,15 +46,12 @@ pub trait Connection: Send {
/// # Example
///
/// ```rust
/// use sqlx_core::connection::Connection;
/// use sqlx_core::error::Error;
/// use sqlx_core::executor::Executor;
/// use sqlx_core::postgres::{PgConnection, PgRow};
/// use sqlx_core::query::query;
/// use sqlx::postgres::{PgConnection, PgRow};
/// use sqlx::Connection;
///
/// # pub async fn _f(conn: &mut PgConnection) -> Result<Vec<PgRow>, Error> {
/// conn.transaction(|conn|Box::pin(async move {
/// query("select * from ..").fetch_all(conn).await
/// # pub async fn _f(conn: &mut PgConnection) -> sqlx::Result<Vec<PgRow>> {
/// conn.transaction(|txn| Box::pin(async move {
/// sqlx::query("select * from ..").fetch_all(&mut **txn).await
/// })).await
/// # }
/// ```
@ -132,10 +131,11 @@ pub trait Connection: Send {
}
#[derive(Clone, Debug)]
pub(crate) struct LogSettings {
pub(crate) statements_level: LevelFilter,
pub(crate) slow_statements_level: LevelFilter,
pub(crate) slow_statements_duration: Duration,
#[non_exhaustive]
pub struct LogSettings {
pub statements_level: LevelFilter,
pub slow_statements_level: LevelFilter,
pub slow_statements_duration: Duration,
}
impl Default for LogSettings {
@ -149,10 +149,10 @@ impl Default for LogSettings {
}
impl LogSettings {
pub(crate) fn log_statements(&mut self, level: LevelFilter) {
pub fn log_statements(&mut self, level: LevelFilter) {
self.statements_level = level;
}
pub(crate) fn log_slow_statements(&mut self, level: LevelFilter, duration: Duration) {
pub fn log_slow_statements(&mut self, level: LevelFilter, duration: Duration) {
self.slow_statements_level = level;
self.slow_statements_duration = duration;
}
@ -161,6 +161,9 @@ impl LogSettings {
pub trait ConnectOptions: 'static + Send + Sync + FromStr<Err = Error> + Debug + Clone {
type Connection: Connection + ?Sized;
/// Parse the `ConnectOptions` from a URL.
fn from_url(url: &Url) -> Result<Self, Error>;
/// Establish a new database connection with the options specified by `self`.
fn connect(&self) -> BoxFuture<'_, Result<Self::Connection, Error>>
where

View file

@ -59,6 +59,7 @@ use crate::arguments::Arguments;
use crate::column::Column;
use crate::connection::Connection;
use crate::row::Row;
use crate::statement::Statement;
use crate::transaction::TransactionManager;
use crate::type_info::TypeInfo;
@ -98,6 +99,12 @@ pub trait Database:
/// The concrete type used to hold an owned copy of the not-yet-decoded value that was
/// received from the database.
type Value: Value<Database = Self> + 'static;
/// The display name for this database driver.
const NAME: &'static str;
/// The schemes for database URLs that should match this driver.
const URL_SCHEMES: &'static [&'static str];
}
/// Associate [`Database`] with a [`ValueRef`](crate::value::ValueRef) of a generic lifetime.

View file

@ -2,6 +2,7 @@
use crate::database::{Database, HasValueRef};
use crate::error::BoxDynError;
use crate::value::ValueRef;
/// A type that can be decoded from the database.

View file

@ -19,9 +19,9 @@ use std::convert::identity;
)]
#[doc(hidden)]
pub struct Describe<DB: Database> {
pub(crate) columns: Vec<DB::Column>,
pub(crate) parameters: Option<Either<Vec<DB::TypeInfo>, usize>>,
pub(crate) nullable: Vec<Option<bool>>,
pub columns: Vec<DB::Column>,
pub parameters: Option<Either<Vec<DB::TypeInfo>, usize>>,
pub nullable: Vec<Option<bool>>,
}
impl<DB: Database> Describe<DB> {
@ -54,3 +54,50 @@ impl<DB: Database> Describe<DB> {
self.nullable.get(column).copied().and_then(identity)
}
}
#[cfg(feature = "any")]
impl<DB: Database> Describe<DB> {
    /// Attempts to convert this driver-specific `Describe` into the `Any`
    /// driver's representation.
    ///
    /// Fails if any column or parameter type has no `Any` equivalent.
    /// Parameter conversion failures are wrapped in
    /// [`crate::Error::AnyDriverError`] with the offending parameter's index;
    /// column conversion errors are propagated as-is (their `TryFrom` impl
    /// already yields `crate::Error`).
    #[doc(hidden)]
    pub fn try_into_any(self) -> crate::Result<Describe<crate::any::Any>>
    where
        crate::any::AnyColumn: for<'a> TryFrom<&'a DB::Column, Error = crate::Error>,
        crate::any::AnyTypeInfo: for<'a> TryFrom<&'a DB::TypeInfo, Error = crate::Error>,
    {
        use crate::any::AnyTypeInfo;
        use std::convert::TryFrom;
        let columns = self
            .columns
            .iter()
            .map(crate::any::AnyColumn::try_from)
            .collect::<Result<Vec<_>, _>>()?;
        let parameters = match self.parameters {
            Some(Either::Left(parameters)) => Some(Either::Left(
                parameters
                    .iter()
                    .enumerate()
                    .map(|(i, type_info)| {
                        AnyTypeInfo::try_from(type_info).map_err(|_| {
                            crate::Error::AnyDriverError(
                                format!(
                                    "Any driver does not support type {} of parameter {}",
                                    type_info, i
                                )
                                .into(),
                            )
                        })
                    })
                    .collect::<Result<Vec<_>, _>>()?,
            )),
            // A bare parameter count carries no type info, so there is
            // nothing to convert.
            Some(Either::Right(count)) => Some(Either::Right(count)),
            None => None,
        };
        Ok(Describe {
            columns,
            parameters,
            nullable: self.nullable,
        })
    }
}

View file

@ -70,15 +70,15 @@ where
}
}
#[allow(unused_macros)]
#[macro_export]
macro_rules! impl_encode_for_option {
($DB:ident) => {
impl<'q, T> crate::encode::Encode<'q, $DB> for Option<T>
impl<'q, T> $crate::encode::Encode<'q, $DB> for Option<T>
where
T: crate::encode::Encode<'q, $DB> + crate::types::Type<$DB> + 'q,
T: $crate::encode::Encode<'q, $DB> + $crate::types::Type<$DB> + 'q,
{
#[inline]
fn produces(&self) -> Option<<$DB as crate::database::Database>::TypeInfo> {
fn produces(&self) -> Option<<$DB as $crate::database::Database>::TypeInfo> {
if let Some(v) = self {
v.produces()
} else {
@ -89,30 +89,30 @@ macro_rules! impl_encode_for_option {
#[inline]
fn encode(
self,
buf: &mut <$DB as crate::database::HasArguments<'q>>::ArgumentBuffer,
) -> crate::encode::IsNull {
buf: &mut <$DB as $crate::database::HasArguments<'q>>::ArgumentBuffer,
) -> $crate::encode::IsNull {
if let Some(v) = self {
v.encode(buf)
} else {
crate::encode::IsNull::Yes
$crate::encode::IsNull::Yes
}
}
#[inline]
fn encode_by_ref(
&self,
buf: &mut <$DB as crate::database::HasArguments<'q>>::ArgumentBuffer,
) -> crate::encode::IsNull {
buf: &mut <$DB as $crate::database::HasArguments<'q>>::ArgumentBuffer,
) -> $crate::encode::IsNull {
if let Some(v) = self {
v.encode_by_ref(buf)
} else {
crate::encode::IsNull::Yes
$crate::encode::IsNull::Yes
}
}
#[inline]
fn size_hint(&self) -> usize {
self.as_ref().map_or(0, crate::encode::Encode::size_hint)
self.as_ref().map_or(0, $crate::encode::Encode::size_hint)
}
}
};

View file

@ -5,14 +5,14 @@ use std::borrow::Cow;
use std::error::Error as StdError;
use std::fmt::Display;
use std::io;
use std::result::Result as StdResult;
use crate::database::Database;
use crate::type_info::TypeInfo;
use crate::types::Type;
/// A specialized `Result` type for SQLx.
pub type Result<T> = StdResult<T, Error>;
pub type Result<T, E = Error> = ::std::result::Result<T, E>;
// Convenience type alias for usage within SQLx.
// Do not make this type public.
@ -82,6 +82,10 @@ pub enum Error {
#[error("error occurred while decoding: {0}")]
Decode(#[source] BoxDynError),
/// Error occurred within the `Any` driver mapping to/from the native driver.
#[error("error in Any driver mapping: {0}")]
AnyDriverError(#[source] BoxDynError),
/// A [`Pool::acquire`] timed out due to connections not becoming available or
/// because another task encountered too many errors while trying to open a new connection.
///
@ -122,20 +126,30 @@ impl Error {
}
}
#[allow(dead_code)]
#[doc(hidden)]
#[inline]
pub(crate) fn protocol(err: impl Display) -> Self {
pub fn protocol(err: impl Display) -> Self {
Error::Protocol(err.to_string())
}
#[allow(dead_code)]
#[doc(hidden)]
#[inline]
pub(crate) fn config(err: impl StdError + Send + Sync + 'static) -> Self {
pub fn config(err: impl StdError + Send + Sync + 'static) -> Self {
Error::Configuration(err.into())
}
pub(crate) fn tls(err: impl Into<Box<dyn StdError + Send + Sync + 'static>>) -> Self {
Error::Tls(err.into())
}
#[doc(hidden)]
#[inline]
pub fn decode(err: impl Into<Box<dyn StdError + Send + Sync + 'static>>) -> Self {
Error::Decode(err.into())
}
}
pub(crate) fn mismatched_types<DB: Database, T: Type<DB>>(ty: &DB::TypeInfo) -> BoxDynError {
pub fn mismatched_types<DB: Database, T: Type<DB>>(ty: &DB::TypeInfo) -> BoxDynError {
// TODO: `#name` only produces `TINYINT` but perhaps we want to show `TINYINT(1)`
format!(
"mismatched types; Rust type `{}` (as SQL type `{}`) is not compatible with SQL type `{}`",
@ -223,7 +237,7 @@ impl dyn DatabaseError {
/// Downcast this generic database error to a specific database error type.
#[inline]
pub fn try_downcast<E: DatabaseError>(self: Box<Self>) -> StdResult<Box<E>, Box<Self>> {
pub fn try_downcast<E: DatabaseError>(self: Box<Self>) -> Result<Box<E>, Box<Self>> {
if self.as_error().is::<E>() {
Ok(self.into_error().downcast().unwrap())
} else {
@ -250,15 +264,8 @@ impl From<crate::migrate::MigrateError> for Error {
}
}
#[cfg(feature = "_tls-native-tls")]
impl From<sqlx_rt::native_tls::Error> for Error {
#[inline]
fn from(error: sqlx_rt::native_tls::Error) -> Self {
Error::Tls(Box::new(error))
}
}
// Format an error message as a `Protocol` error
/// Format an error message as a `Protocol` error
#[macro_export]
macro_rules! err_protocol {
($expr:expr) => {
$crate::error::Error::Protocol($expr.into())

View file

@ -1,6 +1,7 @@
use crate::database::{Database, HasArguments, HasStatement};
use crate::describe::Describe;
use crate::error::Error;
use either::Either;
use futures_core::future::BoxFuture;
use futures_core::stream::BoxStream;

View file

@ -58,6 +58,7 @@ impl<'a, T> Stream for TryAsyncStream<'a, T> {
}
}
#[macro_export]
macro_rules! try_stream {
($($block:tt)*) => {
crate::ext::async_stream::TryAsyncStream::new(move |mut sender| async move {

View file

@ -14,8 +14,7 @@ pub enum UStr {
}
impl UStr {
#[allow(dead_code)]
pub(crate) fn new(s: &str) -> Self {
pub fn new(s: &str) -> Self {
UStr::Shared(Arc::from(s.to_owned()))
}
}

96
sqlx-core/src/fs.rs Normal file
View file

@ -0,0 +1,96 @@
use std::ffi::OsString;
use std::fs::Metadata;
use std::io;
use std::path::{Path, PathBuf};
use crate::rt;
/// Async wrapper over [`std::fs::ReadDir`]; iteration happens on the
/// runtime's blocking thread pool via [`ReadDir::next`].
pub struct ReadDir {
    // `None` once the underlying iterator is exhausted, or if a previous
    // `next()` call was cancelled before the iterator could be put back.
    inner: Option<std::fs::ReadDir>,
}
/// A directory entry paired with its metadata, all gathered in a single
/// blocking call (see `ReadDir::next`).
pub struct DirEntry {
    pub path: PathBuf,
    pub file_name: OsString,
    pub metadata: Metadata,
}
// Filesystem operations are generally not capable of being non-blocking
// so Tokio and async-std don't bother; they just send the work to a blocking thread pool.
//
// We save on code duplication here by just implementing the same strategy ourselves
// using the runtime's `spawn_blocking()` primitive.
/// Async version of [`std::fs::read`]: reads an entire file into a byte vector.
pub async fn read<P: AsRef<Path>>(path: P) -> io::Result<Vec<u8>> {
    let owned = path.as_ref().to_path_buf();
    rt::spawn_blocking(move || std::fs::read(owned)).await
}
/// Async version of [`std::fs::read_to_string`].
pub async fn read_to_string<P: AsRef<Path>>(path: P) -> io::Result<String> {
    let owned = path.as_ref().to_path_buf();
    rt::spawn_blocking(move || std::fs::read_to_string(owned)).await
}
/// Async version of [`std::fs::create_dir_all`].
pub async fn create_dir_all<P: AsRef<Path>>(path: P) -> io::Result<()> {
    let owned = path.as_ref().to_path_buf();
    rt::spawn_blocking(move || std::fs::create_dir_all(owned)).await
}
/// Async version of [`std::fs::remove_file`].
pub async fn remove_file<P: AsRef<Path>>(path: P) -> io::Result<()> {
    let owned = path.as_ref().to_path_buf();
    rt::spawn_blocking(move || std::fs::remove_file(owned)).await
}
/// Async version of [`std::fs::remove_dir`].
pub async fn remove_dir<P: AsRef<Path>>(path: P) -> io::Result<()> {
    let owned = path.as_ref().to_path_buf();
    rt::spawn_blocking(move || std::fs::remove_dir(owned)).await
}
/// Async version of [`std::fs::remove_dir_all`].
pub async fn remove_dir_all<P: AsRef<Path>>(path: P) -> io::Result<()> {
    let owned = path.as_ref().to_path_buf();
    rt::spawn_blocking(move || std::fs::remove_dir_all(owned)).await
}
pub async fn read_dir(path: PathBuf) -> io::Result<ReadDir> {
let read_dir = rt::spawn_blocking(move || std::fs::read_dir(path)).await?;
Ok(ReadDir {
inner: Some(read_dir),
})
}
impl ReadDir {
    /// Fetches the next directory entry, running the blocking iteration (and
    /// the entry's `metadata()` call) in a single `spawn_blocking` task.
    ///
    /// Returns `Ok(None)` once the iterator is exhausted, and on every call
    /// thereafter, since `inner` is left as `None`.
    pub async fn next(&mut self) -> io::Result<Option<DirEntry>> {
        // Move the iterator into the blocking task and put it back when the
        // task returns. Because `take()` happens before the await, cancelling
        // this future mid-read leaves `inner` as `None` and the stream simply
        // ends rather than yielding a torn state.
        if let Some(mut read_dir) = self.inner.take() {
            let maybe = rt::spawn_blocking(move || {
                let entry = read_dir.next().transpose()?;
                entry
                    .map(|entry| -> io::Result<_> {
                        // Return the iterator alongside the entry so the
                        // caller can restore it for the next call.
                        Ok((
                            read_dir,
                            DirEntry {
                                path: entry.path(),
                                file_name: entry.file_name(),
                                // We always want the metadata as well so might as well fetch
                                // it in the same blocking call.
                                metadata: entry.metadata()?,
                            },
                        ))
                    })
                    .transpose()
            })
            .await?;
            match maybe {
                Some((read_dir, entry)) => {
                    self.inner = Some(read_dir);
                    Ok(Some(entry))
                }
                None => Ok(None),
            }
        } else {
            Ok(None)
        }
    }
}

View file

@ -103,59 +103,3 @@ where
&mut self.stream
}
}
// Holds a buffer which has been temporarily extended, so that
// we can read into it. Automatically shrinks the buffer back
// down if the read is cancelled (see the `Drop` impl below).
struct BufTruncator<'a> {
    buf: &'a mut BytesMut,
    // Number of bytes of `buf` holding real data; everything past this
    // index is zero-filled scratch space for in-flight reads.
    filled_len: usize,
}
impl<'a> BufTruncator<'a> {
    fn new(buf: &'a mut BytesMut) -> Self {
        let filled_len = buf.len();
        Self { buf, filled_len }
    }
    // Zero-extends the buffer by `space` bytes beyond the filled region.
    fn reserve(&mut self, space: usize) {
        self.buf.resize(self.filled_len + space, 0);
    }
    // Reads from `stream` into the unfilled tail and advances `filled_len`.
    // Returns the number of bytes read (0 indicates EOF).
    async fn read<S: AsyncRead + Unpin>(&mut self, stream: &mut S) -> Result<usize, Error> {
        let n = stream.read(&mut self.buf[self.filled_len..]).await?;
        self.filled_len += n;
        Ok(n)
    }
    // True once the reserved space has been completely filled.
    fn is_full(&self) -> bool {
        self.filled_len >= self.buf.len()
    }
}
impl Drop for BufTruncator<'_> {
    // This truncation is what makes cancellation safe: any unfilled scratch
    // space is removed so `buf` only ever exposes bytes actually received.
    fn drop(&mut self) {
        self.buf.truncate(self.filled_len);
    }
}
/// Reads exactly `cnt` bytes from `stream`, appending them to `buf`.
///
/// If EOF is hit before `cnt` bytes arrive, returns a `ConnectionAborted`
/// error. If this future is dropped mid-read, `BufTruncator`'s `Drop` impl
/// shrinks `buf` back down to only the bytes actually received.
async fn read_raw_into<S: AsyncRead + Unpin>(
    stream: &mut S,
    buf: &mut BytesMut,
    cnt: usize,
) -> Result<(), Error> {
    let mut buf = BufTruncator::new(buf);
    buf.reserve(cnt);
    while !buf.is_full() {
        let n = buf.read(stream).await?;
        if n == 0 {
            // a zero read when we had space in the read buffer
            // should be treated as an EOF
            // and an unexpected EOF means the server told us to go away
            return Err(io::Error::from(io::ErrorKind::ConnectionAborted).into());
        }
    }
    Ok(())
}

View file

@ -1,12 +1,26 @@
mod buf;
mod buf_mut;
mod buf_stream;
// mod buf_stream;
mod decode;
mod encode;
mod write_and_flush;
mod read_buf;
// mod write_and_flush;
pub use buf::BufExt;
pub use buf_mut::BufMutExt;
pub use buf_stream::BufStream;
//pub use buf_stream::BufStream;
pub use decode::Decode;
pub use encode::Encode;
pub use read_buf::ReadBuf;
#[cfg(not(feature = "_rt-tokio"))]
pub use futures_io::AsyncRead;
#[cfg(feature = "_rt-tokio")]
pub use tokio::io::AsyncRead;
#[cfg(not(feature = "_rt-tokio"))]
pub use futures_util::io::AsyncReadExt;
#[cfg(feature = "_rt-tokio")]
pub use tokio::io::AsyncReadExt;

View file

@ -0,0 +1,35 @@
use bytes::{BufMut, BytesMut};
/// An extension for [`BufMut`] for getting a writeable buffer in safe code.
pub trait ReadBuf: BufMut {
    /// Get the full capacity of this buffer as a safely initialized slice.
    fn init_mut(&mut self) -> &mut [u8];
}
impl ReadBuf for &'_ mut [u8] {
    #[inline(always)]
    fn init_mut(&mut self) -> &mut [u8] {
        // A slice is always fully initialized; hand it back unchanged.
        self
    }
}
impl ReadBuf for BytesMut {
    #[inline(always)]
    fn init_mut(&mut self) -> &mut [u8] {
        // `self.remaining_mut()` returns `usize::MAX - self.len()`,
        // so compute the spare capacity from `capacity()` instead.
        let remaining = self.capacity() - self.len();
        // Zero-fill the spare capacity (advancing `len` to `capacity`) so
        // the entire buffer may be exposed as an initialized slice.
        // I'm hoping for most uses that this operation is elided by the optimizer.
        self.put_bytes(0, remaining);
        self
    }
}
#[test]
fn test_read_buf_bytes_mut() {
    // Existing contents are preserved; the spare capacity reads back as zeros.
    let mut buf = BytesMut::with_capacity(8);
    buf.put_u32(0x12345678);
    assert_eq!(buf.init_mut(), [0x12, 0x34, 0x56, 0x78, 0, 0, 0, 0]);
}

View file

@ -1,26 +1,31 @@
//! Core of SQLx, the rust SQL toolkit.
//! Not intended to be used directly.
//!
//! ### Note: Semver Exempt API
//! The API of this crate is not meant for general use and does *not* follow Semantic Versioning.
//! The only crate that follows Semantic Versioning in the project is the `sqlx` crate itself.
//! If you are building a custom SQLx driver, you should pin an exact version for `sqlx-core` to
//! avoid breakages:
//!
//! ```toml
//! sqlx-core = { version = "=0.6.2" }
//! ```
//!
//! And then make releases in lockstep with `sqlx-core`. We recommend all driver crates, in-tree
//! or otherwise, use the same version numbers as `sqlx-core` to avoid confusion.
#![recursion_limit = "512"]
#![warn(future_incompatible, rust_2018_idioms)]
#![allow(clippy::needless_doctest_main, clippy::type_complexity)]
// See `clippy.toml` at the workspace root
#![deny(clippy::disallowed_method)]
//
// The only unsafe code in SQLx is that necessary to interact with native APIs like with SQLite,
// and that can live in its own separate driver crate.
#![forbid(unsafe_code)]
// Allows an API be documented as only available in some specific platforms.
// <https://doc.rust-lang.org/unstable-book/language-features/doc-cfg.html>
#![cfg_attr(docsrs, feature(doc_cfg))]
//
// When compiling with support for SQLite we must allow some unsafe code in order to
// interface with the inherently unsafe C module. This unsafe code is contained
// to the sqlite module.
#![cfg_attr(feature = "sqlite", deny(unsafe_code))]
#![cfg_attr(not(feature = "sqlite"), forbid(unsafe_code))]
#[cfg(feature = "bigdecimal")]
extern crate bigdecimal_ as bigdecimal;
#[macro_use]
mod ext;
pub mod ext;
#[macro_use]
pub mod error;
@ -57,58 +62,60 @@ pub mod column;
#[macro_use]
pub mod statement;
mod common;
pub use either::Either;
pub mod common;
pub mod database;
pub mod describe;
pub mod executor;
pub mod from_row;
mod io;
mod logger;
mod net;
pub mod fs;
pub mod io;
pub mod logger;
pub mod net;
pub mod query_as;
pub mod query_builder;
pub mod query_scalar;
pub mod row;
pub mod rt;
pub mod sync;
pub mod type_info;
pub mod value;
#[cfg(feature = "migrate")]
pub mod migrate;
#[cfg(all(
any(
feature = "postgres",
feature = "mysql",
feature = "mssql",
feature = "sqlite"
),
feature = "any"
))]
#[cfg(feature = "any")]
pub mod any;
#[cfg(feature = "postgres")]
#[cfg_attr(docsrs, doc(cfg(feature = "postgres")))]
pub mod postgres;
#[cfg(feature = "sqlite")]
#[cfg_attr(docsrs, doc(cfg(feature = "sqlite")))]
pub mod sqlite;
#[cfg(feature = "mysql")]
#[cfg_attr(docsrs, doc(cfg(feature = "mysql")))]
pub mod mysql;
#[cfg(feature = "mssql")]
#[cfg_attr(docsrs, doc(cfg(feature = "mssql")))]
pub mod mssql;
// Implements test support with automatic DB management.
#[cfg(feature = "migrate")]
pub mod testing;
pub use sqlx_rt::test_block_on;
pub use error::{Error, Result};
/// sqlx uses ahash for increased performance, at the cost of reduced DoS resistance.
use ahash::AHashMap as HashMap;
pub use ahash::AHashMap as HashMap;
pub use either::Either;
pub use indexmap::IndexMap;
pub use percent_encoding;
pub use smallvec::SmallVec;
pub use url::{self, Url};
pub use bytes;
//type HashMap<K, V> = std::collections::HashMap<K, V, ahash::RandomState>;
/// Helper module to get drivers compiling again that used to be in this crate,
/// to avoid having to replace tons of `use crate::<...>` imports.
///
/// This module can be glob-imported and should not clash with any modules a driver
/// would want to implement itself.
pub mod driver_prelude {
pub use crate::{
acquire, common, decode, describe, encode, executor, ext, from_row, fs, io, logger, net,
pool, query, query_as, query_builder, query_scalar, rt, sync,
};
pub use crate::error::{Error, Result};
pub use crate::HashMap;
pub use either::Either;
}

View file

@ -1,13 +1,9 @@
use crate::connection::LogSettings;
#[cfg(feature = "sqlite")]
use std::collections::HashSet;
#[cfg(feature = "sqlite")]
use std::fmt::Debug;
#[cfg(feature = "sqlite")]
use std::hash::Hash;
use std::time::Instant;
pub(crate) struct QueryLogger<'q> {
pub use sqlformat;
pub struct QueryLogger<'q> {
sql: &'q str,
rows_returned: u64,
rows_affected: u64,
@ -16,7 +12,7 @@ pub(crate) struct QueryLogger<'q> {
}
impl<'q> QueryLogger<'q> {
pub(crate) fn new(sql: &'q str, settings: LogSettings) -> Self {
pub fn new(sql: &'q str, settings: LogSettings) -> Self {
Self {
sql,
rows_returned: 0,
@ -26,15 +22,15 @@ impl<'q> QueryLogger<'q> {
}
}
pub(crate) fn increment_rows_returned(&mut self) {
pub fn increment_rows_returned(&mut self) {
self.rows_returned += 1;
}
pub(crate) fn increase_rows_affected(&mut self, n: u64) {
pub fn increase_rows_affected(&mut self, n: u64) {
self.rows_affected += n;
}
pub(crate) fn finish(&self) {
pub fn finish(&self) {
let elapsed = self.start.elapsed();
let lvl = if elapsed >= self.settings.slow_statements_duration {
@ -84,94 +80,7 @@ impl<'q> Drop for QueryLogger<'q> {
}
}
#[cfg(feature = "sqlite")]
/// Collects a decoded query "program" and its execution results so they can be
/// logged to the `sqlx::explain` target in one record when the logger drops.
pub(crate) struct QueryPlanLogger<'q, O: Debug + Hash + Eq, R: Debug, P: Debug> {
    // the SQL text this plan belongs to
    sql: &'q str,
    // operations the decoder did not recognize (deduplicated)
    unknown_operations: HashSet<O>,
    // decoded result rows, in arrival order
    results: Vec<R>,
    // the decoded program (opcode list) being explained
    program: &'q [P],
    settings: LogSettings,
}
#[cfg(feature = "sqlite")]
impl<'q, O: Debug + Hash + Eq, R: Debug, P: Debug> QueryPlanLogger<'q, O, R, P> {
    /// Creates a logger for the given SQL text and decoded program.
    pub(crate) fn new(sql: &'q str, program: &'q [P], settings: LogSettings) -> Self {
        Self {
            sql,
            unknown_operations: HashSet::new(),
            results: Vec::new(),
            program,
            settings,
        }
    }

    /// Returns `true` if the configured statement level is enabled for the
    /// `sqlx::explain` log target.
    pub(crate) fn log_enabled(&self) -> bool {
        // Simplified from `if let Some(_) = ... { return true } else { return false }`;
        // the filter already encodes the "enabled" check.
        self.settings
            .statements_level
            .to_level()
            .filter(|lvl| log::log_enabled!(target: "sqlx::explain", *lvl))
            .is_some()
    }

    /// Records one decoded result from executing the plan.
    pub(crate) fn add_result(&mut self, result: R) {
        self.results.push(result);
    }

    /// Records an operation the decoder did not recognize.
    pub(crate) fn add_unknown_operation(&mut self, operation: O) {
        self.unknown_operations.insert(operation);
    }

    /// Emits one log record with the summary, program, unknown operations and
    /// results, if the `sqlx::explain` target is enabled at the configured level.
    pub(crate) fn finish(&self) {
        let lvl = self.settings.statements_level;

        if let Some(lvl) = lvl
            .to_level()
            .filter(|lvl| log::log_enabled!(target: "sqlx::explain", *lvl))
        {
            let summary = parse_query_summary(&self.sql);

            // Only append the pretty-printed SQL when the summary truncated it.
            // (A no-op `summary.push_str("")` was removed here.)
            let sql = if summary != self.sql {
                format!(
                    "\n\n{}\n",
                    sqlformat::format(
                        &self.sql,
                        &sqlformat::QueryParams::None,
                        sqlformat::FormatOptions::default()
                    )
                )
            } else {
                String::new()
            };

            log::logger().log(
                &log::Record::builder()
                    .args(format_args!(
                        "{}; program:{:?}, unknown_operations:{:?}, results: {:?}{}",
                        summary, self.program, self.unknown_operations, self.results, sql
                    ))
                    .level(lvl)
                    .module_path_static(Some("sqlx::explain"))
                    .target("sqlx::explain")
                    .build(),
            );
        }
    }
}
#[cfg(feature = "sqlite")]
impl<'q, O: Debug + Hash + Eq, R: Debug, P: Debug> Drop for QueryPlanLogger<'q, O, R, P> {
    // Emit the collected plan when the logger goes out of scope.
    fn drop(&mut self) {
        QueryPlanLogger::finish(self);
    }
}
fn parse_query_summary(sql: &str) -> String {
pub fn parse_query_summary(sql: &str) -> String {
// For now, just take the first 4 words
sql.split_whitespace()
.take(4)

View file

@ -27,19 +27,6 @@ pub trait Migrate {
// "dirty" means there is a partially applied migration that failed.
fn dirty_version(&mut self) -> BoxFuture<'_, Result<Option<i64>, MigrateError>>;
// Return the current version and if the database is "dirty".
// "dirty" means there is a partially applied migration that failed.
#[deprecated]
fn version(&mut self) -> BoxFuture<'_, Result<Option<(i64, bool)>, MigrateError>>;
// validate the migration
// checks that it does exist on the database and that the checksum matches
#[deprecated]
fn validate<'e: 'm, 'm>(
&'e mut self,
migration: &'m Migration,
) -> BoxFuture<'m, Result<(), MigrateError>>;
// Return the ordered list of applied migrations
fn list_applied_migrations(
&mut self,

View file

@ -6,6 +6,7 @@ use std::ops::Deref;
use std::slice;
#[derive(Debug)]
#[doc(hidden)]
pub struct Migrator {
pub migrations: Cow<'static, [Migration]>,
pub ignore_missing: bool,
@ -39,7 +40,7 @@ impl Migrator {
/// ```rust,no_run
/// # use sqlx_core::migrate::MigrateError;
/// # fn main() -> Result<(), MigrateError> {
/// # sqlx_rt::block_on(async move {
/// # sqlx::__rt::test_block_on(async move {
/// # use sqlx_core::migrate::Migrator;
/// use std::path::Path;
///
@ -91,13 +92,14 @@ impl Migrator {
/// # Examples
///
/// ```rust,no_run
/// # use sqlx_core::migrate::MigrateError;
/// # #[cfg(feature = "sqlite")]
/// # use sqlx::migrate::MigrateError;
/// # fn main() -> Result<(), MigrateError> {
/// # sqlx_rt::block_on(async move {
/// # use sqlx_core::migrate::Migrator;
/// # sqlx::__rt::test_block_on(async move {
/// use sqlx::migrate::Migrator;
/// use sqlx::sqlite::SqlitePoolOptions;
///
/// let m = Migrator::new(std::path::Path::new("./migrations")).await?;
/// let pool = sqlx_core::sqlite::SqlitePoolOptions::new().connect("sqlite::memory:").await?;
/// let pool = SqlitePoolOptions::new().connect("sqlite::memory:").await?;
/// m.run(&pool).await
/// # })
/// # }
@ -170,13 +172,14 @@ impl Migrator {
/// # Examples
///
/// ```rust,no_run
/// # use sqlx_core::migrate::MigrateError;
/// # #[cfg(feature = "sqlite")]
/// # use sqlx::migrate::MigrateError;
/// # fn main() -> Result<(), MigrateError> {
/// # sqlx_rt::block_on(async move {
/// # use sqlx_core::migrate::Migrator;
/// # sqlx::__rt::test_block_on(async move {
/// use sqlx::migrate::Migrator;
/// use sqlx::sqlite::SqlitePoolOptions;
///
/// let m = Migrator::new(std::path::Path::new("./migrations")).await?;
/// let pool = sqlx_core::sqlite::SqlitePoolOptions::new().connect("sqlite::memory:").await?;
/// let pool = SqlitePoolOptions::new().connect("sqlite::memory:").await?;
/// m.undo(&pool, 4).await
/// # })
/// # }

View file

@ -1,8 +1,8 @@
use crate::error::BoxDynError;
use crate::fs;
use crate::migrate::{Migration, MigrationType};
use futures_core::future::BoxFuture;
use futures_util::TryStreamExt;
use sqlx_rt::fs;
use std::borrow::Cow;
use std::fmt::Debug;
use std::path::{Path, PathBuf};
@ -20,21 +20,16 @@ pub trait MigrationSource<'s>: Debug {
impl<'s> MigrationSource<'s> for &'s Path {
fn resolve(self) -> BoxFuture<'s, Result<Vec<Migration>, BoxDynError>> {
Box::pin(async move {
#[allow(unused_mut)]
let mut s = fs::read_dir(self.canonicalize()?).await?;
let mut migrations = Vec::new();
#[cfg(feature = "_rt-tokio")]
let mut s = tokio_stream::wrappers::ReadDirStream::new(s);
while let Some(entry) = s.try_next().await? {
if !entry.metadata().await?.is_file() {
while let Some(entry) = s.next().await? {
if !entry.metadata.is_file() {
// not a file; ignore
continue;
}
let file_name = entry.file_name();
let file_name = file_name.to_string_lossy();
let file_name = entry.file_name.to_string_lossy();
let parts = file_name.splitn(2, '_').collect::<Vec<_>>();
@ -52,7 +47,7 @@ impl<'s> MigrationSource<'s> for &'s Path {
.replace('_', " ")
.to_owned();
let sql = fs::read_to_string(&entry.path()).await?;
let sql = fs::read_to_string(&entry.path).await?;
migrations.push(Migration::new(
version,

View file

@ -1,118 +0,0 @@
use crate::arguments::Arguments;
use crate::encode::Encode;
use crate::mssql::database::Mssql;
use crate::mssql::io::MssqlBufMutExt;
use crate::mssql::protocol::rpc::StatusFlags;
use crate::types::Type;
#[derive(Default, Clone)]
/// Bind parameters for an MSSQL query, pre-encoded into the TDS wire format.
pub struct MssqlArguments {
    // next ordinal to be used when formatting a positional parameter name
    pub(crate) ordinal: usize,
    // temporary string buffer used to format parameter names
    name: String,
    // encoded parameter data (name, status, type info, value per parameter)
    pub(crate) data: Vec<u8>,
    // accumulated `@pN <type>` declaration list for the parameters in `data`
    pub(crate) declarations: String,
}
impl MssqlArguments {
    /// Encodes `value` as a named parameter directly into the data buffer.
    ///
    /// Wire layout per parameter (see the inline markers): name, status
    /// flags, type info, then the length-prefixed value data.
    pub(crate) fn add_named<'q, T: Encode<'q, Mssql> + Type<Mssql>>(
        &mut self,
        name: &str,
        value: T,
    ) {
        let ty = value.produces().unwrap_or_else(T::type_info);
        // (removed: a `ty_name` String that was formatted and then discarded)

        self.data.put_b_varchar(name); // [ParamName]
        self.data.push(0); // [StatusFlags]

        ty.0.put(&mut self.data); // [TYPE_INFO]
        ty.0.put_value(&mut self.data, value); // [ParamLenData]
    }

    /// Encodes `value` as a positional (unnamed) parameter.
    pub(crate) fn add_unnamed<'q, T: Encode<'q, Mssql> + Type<Mssql>>(&mut self, value: T) {
        self.add_named("", value);
    }

    /// Encodes `initial_value` as a by-ref (output) parameter named `name`.
    pub(crate) fn declare<'q, T: Encode<'q, Mssql> + Type<Mssql>>(
        &mut self,
        name: &str,
        initial_value: T,
    ) {
        let ty = initial_value.produces().unwrap_or_else(T::type_info);
        // (removed: a `ty_name` String that was formatted and then discarded)

        self.data.put_b_varchar(name); // [ParamName]
        self.data.push(StatusFlags::BY_REF_VALUE.bits()); // [StatusFlags]

        ty.0.put(&mut self.data); // [TYPE_INFO]
        ty.0.put_value(&mut self.data, initial_value); // [ParamLenData]
    }

    /// Moves all encoded parameter data out of `arguments` onto the end of `self`.
    ///
    /// NOTE(review): `declarations` is deliberately not merged here — the
    /// caller visible in this commit adds the declaration list as its own RPC
    /// parameter before appending. Confirm before reusing elsewhere.
    pub(crate) fn append(&mut self, arguments: &mut MssqlArguments) {
        self.ordinal += arguments.ordinal;
        self.data.append(&mut arguments.data);
    }

    /// Encodes `value` as the next ordinal parameter (`@p1`, `@p2`, ...),
    /// recording its declaration in `self.declarations`.
    pub(crate) fn add<'q, T>(&mut self, value: T)
    where
        T: Encode<'q, Mssql> + Type<Mssql>,
    {
        let ty = value.produces().unwrap_or_else(T::type_info);

        // produce an ordinal parameter name: @p1, @p2, ... @pN
        self.name.clear();
        self.name.push_str("@p");
        self.ordinal += 1;
        self.name.push_str(itoa::Buffer::new().format(self.ordinal));

        let MssqlArguments {
            ref name,
            ref mut declarations,
            ref mut data,
            ..
        } = self;

        // add this to our variable declaration list
        // @p1 int, @p2 nvarchar(10), ...
        if !declarations.is_empty() {
            declarations.push(',');
        }

        declarations.push_str(name);
        declarations.push(' ');
        ty.0.fmt(declarations);

        // write out the parameter
        data.put_b_varchar(name); // [ParamName]
        data.push(0); // [StatusFlags]

        ty.0.put(data); // [TYPE_INFO]
        ty.0.put_value(data, value); // [ParamLenData]
    }
}
impl<'q> Arguments<'q> for MssqlArguments {
    type Database = Mssql;

    /// Reserves buffer space ahead of adding parameters.
    ///
    /// `_additional` (the parameter count) is intentionally unused; only the
    /// byte-size estimate matters here.
    fn reserve(&mut self, _additional: usize, size: usize) {
        self.data.reserve(size + 10); // est. 4 chars for name, 1 for status, 1 for TYPE_INFO
    }

    /// Delegates to the inherent `MssqlArguments::add` (inherent methods take
    /// precedence over trait methods in resolution, so this does not recurse).
    fn add<T>(&mut self, value: T)
    where
        T: 'q + Encode<'q, Self::Database> + Type<Mssql>,
    {
        self.add(value)
    }
}

View file

@ -1,53 +0,0 @@
use crate::column::Column;
use crate::ext::ustr::UStr;
use crate::mssql::protocol::col_meta_data::{ColumnData, Flags};
use crate::mssql::{Mssql, MssqlTypeInfo};
#[derive(Debug, Clone)]
#[cfg_attr(feature = "offline", derive(serde::Serialize, serde::Deserialize))]
/// A single result-set column, built from the server's COLMETADATA message.
pub struct MssqlColumn {
    // zero-based position of the column within the result set
    pub(crate) ordinal: usize,
    pub(crate) name: UStr,
    pub(crate) type_info: MssqlTypeInfo,
    // column flags (e.g. NULLABLE — used by `describe`)
    pub(crate) flags: Flags,
}

// Seals the `Column` trait so it can only be implemented inside this crate.
impl crate::column::private_column::Sealed for MssqlColumn {}

impl MssqlColumn {
    /// Builds a column from a decoded COLMETADATA entry and its ordinal.
    pub(crate) fn new(meta: ColumnData, ordinal: usize) -> Self {
        Self {
            name: UStr::from(meta.col_name),
            type_info: MssqlTypeInfo(meta.type_info),
            ordinal,
            flags: meta.flags,
        }
    }
}

impl Column for MssqlColumn {
    type Database = Mssql;

    fn ordinal(&self) -> usize {
        self.ordinal
    }

    fn name(&self) -> &str {
        &*self.name
    }

    fn type_info(&self) -> &MssqlTypeInfo {
        &self.type_info
    }
}

#[cfg(feature = "any")]
impl From<MssqlColumn> for crate::any::AnyColumn {
    #[inline]
    fn from(column: MssqlColumn) -> Self {
        crate::any::AnyColumn {
            type_info: column.type_info.clone().into(),
            kind: crate::any::column::AnyColumnKind::Mssql(column),
        }
    }
}

View file

@ -1,88 +0,0 @@
use crate::common::StatementCache;
use crate::error::Error;
use crate::io::Decode;
use crate::mssql::connection::stream::MssqlStream;
use crate::mssql::protocol::login::Login7;
use crate::mssql::protocol::message::Message;
use crate::mssql::protocol::packet::PacketType;
use crate::mssql::protocol::pre_login::{Encrypt, PreLogin, Version};
use crate::mssql::{MssqlConnectOptions, MssqlConnection};
impl MssqlConnection {
    /// Opens a TCP connection described by `options` and performs the
    /// PRELOGIN + LOGIN7 handshake, waiting for the server's final DONE.
    pub(crate) async fn establish(options: &MssqlConnectOptions) -> Result<Self, Error> {
        let mut stream: MssqlStream = MssqlStream::connect(options).await?;

        // Send PRELOGIN to set up the context for login. The server should immediately
        // respond with a PRELOGIN message of its own.

        // TODO: Encryption
        // TODO: Send the version of SQLx over

        stream.write_packet(
            PacketType::PreLogin,
            PreLogin {
                version: Version::default(),
                encryption: Encrypt::NOT_SUPPORTED,
                ..Default::default()
            },
        );

        stream.flush().await?;

        // The server's PRELOGIN response is decoded only to validate it; its
        // contents are otherwise discarded (no encryption negotiation yet).
        let (_, packet) = stream.recv_packet().await?;
        let _ = PreLogin::decode(packet)?;

        // LOGIN7 defines the authentication rules for use between client and server
        stream.write_packet(
            PacketType::Tds7Login,
            Login7 {
                // FIXME: use a version constant
                version: 0x74000004, // SQL Server 2012 - SQL Server 2019
                client_program_version: 0,
                client_pid: 0,
                packet_size: 4096,
                hostname: "",
                username: &options.username,
                password: options.password.as_deref().unwrap_or_default(),
                app_name: "",
                server_name: "",
                client_interface_name: "",
                language: "",
                database: &*options.database,
                client_id: [0; 6],
            },
        );

        stream.flush().await?;

        loop {
            // NOTE: we should receive an [Error] message if something goes wrong, otherwise,
            // all messages are mostly informational (ENVCHANGE, INFO, LOGINACK)
            match stream.recv_message().await? {
                Message::LoginAck(_) => {
                    // indicates that the login was successful
                    // no action is needed, we are just going to keep waiting till we hit <Done>
                }

                Message::Done(_) => {
                    break;
                }

                _ => {}
            }
        }

        // FIXME: Do we need to expose the capacity count here? It's not tied to
        //        server-side resources but just .prepare() calls which return
        //        client-side data.
        Ok(Self {
            stream,
            cache_statement: StatementCache::new(1024),
            log_settings: options.log_settings.clone(),
        })
    }
}

View file

@ -1,198 +0,0 @@
use crate::describe::Describe;
use crate::error::Error;
use crate::executor::{Execute, Executor};
use crate::logger::QueryLogger;
use crate::mssql::connection::prepare::prepare;
use crate::mssql::protocol::col_meta_data::Flags;
use crate::mssql::protocol::done::Status;
use crate::mssql::protocol::message::Message;
use crate::mssql::protocol::packet::PacketType;
use crate::mssql::protocol::rpc::{OptionFlags, Procedure, RpcRequest};
use crate::mssql::protocol::sql_batch::SqlBatch;
use crate::mssql::{
Mssql, MssqlArguments, MssqlConnection, MssqlQueryResult, MssqlRow, MssqlStatement,
MssqlTypeInfo,
};
use either::Either;
use futures_core::future::BoxFuture;
use futures_core::stream::BoxStream;
use futures_util::TryStreamExt;
use std::borrow::Cow;
use std::sync::Arc;
impl MssqlConnection {
    /// Sends `query` to the server — as an `sp_executesql` RPC call when
    /// `arguments` are present, otherwise as a plain SQL batch — and flushes.
    ///
    /// Increments `pending_done_count`; the caller must drain the resulting
    /// message stream until the matching DONE arrives.
    async fn run(&mut self, query: &str, arguments: Option<MssqlArguments>) -> Result<(), Error> {
        self.stream.wait_until_ready().await?;
        self.stream.pending_done_count += 1;

        if let Some(mut arguments) = arguments {
            let proc = Either::Right(Procedure::ExecuteSql);
            let mut proc_args = MssqlArguments::default();

            // SQL
            proc_args.add_unnamed(query);

            if !arguments.data.is_empty() {
                // Declarations
                //  NAME TYPE, NAME TYPE, ...
                proc_args.add_unnamed(&*arguments.declarations);

                // Add the list of SQL parameters _after_ our RPC parameters
                proc_args.append(&mut arguments);
            }

            self.stream.write_packet(
                PacketType::Rpc,
                RpcRequest {
                    transaction_descriptor: self.stream.transaction_descriptor,
                    arguments: &proc_args,
                    procedure: proc,
                    options: OptionFlags::empty(),
                },
            );
        } else {
            self.stream.write_packet(
                PacketType::SqlBatch,
                SqlBatch {
                    transaction_descriptor: self.stream.transaction_descriptor,
                    sql: query,
                },
            );
        }

        self.stream.flush().await?;

        Ok(())
    }
}
impl<'c> Executor<'c> for &'c mut MssqlConnection {
    type Database = Mssql;

    /// Streams results for `query`: `Either::Right` per data row,
    /// `Either::Left` per statement completion carrying `rows_affected`.
    fn fetch_many<'e, 'q: 'e, E: 'q>(
        self,
        mut query: E,
    ) -> BoxStream<'e, Result<Either<MssqlQueryResult, MssqlRow>, Error>>
    where
        'c: 'e,
        E: Execute<'q, Self::Database>,
    {
        let sql = query.sql();
        let arguments = query.take_arguments();
        let mut logger = QueryLogger::new(sql, self.log_settings.clone());

        Box::pin(try_stream! {
            self.run(sql, arguments).await?;

            loop {
                let message = self.stream.recv_message().await?;

                match message {
                    Message::Row(row) => {
                        // rows borrow the column metadata cached on the stream
                        let columns = Arc::clone(&self.stream.columns);
                        let column_names = Arc::clone(&self.stream.column_names);

                        logger.increment_rows_returned();

                        r#yield!(Either::Right(MssqlRow { row, column_names, columns }));
                    }

                    Message::Done(done) | Message::DoneProc(done) => {
                        if !done.status.contains(Status::DONE_MORE) {
                            // settle the outstanding DONE before (possibly) breaking below
                            self.stream.handle_done(&done);
                        }

                        if done.status.contains(Status::DONE_COUNT) {
                            let rows_affected = done.affected_rows;
                            logger.increase_rows_affected(rows_affected);
                            r#yield!(Either::Left(MssqlQueryResult {
                                rows_affected,
                            }));
                        }

                        if !done.status.contains(Status::DONE_MORE) {
                            break;
                        }
                    }

                    Message::DoneInProc(done) => {
                        // DONEINPROC never ends the stream; it only carries a count
                        if done.status.contains(Status::DONE_COUNT) {
                            let rows_affected = done.affected_rows;
                            logger.increase_rows_affected(rows_affected);
                            r#yield!(Either::Left(MssqlQueryResult {
                                rows_affected,
                            }));
                        }
                    }

                    _ => {}
                }
            }

            Ok(())
        })
    }

    /// Returns the first data row produced by `query`, if any; remaining
    /// results are discarded when the stream is dropped.
    fn fetch_optional<'e, 'q: 'e, E: 'q>(
        self,
        query: E,
    ) -> BoxFuture<'e, Result<Option<MssqlRow>, Error>>
    where
        'c: 'e,
        E: Execute<'q, Self::Database>,
    {
        let mut s = self.fetch_many(query);

        Box::pin(async move {
            while let Some(v) = s.try_next().await? {
                if let Either::Right(r) = v {
                    return Ok(Some(r));
                }
            }

            Ok(None)
        })
    }

    /// Prepares `sql` and returns a statement borrowing the SQL text.
    ///
    /// `_parameters` is ignored; parameter type info is not used here.
    fn prepare_with<'e, 'q: 'e>(
        self,
        sql: &'q str,
        _parameters: &[MssqlTypeInfo],
    ) -> BoxFuture<'e, Result<MssqlStatement<'q>, Error>>
    where
        'c: 'e,
    {
        Box::pin(async move {
            let metadata = prepare(self, sql).await?;

            Ok(MssqlStatement {
                sql: Cow::Borrowed(sql),
                metadata,
            })
        })
    }

    /// Describes the columns of `sql` via prepared-statement metadata;
    /// nullability is derived from each column's NULLABLE flag.
    fn describe<'e, 'q: 'e>(
        self,
        sql: &'q str,
    ) -> BoxFuture<'e, Result<Describe<Self::Database>, Error>>
    where
        'c: 'e,
    {
        Box::pin(async move {
            let metadata = prepare(self, sql).await?;

            let mut nullable = Vec::with_capacity(metadata.columns.len());

            for col in metadata.columns.iter() {
                nullable.push(Some(col.flags.contains(Flags::NULLABLE)));
            }

            Ok(Describe {
                nullable,
                columns: (metadata.columns).clone(),
                parameters: None,
            })
        })
    }
}

View file

@ -1,83 +0,0 @@
use crate::common::StatementCache;
use crate::connection::{Connection, LogSettings};
use crate::error::Error;
use crate::executor::Executor;
use crate::mssql::connection::stream::MssqlStream;
use crate::mssql::statement::MssqlStatementMetadata;
use crate::mssql::{Mssql, MssqlConnectOptions};
use crate::transaction::Transaction;
use futures_core::future::BoxFuture;
use futures_util::{FutureExt, TryFutureExt};
use std::fmt::{self, Debug, Formatter};
use std::sync::Arc;
mod establish;
mod executor;
mod prepare;
mod stream;
/// A live connection to an MSSQL server.
pub struct MssqlConnection {
    // buffered TDS stream plus per-connection protocol state
    pub(crate) stream: MssqlStream,
    // prepared-statement metadata cache, keyed by SQL text
    pub(crate) cache_statement: StatementCache<Arc<MssqlStatementMetadata>>,
    log_settings: LogSettings,
}
impl Debug for MssqlConnection {
    // The connection holds no user-meaningful state worth printing, so only
    // the type name is shown (same output as `debug_struct(..).finish()`).
    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
        f.write_str("MssqlConnection")
    }
}
impl Connection for MssqlConnection {
    type Database = Mssql;

    type Options = MssqlConnectOptions;

    /// Closes the connection by shutting down the underlying socket.
    ///
    /// NOTE(review): the two runtime blocks appear to be mutually exclusive
    /// via cargo features — confirm both features are never enabled together.
    #[allow(unused_mut)]
    fn close(mut self) -> BoxFuture<'static, Result<(), Error>> {
        // NOTE: there does not seem to be a clean shutdown packet to send to MSSQL
        #[cfg(feature = "_rt-async-std")]
        {
            use std::future::ready;
            use std::net::Shutdown;

            ready(self.stream.shutdown(Shutdown::Both).map_err(Into::into)).boxed()
        }

        #[cfg(feature = "_rt-tokio")]
        {
            use sqlx_rt::AsyncWriteExt;

            // FIXME: This is equivalent to Shutdown::Write, not Shutdown::Both like above
            // https://docs.rs/tokio/1.0.1/tokio/io/trait.AsyncWriteExt.html#method.shutdown
            async move { self.stream.shutdown().await.map_err(Into::into) }.boxed()
        }
    }

    /// No graceful protocol-level close exists, so hard close == `close`.
    fn close_hard(self) -> BoxFuture<'static, Result<(), Error>> {
        self.close()
    }

    fn ping(&mut self) -> BoxFuture<'_, Result<(), Error>> {
        // NOTE: we do not use `SELECT 1` as that *could* interact with any ongoing transactions
        self.execute("/* SQLx ping */").map_ok(|_| ()).boxed()
    }

    fn begin(&mut self) -> BoxFuture<'_, Result<Transaction<'_, Self::Database>, Error>>
    where
        Self: Sized,
    {
        Transaction::begin(self)
    }

    #[doc(hidden)]
    fn flush(&mut self) -> BoxFuture<'_, Result<(), Error>> {
        // flushes pending writes AND drains outstanding responses
        self.stream.wait_until_ready().boxed()
    }

    #[doc(hidden)]
    fn should_flush(&self) -> bool {
        !self.stream.wbuf.is_empty()
    }
}

View file

@ -1,142 +0,0 @@
use crate::decode::Decode;
use crate::error::Error;
use crate::mssql::protocol::done::Status;
use crate::mssql::protocol::message::Message;
use crate::mssql::protocol::packet::PacketType;
use crate::mssql::protocol::rpc::{OptionFlags, Procedure, RpcRequest};
use crate::mssql::statement::MssqlStatementMetadata;
use crate::mssql::{Mssql, MssqlArguments, MssqlConnection, MssqlTypeInfo, MssqlValueRef};
use either::Either;
use once_cell::sync::Lazy;
use regex::Regex;
use std::sync::Arc;
/// Prepares `sql` on the server (via `sp_prepare`, then immediately
/// `sp_unprepare`) purely to obtain column metadata, caching the result
/// per connection keyed by the SQL text.
pub(crate) async fn prepare(
    conn: &mut MssqlConnection,
    sql: &str,
) -> Result<Arc<MssqlStatementMetadata>, Error> {
    if let Some(metadata) = conn.cache_statement.get_mut(sql) {
        return Ok(metadata.clone());
    }

    // NOTE: this does not support unicode identifiers; as we don't even support
    //       named parameters (yet) this is probably fine, for now
    static PARAMS_RE: Lazy<Regex> = Lazy::new(|| Regex::new(r"@p[[:alnum:]]+").unwrap());

    // Build the declaration string sp_prepare expects:
    // "@p1 nvarchar(1),@p2 nvarchar(1),..."
    let mut params = String::new();

    for m in PARAMS_RE.captures_iter(sql) {
        if !params.is_empty() {
            params.push_str(",");
        }

        params.push_str(&m[0]);

        // NOTE: this means that a query! of `SELECT @p1` will have the macros believe
        //       it will return nvarchar(1); this is a greater issue with `query!` that we
        //       we need to circle back to. This doesn't happen much in practice however.
        params.push_str(" nvarchar(1)");
    }

    let params = if params.is_empty() {
        None
    } else {
        Some(&*params)
    };

    // sp_prepare arguments: output handle, declarations, SQL text, flags
    let mut args = MssqlArguments::default();

    args.declare("", 0_i32);
    args.add_unnamed(params);
    args.add_unnamed(sql);
    args.add_unnamed(0x0001_i32); // 1 = SEND_METADATA

    conn.stream.write_packet(
        PacketType::Rpc,
        RpcRequest {
            transaction_descriptor: conn.stream.transaction_descriptor,
            arguments: &args,
            // [sp_prepare] will emit the column meta data
            // small issue is that we need to declare all the used placeholders with a "fallback" type
            // we currently use regex to collect them; false positives are *okay* but false
            // negatives would break the query
            procedure: Either::Right(Procedure::Prepare),
            options: OptionFlags::empty(),
        },
    );

    conn.stream.flush().await?;
    conn.stream.wait_until_ready().await?;
    conn.stream.pending_done_count += 1;

    // Drain the sp_prepare response; RETURNVALUE carries the statement handle
    // needed below for sp_unprepare.
    let mut id: Option<i32> = None;

    loop {
        let message = conn.stream.recv_message().await?;

        match message {
            Message::DoneProc(done) | Message::Done(done) => {
                if !done.status.contains(Status::DONE_MORE) {
                    // done with prepare
                    conn.stream.handle_done(&done);
                    break;
                }
            }

            Message::ReturnValue(rv) => {
                id = <i32 as Decode<Mssql>>::decode(MssqlValueRef {
                    data: rv.value.as_ref(),
                    type_info: MssqlTypeInfo(rv.type_info),
                })
                .ok();
            }

            _ => {}
        }
    }

    // Unprepare right away — presumably the handle was only needed to make
    // the server emit metadata (TODO confirm).
    if let Some(id) = id {
        let mut args = MssqlArguments::default();
        args.add_unnamed(id);

        conn.stream.write_packet(
            PacketType::Rpc,
            RpcRequest {
                transaction_descriptor: conn.stream.transaction_descriptor,
                arguments: &args,
                procedure: Either::Right(Procedure::Unprepare),
                options: OptionFlags::empty(),
            },
        );

        conn.stream.flush().await?;
        conn.stream.wait_until_ready().await?;
        conn.stream.pending_done_count += 1;

        loop {
            let message = conn.stream.recv_message().await?;

            match message {
                Message::DoneProc(done) | Message::Done(done) => {
                    if !done.status.contains(Status::DONE_MORE) {
                        // done with unprepare
                        conn.stream.handle_done(&done);
                        break;
                    }
                }

                _ => {}
            }
        }
    }

    // Snapshot the ColMetaData the stream cached while draining above.
    let metadata = Arc::new(MssqlStatementMetadata {
        columns: conn.stream.columns.as_ref().clone(),
        column_names: conn.stream.column_names.as_ref().clone(),
    });

    conn.cache_statement.insert(sql, metadata.clone());

    Ok(metadata)
}

View file

@ -1,236 +0,0 @@
use std::ops::{Deref, DerefMut};
use bytes::{Bytes, BytesMut};
use sqlx_rt::TcpStream;
use crate::error::Error;
use crate::ext::ustr::UStr;
use crate::io::{BufStream, Encode};
use crate::mssql::protocol::col_meta_data::ColMetaData;
use crate::mssql::protocol::done::{Done, Status as DoneStatus};
use crate::mssql::protocol::env_change::EnvChange;
use crate::mssql::protocol::error::Error as ProtocolError;
use crate::mssql::protocol::info::Info;
use crate::mssql::protocol::login_ack::LoginAck;
use crate::mssql::protocol::message::{Message, MessageType};
use crate::mssql::protocol::order::Order;
use crate::mssql::protocol::packet::{PacketHeader, PacketType, Status};
use crate::mssql::protocol::return_status::ReturnStatus;
use crate::mssql::protocol::return_value::ReturnValue;
use crate::mssql::protocol::row::Row;
use crate::mssql::{MssqlColumn, MssqlConnectOptions, MssqlDatabaseError};
use crate::net::MaybeTlsStream;
use crate::HashMap;
use std::sync::Arc;
/// Buffered TDS stream plus the protocol state needed to decode responses.
pub(crate) struct MssqlStream {
    inner: BufStream<MaybeTlsStream<TcpStream>>,

    // how many Done (or Error) we are currently waiting for
    pub(crate) pending_done_count: usize,

    // current transaction descriptor
    // set from ENVCHANGE on `BEGIN` and reset to `0` on a ROLLBACK
    pub(crate) transaction_descriptor: u64,
    pub(crate) transaction_depth: usize,

    // current TabularResult from the server that we are iterating over
    response: Option<(PacketHeader, Bytes)>,

    // most recent column data from ColMetaData
    // we need to store this as its needed when decoding <Row>
    pub(crate) columns: Arc<Vec<MssqlColumn>>,
    pub(crate) column_names: Arc<HashMap<UStr, usize>>,
}
impl MssqlStream {
    /// Opens a plain (non-TLS) TCP connection to the configured host/port.
    pub(super) async fn connect(options: &MssqlConnectOptions) -> Result<Self, Error> {
        let inner = BufStream::new(MaybeTlsStream::Raw(
            TcpStream::connect((&*options.host, options.port)).await?,
        ));

        Ok(Self {
            inner,
            columns: Default::default(),
            column_names: Default::default(),
            response: None,
            pending_done_count: 0,
            transaction_descriptor: 0,
            transaction_depth: 0,
        })
    }

    // writes the packet out to the write buffer
    // will (eventually) handle packet chunking
    pub(crate) fn write_packet<'en, T: Encode<'en>>(&mut self, ty: PacketType, payload: T) {
        // TODO: Support packet chunking for large packet sizes
        //       We likely need to double-buffer the writes so we know to chunk

        // write out the packet header, leaving room for setting the packet length later
        // NOTE(review): `len_offset` is presumably set by `PacketHeader`'s
        // encoding to point at its length field — confirm in its Encode impl.
        let mut len_offset = 0;

        self.inner.write_with(
            PacketHeader {
                r#type: ty,
                status: Status::END_OF_MESSAGE,
                length: 0,
                server_process_id: 0,
                packet_id: 1,
            },
            &mut len_offset,
        );

        // write out the payload
        self.inner.write(payload);

        // overwrite the packet length now that we know it (big-endian u16)
        let len = self.inner.wbuf.len();
        self.inner.wbuf[len_offset..(len_offset + 2)].copy_from_slice(&(len as u16).to_be_bytes());
    }

    // receive the next packet from the database
    // blocks until a packet is available
    pub(super) async fn recv_packet(&mut self) -> Result<(PacketHeader, Bytes), Error> {
        let mut header: PacketHeader = self.inner.read(8).await?;

        // NOTE: From what I can tell, the response type from the server should ~always~
        //       be TabularResult. Here we expect that and die otherwise.
        if !matches!(header.r#type, PacketType::TabularResult) {
            return Err(err_protocol!(
                "received unexpected packet: {:?}",
                header.r#type
            ));
        }

        // Accumulate payloads until END_OF_MESSAGE; each packet's declared
        // length includes its own 8-byte header, hence `length - 8`.
        let mut payload = BytesMut::new();

        loop {
            self.inner
                .read_raw_into(&mut payload, (header.length - 8) as usize)
                .await?;

            if header.status.contains(Status::END_OF_MESSAGE) {
                break;
            }

            header = self.inner.read(8).await?;
        }

        Ok((header, payload.freeze()))
    }

    // receive the next ~message~
    // TDS communicates in streams of packets that are themselves streams of messages
    pub(super) async fn recv_message(&mut self) -> Result<Message, Error> {
        loop {
            while self.response.as_ref().map_or(false, |r| !r.1.is_empty()) {
                let buf = if let Some((_, buf)) = self.response.as_mut() {
                    buf
                } else {
                    // this shouldn't be reachable but just nope out
                    // and head to refill our buffer
                    break;
                };

                let ty = MessageType::get(buf)?;

                let message = match ty {
                    MessageType::EnvChange => {
                        // consumed here; only transaction-descriptor changes are tracked
                        match EnvChange::get(buf)? {
                            EnvChange::BeginTransaction(desc) => {
                                self.transaction_descriptor = desc;
                            }

                            EnvChange::CommitTransaction(_) | EnvChange::RollbackTransaction(_) => {
                                self.transaction_descriptor = 0;
                            }

                            _ => {}
                        }

                        continue;
                    }

                    MessageType::Info => {
                        // informational messages are parsed and discarded
                        let _ = Info::get(buf)?;
                        continue;
                    }

                    MessageType::Row => Message::Row(Row::get(buf, false, &self.columns)?),
                    MessageType::NbcRow => Message::Row(Row::get(buf, true, &self.columns)?),
                    MessageType::LoginAck => Message::LoginAck(LoginAck::get(buf)?),
                    MessageType::ReturnStatus => Message::ReturnStatus(ReturnStatus::get(buf)?),
                    MessageType::ReturnValue => Message::ReturnValue(ReturnValue::get(buf)?),
                    MessageType::Done => Message::Done(Done::get(buf)?),
                    MessageType::DoneInProc => Message::DoneInProc(Done::get(buf)?),
                    MessageType::DoneProc => Message::DoneProc(Done::get(buf)?),
                    MessageType::Order => Message::Order(Order::get(buf)?),

                    MessageType::Error => {
                        let error = ProtocolError::get(buf)?;
                        return self.handle_error(error);
                    }

                    MessageType::ColMetaData => {
                        // NOTE: there isn't anything to return as the data gets
                        //       consumed by the stream for use in subsequent Row decoding
                        ColMetaData::get(
                            buf,
                            Arc::make_mut(&mut self.columns),
                            Arc::make_mut(&mut self.column_names),
                        )?;
                        continue;
                    }
                };

                return Ok(message);
            }

            // no packet from the server to iterate (or its empty); fill our buffer
            self.response = Some(self.recv_packet().await?);
        }
    }

    /// Marks one outstanding response stream as settled.
    pub(crate) fn handle_done(&mut self, _done: &Done) {
        self.pending_done_count -= 1;
    }

    pub(crate) fn handle_error<T>(&mut self, error: ProtocolError) -> Result<T, Error> {
        // NOTE: [error] is sent IN ADDITION TO [done]
        Err(MssqlDatabaseError(error).into())
    }

    /// Flushes pending writes and drains responses until no DONE is outstanding.
    pub(crate) async fn wait_until_ready(&mut self) -> Result<(), Error> {
        if !self.wbuf.is_empty() {
            self.flush().await?;
        }

        while self.pending_done_count > 0 {
            let message = self.recv_message().await?;

            if let Message::DoneProc(done) | Message::Done(done) = message {
                if !done.status.contains(DoneStatus::DONE_MORE) {
                    // finished RPC procedure *OR* SQL batch
                    self.handle_done(&done);
                }
            }
        }

        Ok(())
    }
}
// Convenience access to the underlying buffered stream
// (e.g. `self.wbuf`, `self.flush()` above go through these).
impl Deref for MssqlStream {
    type Target = BufStream<MaybeTlsStream<TcpStream>>;

    fn deref(&self) -> &Self::Target {
        &self.inner
    }
}

impl DerefMut for MssqlStream {
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.inner
    }
}

View file

@ -1,45 +0,0 @@
use crate::database::{Database, HasArguments, HasStatement, HasValueRef};
use crate::mssql::{
MssqlArguments, MssqlColumn, MssqlConnection, MssqlQueryResult, MssqlRow, MssqlStatement,
MssqlTransactionManager, MssqlTypeInfo, MssqlValue, MssqlValueRef,
};
/// MSSQL database driver.
#[derive(Debug)]
pub struct Mssql;

// Wires the MSSQL concrete types into the generic `Database` abstraction.
impl Database for Mssql {
    type Connection = MssqlConnection;

    type TransactionManager = MssqlTransactionManager;

    type Row = MssqlRow;

    type QueryResult = MssqlQueryResult;

    type Column = MssqlColumn;

    type TypeInfo = MssqlTypeInfo;

    type Value = MssqlValue;
}

impl<'r> HasValueRef<'r> for Mssql {
    type Database = Mssql;

    type ValueRef = MssqlValueRef<'r>;
}

impl<'q> HasStatement<'q> for Mssql {
    type Database = Mssql;

    type Statement = MssqlStatement<'q>;
}

impl HasArguments<'_> for Mssql {
    type Database = Mssql;

    type Arguments = MssqlArguments;

    // arguments are encoded directly into a raw byte buffer
    type ArgumentBuffer = Vec<u8>;
}

View file

@ -1,52 +0,0 @@
use std::error::Error as StdError;
use std::fmt::{self, Debug, Display, Formatter};
use crate::error::DatabaseError;
use crate::mssql::protocol::error::Error;
/// An error returned from the MSSQL database.
pub struct MssqlDatabaseError(pub(crate) Error);

impl Debug for MssqlDatabaseError {
    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
        f.debug_struct("MssqlDatabaseError")
            .field("message", &self.0.message)
            .field("number", &self.0.number)
            .field("state", &self.0.state)
            .field("class", &self.0.class)
            .field("server", &self.0.server)
            .field("procedure", &self.0.procedure)
            .field("line", &self.0.line)
            .finish()
    }
}

impl Display for MssqlDatabaseError {
    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
        // `pad` (rather than `write_str`) honors width/alignment flags
        f.pad(self.message())
    }
}

impl StdError for MssqlDatabaseError {}

impl DatabaseError for MssqlDatabaseError {
    /// The server-supplied, human-readable error message.
    #[inline]
    fn message(&self) -> &str {
        &self.0.message
    }

    #[doc(hidden)]
    fn as_error(&self) -> &(dyn StdError + Send + Sync + 'static) {
        self
    }

    #[doc(hidden)]
    fn as_error_mut(&mut self) -> &mut (dyn StdError + Send + Sync + 'static) {
        self
    }

    #[doc(hidden)]
    fn into_error(self: Box<Self>) -> Box<dyn StdError + Send + Sync + 'static> {
        self
    }
}

View file

@ -1,43 +0,0 @@
use bytes::{Buf, Bytes};
use crate::error::Error;
use crate::io::BufExt;
/// Read helpers for MSSQL/TDS length-prefixed wire formats.
pub trait MssqlBufExt: Buf {
    /// Reads `n` little-endian UTF-16 code units and decodes them to a `String`.
    fn get_utf16_str(&mut self, n: usize) -> Result<String, Error>;

    /// Reads a string prefixed with a one-byte code-unit count (B_VARCHAR).
    fn get_b_varchar(&mut self) -> Result<String, Error>;

    /// Reads a string prefixed with a two-byte LE code-unit count (US_VARCHAR).
    fn get_us_varchar(&mut self) -> Result<String, Error>;

    /// Reads a byte slice prefixed with a one-byte length (B_VARBYTE).
    fn get_b_varbyte(&mut self) -> Bytes;
}
impl MssqlBufExt for Bytes {
fn get_utf16_str(&mut self, mut n: usize) -> Result<String, Error> {
let mut raw = Vec::with_capacity(n * 2);
while n > 0 {
let ch = self.get_u16_le();
raw.push(ch);
n -= 1;
}
String::from_utf16(&raw).map_err(Error::protocol)
}
fn get_b_varchar(&mut self) -> Result<String, Error> {
let size = self.get_u8();
self.get_utf16_str(size as usize)
}
fn get_us_varchar(&mut self) -> Result<String, Error> {
let size = self.get_u16_le();
self.get_utf16_str(size as usize)
}
fn get_b_varbyte(&mut self) -> Bytes {
let size = self.get_u8();
self.get_bytes(size as usize)
}
}

View file

@ -1,18 +0,0 @@
/// Extension methods for writing MSSQL/TDS wire-format strings into a byte buffer.
pub trait MssqlBufMutExt {
    /// Writes a B_VARCHAR: a `u8` length prefix (in UTF-16 code units)
    /// followed by the string encoded as UTF-16 (little-endian).
    fn put_b_varchar(&mut self, s: &str);

    /// Writes the string encoded as UTF-16 (little-endian), with no length prefix.
    fn put_utf16_str(&mut self, s: &str);
}

impl MssqlBufMutExt for Vec<u8> {
    fn put_utf16_str(&mut self, s: &str) {
        for ch in s.encode_utf16() {
            self.extend_from_slice(&ch.to_le_bytes());
        }
    }

    fn put_b_varchar(&mut self, s: &str) {
        // The B_VARCHAR length prefix counts UTF-16 code units, not UTF-8
        // bytes. The previous `s.len()` (byte length) wrote a too-large
        // prefix for any non-ASCII string.
        // NOTE(review): strings longer than 255 code units still truncate
        // the prefix via `as u8` — unchanged from the original behavior.
        self.push(s.encode_utf16().count() as u8);
        self.put_utf16_str(s);
    }
}

View file

@ -1,5 +0,0 @@
//! Buffer read/write extensions for the TDS wire format.

mod buf;
mod buf_mut;

pub(crate) use buf::MssqlBufExt;
pub(crate) use buf_mut::MssqlBufMutExt;

View file

@ -1,51 +0,0 @@
//! Microsoft SQL (MSSQL) database driver.

use crate::executor::Executor;

mod arguments;
mod column;
mod connection;
mod database;
mod error;
mod io;
mod options;
mod protocol;
mod query_result;
mod row;
mod statement;
mod transaction;
mod type_info;
pub mod types;
mod value;

pub use arguments::MssqlArguments;
pub use column::MssqlColumn;
pub use connection::MssqlConnection;
pub use database::Mssql;
pub use error::MssqlDatabaseError;
pub use options::MssqlConnectOptions;
pub use query_result::MssqlQueryResult;
pub use row::MssqlRow;
pub use statement::MssqlStatement;
pub use transaction::MssqlTransactionManager;
pub use type_info::MssqlTypeInfo;
pub use value::{MssqlValue, MssqlValueRef};

/// An alias for [`Pool`][crate::pool::Pool], specialized for MSSQL.
pub type MssqlPool = crate::pool::Pool<Mssql>;

/// An alias for [`PoolOptions`][crate::pool::PoolOptions], specialized for MSSQL.
pub type MssqlPoolOptions = crate::pool::PoolOptions<Mssql>;

/// An alias for [`Executor<'_, Database = Mssql>`][Executor].
pub trait MssqlExecutor<'c>: Executor<'c, Database = Mssql> {}
impl<'c, T: Executor<'c, Database = Mssql>> MssqlExecutor<'c> for T {}

// NOTE: required due to the lack of lazy normalization
// These macros wire the MSSQL driver into the crate's generic facade traits.
impl_into_arguments_for_arguments!(MssqlArguments);
impl_executor_for_pool_connection!(Mssql, MssqlConnection, MssqlRow);
impl_executor_for_transaction!(Mssql, MssqlRow);
impl_acquire!(Mssql, MssqlConnection);
impl_column_index_for_row!(MssqlRow);
impl_column_index_for_statement!(MssqlStatement);
impl_into_maybe_pool!(Mssql, MssqlConnection);

View file

@ -1,27 +0,0 @@
use crate::connection::ConnectOptions;
use crate::error::Error;
use crate::mssql::{MssqlConnectOptions, MssqlConnection};
use futures_core::future::BoxFuture;
use log::LevelFilter;
use std::time::Duration;
impl ConnectOptions for MssqlConnectOptions {
    type Connection = MssqlConnection;

    /// Establishes a new connection to the MSSQL server described by these options.
    fn connect(&self) -> BoxFuture<'_, Result<Self::Connection, Error>>
    where
        Self::Connection: Sized,
    {
        Box::pin(MssqlConnection::establish(self))
    }

    /// Sets the log level used when recording executed statements.
    fn log_statements(&mut self, level: LevelFilter) -> &mut Self {
        self.log_settings.log_statements(level);
        self
    }

    /// Sets the log level and duration threshold for recording slow statements.
    fn log_slow_statements(&mut self, level: LevelFilter, duration: Duration) -> &mut Self {
        self.log_settings.log_slow_statements(level, duration);
        self
    }
}

View file

@ -1,58 +0,0 @@
use crate::connection::LogSettings;
mod connect;
mod parse;
/// Options for connecting to an MSSQL database.
#[derive(Debug, Clone)]
pub struct MssqlConnectOptions {
    // Server host name; defaults to `localhost` (see `new`).
    pub(crate) host: String,
    // TCP port; defaults to 1433.
    pub(crate) port: u16,
    // Login user name; defaults to `sa`.
    pub(crate) username: String,
    // Initial database; defaults to `master`.
    pub(crate) database: String,
    // Optional login password.
    pub(crate) password: Option<String>,
    // Statement-logging configuration.
    pub(crate) log_settings: LogSettings,
}
impl Default for MssqlConnectOptions {
    /// Equivalent to [`MssqlConnectOptions::new`].
    fn default() -> Self {
        Self::new()
    }
}
impl MssqlConnectOptions {
    /// Creates a new set of options targeting `sa@localhost:1433` with the
    /// `master` database and default logging settings.
    pub fn new() -> Self {
        Self {
            host: "localhost".into(),
            port: 1433,
            username: "sa".into(),
            password: None,
            database: "master".into(),
            log_settings: Default::default(),
        }
    }

    /// Sets the host name of the server to connect to.
    pub fn host(mut self, host: &str) -> Self {
        self.host = host.to_string();
        self
    }

    /// Sets the TCP port of the server.
    pub fn port(mut self, port: u16) -> Self {
        self.port = port;
        self
    }

    /// Sets the user name used to log in.
    pub fn username(mut self, username: &str) -> Self {
        self.username = username.to_string();
        self
    }

    /// Sets the password used to log in.
    pub fn password(mut self, password: &str) -> Self {
        self.password = Some(password.to_string());
        self
    }

    /// Sets the initial database.
    pub fn database(mut self, database: &str) -> Self {
        self.database = database.to_string();
        self
    }
}

View file

@ -1,62 +0,0 @@
use crate::error::Error;
use crate::mssql::MssqlConnectOptions;
use percent_encoding::percent_decode_str;
use std::str::FromStr;
use url::Url;
impl FromStr for MssqlConnectOptions {
type Err = Error;
fn from_str(s: &str) -> Result<Self, Self::Err> {
let url: Url = s.parse().map_err(Error::config)?;
let mut options = Self::new();
if let Some(host) = url.host_str() {
options = options.host(host);
}
if let Some(port) = url.port() {
options = options.port(port);
}
let username = url.username();
if !username.is_empty() {
options = options.username(
&*percent_decode_str(username)
.decode_utf8()
.map_err(Error::config)?,
);
}
if let Some(password) = url.password() {
options = options.password(
&*percent_decode_str(password)
.decode_utf8()
.map_err(Error::config)?,
);
}
let path = url.path().trim_start_matches('/');
if !path.is_empty() {
options = options.database(path);
}
Ok(options)
}
}
#[test]
fn it_parses_username_with_at_sign_correctly() {
    // Scheme corrected from `mysql://` to `mssql://`: this tests the MSSQL
    // option parser (which ignores the scheme, but the fixture should match
    // the driver it exercises).
    let url = "mssql://user@hostname:password@hostname:5432/database";
    let opts = MssqlConnectOptions::from_str(url).unwrap();

    assert_eq!("user@hostname", &opts.username);
}
#[test]
fn it_parses_password_with_non_ascii_chars_correctly() {
    // Scheme corrected from `mysql://` to `mssql://` (see sibling test).
    let url = "mssql://username:p@ssw0rd@hostname:5432/database";
    let opts = MssqlConnectOptions::from_str(url).unwrap();

    assert_eq!(Some("p@ssw0rd".into()), opts.password);
}

View file

@ -1,133 +0,0 @@
use bitflags::bitflags;
use bytes::{Buf, Bytes};
use crate::error::Error;
use crate::ext::ustr::UStr;
use crate::mssql::io::MssqlBufExt;
use crate::mssql::protocol::type_info::TypeInfo;
use crate::mssql::MssqlColumn;
use crate::HashMap;
/// Marker type for the COLMETADATA token; parsing lives on [`ColMetaData::get`].
#[derive(Debug)]
pub(crate) struct ColMetaData;

/// Metadata for a single column inside a COLMETADATA token.
#[derive(Debug)]
pub(crate) struct ColumnData {
    // The user type ID of the data type of the column. Depending on the TDS version that is used,
    // valid values are 0x0000 or 0x00000000, with the exceptions of data type
    // TIMESTAMP (0x0050 or 0x00000050) and alias types (greater than 0x00FF or 0x000000FF).
    #[allow(dead_code)]
    pub(crate) user_type: u32,

    pub(crate) flags: Flags,

    pub(crate) type_info: TypeInfo,

    // TODO: pub(crate) table_name: Option<Vec<String>>,
    // TODO: crypto_meta_data: Option<CryptoMetaData>,

    // The column name. It contains the column name length and column name.
    pub(crate) col_name: String,
}
bitflags! {
    /// Per-column flags from the COLMETADATA token.
    #[cfg_attr(feature = "offline", derive(serde::Serialize, serde::Deserialize))]
    pub struct Flags: u16 {
        // Its value is 1 if the column is nullable.
        const NULLABLE = 0x0001;

        // Set to 1 for string columns with binary collation and always for the XML data type.
        // Set to 0 otherwise.
        const CASE_SEN = 0x0002;

        // usUpdateable is a 2-bit field. Its value is 0 if column is read-only, 1 if column is
        // read/write and 2 if updateable is unknown.
        const UPDATEABLE1 = 0x0004;
        const UPDATEABLE2 = 0x0008;

        // Its value is 1 if the column is an identity column.
        // NOTE(review): name is a typo for `IDENTITY`, kept because the
        // constant is public and renaming would break callers.
        const IDENITTY = 0x0010;

        // Its value is 1 if the column is a COMPUTED column.
        const COMPUTED = 0x0020;

        // Its value is 1 if the column is a fixed-length common language runtime
        // user-defined type (CLR UDT).
        const FIXED_LEN_CLR_TYPE = 0x0100;

        // fSparseColumnSet, introduced in TDS version 7.3.B, is a bit flag. Its value is 1 if the
        // column is the special XML column for the sparse column set. For information about using
        // column sets, see [MSDN-ColSets]
        const SPARSE_COLUMN_SET = 0x0200;

        // Its value is 1 if the column is encrypted transparently and
        // has to be decrypted to view the plaintext value. This flag is valid when the column
        // encryption feature is negotiated between client and server and is turned on.
        const ENCRYPTED = 0x0400;

        // Its value is 1 if the column is part of a hidden primary key created to support a
        // T-SQL SELECT statement containing FOR BROWSE.
        const HIDDEN = 0x0800;

        // Its value is 1 if the column is part of a primary key for the row
        // and the T-SQL SELECT statement contains FOR BROWSE.
        const KEY = 0x1000;

        // Its value is 1 if it is unknown whether the column might be nullable.
        const NULLABLE_UNKNOWN = 0x2000;
    }
}
impl ColMetaData {
    /// Parses a COLMETADATA token, replacing the contents of `columns` and
    /// `column_names` with the freshly decoded column set.
    pub(crate) fn get(
        buf: &mut Bytes,
        columns: &mut Vec<MssqlColumn>,
        column_names: &mut HashMap<UStr, usize>,
    ) -> Result<(), Error> {
        columns.clear();
        column_names.clear();

        let raw_count = buf.get_u16_le();

        // In the event that the client requested no metadata to be returned,
        // the value of Count will be 0xFFFF. This has the same effect on Count
        // as a zero value (for example, no ColumnData is sent).
        let count = if raw_count == 0xffff {
            0
        } else {
            raw_count as usize
        };

        columns.reserve(count);

        for ordinal in 0..count {
            let col = MssqlColumn::new(ColumnData::get(buf)?, ordinal);

            column_names.insert(col.name.clone(), ordinal);
            columns.push(col);
        }

        Ok(())
    }
}
impl ColumnData {
    /// Decodes one ColumnData entry from a COLMETADATA token.
    fn get(buf: &mut Bytes) -> Result<Self, Error> {
        let user_type = buf.get_u32_le();
        // Unknown flag bits are silently dropped by `from_bits_truncate`.
        let flags = Flags::from_bits_truncate(buf.get_u16_le());
        let type_info = TypeInfo::get(buf)?;

        // TODO: table_name
        // TODO: crypto_meta_data

        // The column name is a B_VARCHAR (u8 length + UTF-16LE data).
        let name = buf.get_b_varchar()?;

        Ok(Self {
            user_type,
            flags,
            type_info,
            col_name: name,
        })
    }
}

View file

@ -1,55 +0,0 @@
use bitflags::bitflags;
use bytes::{Buf, Bytes};
use crate::error::Error;
/// A DONE token: marks completion of a SQL statement within the token stream.
#[derive(Debug)]
pub(crate) struct Done {
    pub(crate) status: Status,

    // The token of the current SQL statement. The token value is provided and controlled by the
    // application layer, which utilizes TDS. The TDS layer does not evaluate the value.
    #[allow(dead_code)]
    cursor_command: u16,

    // The count of rows that were affected by the SQL statement. The value of DoneRowCount is
    // valid if the value of Status includes DONE_COUNT.
    pub(crate) affected_rows: u64, // NOTE: u32 before TDS 7.2
}
impl Done {
pub(crate) fn get(buf: &mut Bytes) -> Result<Self, Error> {
let status = Status::from_bits_truncate(buf.get_u16_le());
let cursor_command = buf.get_u16_le();
let affected_rows = buf.get_u64_le();
Ok(Self {
affected_rows,
status,
cursor_command,
})
}
}
bitflags! {
    /// Status bit flags carried by a DONE/DONEPROC/DONEINPROC token.
    pub struct Status: u16 {
        // This DONEINPROC message is not the final DONE/DONEPROC/DONEINPROC message in
        // the response; more data streams are to follow.
        const DONE_MORE = 0x0001;

        // An error occurred on the current SQL statement or execution of a stored procedure was
        // interrupted. A preceding ERROR token SHOULD be sent when this bit is set.
        const DONE_ERROR = 0x0002;

        // A transaction is in progress.
        const DONE_INXACT = 0x0004;

        // The DoneRowCount value is valid. This is used to distinguish between a valid value of 0
        // for DoneRowCount or just an initialized variable.
        const DONE_COUNT = 0x0010;

        // Used in place of DONE_ERROR when an error occurred on the current SQL statement that is
        // severe enough to require the result set, if any, to be discarded.
        const DONE_SRVERROR = 0x0100;
    }
}

View file

@ -1,65 +0,0 @@
use bytes::{Buf, Bytes};
use crate::error::Error;
use crate::mssql::io::MssqlBufExt;
/// An ENVCHANGE token: a server notification that some aspect of the
/// connection environment (database, language, transaction state, ...) changed.
#[derive(Debug)]
#[allow(dead_code)]
pub(crate) enum EnvChange {
    Database(String),
    Language(String),
    CharacterSet(String),
    PacketSize(String),
    UnicodeDataSortingLocalId(String),
    UnicodeDataSortingComparisonFlags(String),
    SqlCollation(Bytes),

    // TDS 7.2+
    BeginTransaction(u64),
    CommitTransaction(u64),
    RollbackTransaction(u64),
    EnlistDtcTransaction,
    DefectTransaction,
    RealTimeLogShipping,
    PromoteTransaction,
    TransactionManagerAddress,
    TransactionEnded,
    ResetConnectionCompletionAck,
    LoginRequestUserNameAck,

    // TDS 7.4+
    RoutingInformation,
}
impl EnvChange {
    /// Decodes an ENVCHANGE token from the stream.
    ///
    /// Only the change types the driver handles are decoded; any other type
    /// yields a protocol error.
    pub(crate) fn get(buf: &mut Bytes) -> Result<Self, Error> {
        // Token length covers the 1-byte change type plus its payload.
        let len = buf.get_u16_le();

        // Robustness fix: a zero length is malformed (the type byte alone
        // makes the minimum valid length 1), and `len - 1` below would
        // underflow — panicking in debug builds and attempting a huge
        // `split_to` (also a panic) in release builds.
        if len == 0 {
            return Err(err_protocol!("ENVCHANGE token has zero length"));
        }

        let ty = buf.get_u8();
        let mut data = buf.split_to((len - 1) as usize);

        Ok(match ty {
            1 => EnvChange::Database(data.get_b_varchar()?),
            2 => EnvChange::Language(data.get_b_varchar()?),
            3 => EnvChange::CharacterSet(data.get_b_varchar()?),
            4 => EnvChange::PacketSize(data.get_b_varchar()?),
            5 => EnvChange::UnicodeDataSortingLocalId(data.get_b_varchar()?),
            6 => EnvChange::UnicodeDataSortingComparisonFlags(data.get_b_varchar()?),
            7 => EnvChange::SqlCollation(data.get_b_varbyte()),

            // Type 8 carries the new transaction descriptor as a B_VARBYTE.
            8 => EnvChange::BeginTransaction(data.get_b_varbyte().get_u64_le()),

            // Types 9/10 skip a length byte, then read the descriptor directly.
            9 => {
                let _ = data.get_u8();
                EnvChange::CommitTransaction(data.get_u64_le())
            }

            10 => {
                let _ = data.get_u8();
                EnvChange::RollbackTransaction(data.get_u64_le())
            }

            _ => {
                return Err(err_protocol!("unexpected value {} for ENVCHANGE Type", ty));
            }
        })
    }
}

View file

@ -1,54 +0,0 @@
use crate::mssql::io::MssqlBufExt;
use bytes::{Buf, Bytes};
/// The body of an ERROR token sent by the server.
#[derive(Debug)]
pub(crate) struct Error {
    // The error number
    pub(crate) number: i32,

    // The error state, used as a modifier to the error number.
    pub(crate) state: u8,

    // The class (severity) of the error. A class of less than 10 indicates
    // an informational message.
    pub(crate) class: u8,

    // The message text length and message text using US_VARCHAR format.
    pub(crate) message: String,

    // The server name length and server name using B_VARCHAR format
    pub(crate) server: String,

    // The stored procedure name length and the stored procedure name using B_VARCHAR format
    pub(crate) procedure: String,

    // The line number in the SQL batch or stored procedure that caused the error. Line numbers
    // begin at 1. If the line number is not applicable to the message, the
    // value of LineNumber is 0.
    pub(crate) line: i32,
}
impl Error {
    /// Parses an ERROR token: a u16 (LE) length prefix followed by the token body.
    pub(crate) fn get(buf: &mut Bytes) -> Result<Self, crate::error::Error> {
        let token_len = buf.get_u16_le();
        let mut body = buf.split_to(token_len as usize);

        // Struct fields are listed (and thus evaluated) in wire order.
        Ok(Self {
            number: body.get_i32_le(),
            state: body.get_u8(),
            class: body.get_u8(),
            message: body.get_us_varchar()?,
            server: body.get_b_varchar()?,
            procedure: body.get_b_varchar()?,
            line: body.get_i32_le(),
        })
    }
}

View file

@ -1,45 +0,0 @@
use crate::io::Encode;
/// ALL_HEADERS wrapper: a u32 total length followed by each contained header.
pub(crate) struct AllHeaders<'a>(pub(crate) &'a [Header]);

impl Encode<'_> for AllHeaders<'_> {
    fn encode_with(&self, buf: &mut Vec<u8>, _: ()) {
        let start = buf.len();

        // Reserve 4 bytes for [TotalLength]; patched below once it is known.
        buf.extend_from_slice(&[0u8; 4]);

        for header in self.0 {
            header.encode_with(buf, ());
        }

        // Total length includes the 4-byte length field itself.
        let total = (buf.len() - start) as u32;
        buf[start..start + 4].copy_from_slice(&total.to_le_bytes());
    }
}
/// A single stream header within an ALL_HEADERS block.
pub(crate) enum Header {
    TransactionDescriptor {
        // number of requests currently active on the connection
        outstanding_request_count: u32,

        // for each connection, a number that uniquely identifies the transaction with which the
        // request is associated; initially generated by the server when a new transaction is
        // created and returned to the client as part of the ENVCHANGE token stream
        transaction_descriptor: u64,
    },
}
impl Encode<'_> for Header {
    fn encode_with(&self, buf: &mut Vec<u8>, _: ()) {
        match self {
            Header::TransactionDescriptor {
                outstanding_request_count,
                transaction_descriptor,
            } => {
                // [HeaderLength] = 4 (length) + 2 (type) + 8 (descriptor) + 4 (count)
                buf.extend_from_slice(&18_u32.to_le_bytes());

                // [HeaderType] 2 = transaction descriptor
                buf.extend_from_slice(&2_u16.to_le_bytes());

                buf.extend_from_slice(&transaction_descriptor.to_le_bytes());
                buf.extend_from_slice(&outstanding_request_count.to_le_bytes());
            }
        }
    }
}

View file

@ -1,59 +0,0 @@
use bytes::{Buf, Bytes};
use crate::error::Error;
use crate::mssql::io::MssqlBufExt;
/// The body of an INFO token (an informational, non-fatal server message).
///
/// Layout mirrors the ERROR token body in `error.rs`, with unsigned
/// number/line fields.
#[allow(dead_code)]
#[derive(Debug)]
pub(crate) struct Info {
    pub(crate) number: u32,
    pub(crate) state: u8,
    pub(crate) class: u8,
    pub(crate) message: String,
    pub(crate) server: String,
    pub(crate) procedure: String,
    pub(crate) line: u32,
}
impl Info {
    /// Parses an INFO token: a u16 (LE) length prefix followed by the token body.
    pub(crate) fn get(buf: &mut Bytes) -> Result<Self, Error> {
        let token_len = buf.get_u16_le();
        let mut body = buf.split_to(token_len as usize);

        // Struct fields are listed (and thus evaluated) in wire order.
        Ok(Self {
            number: body.get_u32_le(),
            state: body.get_u8(),
            class: body.get_u8(),
            message: body.get_us_varchar()?,
            server: body.get_b_varchar()?,
            procedure: body.get_b_varchar()?,
            line: body.get_u32_le(),
        })
    }
}
#[test]
fn test_get() {
    // Fixture bytes for an INFO token ("Changed language setting to
    // us_english."); the assertions below pin its decoded fields.
    #[rustfmt::skip]
    let mut buf = Bytes::from_static(&[
        0x74, 0, 0x47, 0x16, 0, 0, 1, 0, 0x27, 0, 0x43, 0, 0x68, 0, 0x61, 0, 0x6e, 0, 0x67, 0, 0x65, 0, 0x64, 0, 0x20, 0, 0x6c, 0, 0x61, 0, 0x6e, 0, 0x67, 0, 0x75, 0, 0x61, 0, 0x67, 0, 0x65, 0, 0x20, 0, 0x73, 0, 0x65, 0, 0x74, 0, 0x74, 0, 0x69, 0, 0x6e, 0, 0x67, 0, 0x20, 0, 0x74, 0, 0x6f, 0, 0x20, 0, 0x75, 0, 0x73, 0, 0x5f, 0, 0x65, 0, 0x6e, 0, 0x67, 0, 0x6c, 0, 0x69, 0, 0x73, 0, 0x68, 0, 0x2e, 0, 0xc, 0x61, 0, 0x62, 0, 0x64, 0, 0x30, 0, 0x62, 0, 0x36, 0, 0x37, 0, 0x62, 0, 0x64, 0, 0x34, 0, 0x39, 0, 0x33, 0, 0, 1, 0, 0, 0, 0xad, 0x36, 0, 1, 0x74, 0, 0, 4, 0x16, 0x4d, 0, 0x69, 0, 0x63, 0, 0x72, 0, 0x6f, 0, 0x73, 0, 0x6f, 0, 0x66, 0, 0x74, 0, 0x20, 0, 0x53, 0, 0x51, 0, 0x4c, 0, 0x20, 0, 0x53, 0, 0x65, 0, 0x72, 0, 0x76, 0, 0x65, 0, 0x72, 0, 0, 0, 0, 0, 0xf, 0, 0x10, 0x7f, 0xe3, 0x13, 0, 4, 4, 0x34, 0, 0x30, 0, 0x39, 0, 0x36, 0, 4, 0x34, 0, 0x30, 0, 0x39, 0, 0x36, 0, 0xfd, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
    ]);

    let info = Info::get(&mut buf).unwrap();

    assert_eq!(info.number, 5703);
    assert_eq!(info.state, 1);
    assert_eq!(info.class, 0);
    assert_eq!(info.message, "Changed language setting to us_english.");
    assert_eq!(info.server, "abd0b67bd493");
    assert_eq!(info.procedure, "");
    assert_eq!(info.line, 1);
}

View file

@ -1,243 +0,0 @@
use crate::io::Encode;
use crate::mssql::io::MssqlBufMutExt;
/// The LOGIN7 message: defines the authentication rules and connection
/// parameters sent by the client at the start of a TDS session.
#[derive(Debug)]
pub struct Login7<'a> {
    // Highest TDS version supported by the client.
    pub version: u32,
    // Packet size requested by the client.
    pub packet_size: u32,
    // Version of the client interface library.
    pub client_program_version: u32,
    // Process ID of the client application.
    pub client_pid: u32,
    pub hostname: &'a str,
    pub username: &'a str,
    pub password: &'a str,
    pub app_name: &'a str,
    pub server_name: &'a str,
    pub client_interface_name: &'a str,
    pub language: &'a str,
    pub database: &'a str,
    // Unique client ID; may be all zero.
    pub client_id: [u8; 6],
}
impl Encode<'_> for Login7<'_> {
    // Encodes the LOGIN7 fixed header, then the offset/length table, then the
    // variable-length string data. Offsets in the table are patched as each
    // string is appended; the total length is patched last.
    fn encode_with(&self, buf: &mut Vec<u8>, _: ()) {
        // [Length] The total length of the LOGIN7 structure.
        let beg = buf.len();
        buf.extend(&0_u32.to_le_bytes());

        // [TDSVersion] The highest TDS version supported by the client.
        buf.extend(&self.version.to_le_bytes());

        // [PacketSize] The packet size being requested by the client.
        buf.extend(&self.packet_size.to_le_bytes());

        // [ClientProgVer] The version of the **interface** library.
        buf.extend(&self.client_program_version.to_le_bytes());

        // [ClientPID] The process ID of the client application.
        buf.extend(&self.client_pid.to_le_bytes());

        // [ConnectionID] The connection ID of the primary server.
        buf.extend(&0_u32.to_le_bytes());

        // [OptionFlags1]
        //   7 | SET_LANG_ON (1) Require a warning message for a language choice statement
        //   6 | INIT_DB_FATAL (1) Fail to change to initial database should be fatal
        //   5 | USE_DB_ON (1) Require a warning message for a db change statement
        //   4 | DUMPLOAD_OFF (0)
        // 3-2 | FLOAT_IEEE_754 (0)
        //   1 | CHARSET_ASCII (0)
        //   0 | ORDER_X86 (0)
        buf.push(0b11_10_00_00);

        // [OptionsFlags2]
        //   6 | INTEGRATED_SECURITY_OFF (0)
        // 5-4 | USER_NORMAL (0)
        //   3 | <fCacheConnect>
        //   2 | <fTransBoundary>
        //   1 | ODBC_ON (1)
        //   0 | INIT_LANG_FATAL (1)
        buf.push(0b00_00_00_11);

        // [TypeFlags]
        //   2 | <fReadOnlyIntent>
        //   1 | OLEDB_OFF (0)
        //   0 | SQL_DFLT (0)
        buf.push(0);

        // [OptionFlags3]
        //   4 | <fExtension>
        //   3 | <fUnknownCollationHandling>
        //   2 | <fUserInstance>
        //   1 | <fSendYukonBinaryXML>
        //   0 | <fChangePassword>
        buf.push(0);

        // [ClientTimeZone] This field is not used and can be set to zero.
        buf.extend(&0_u32.to_le_bytes());

        // [ClientLanguageCodeIdentifier] The language code identifier (LCID) value for
        // the client collation.
        buf.extend(&0_u32.to_le_bytes());

        // [OffsetLength] pre-allocate a space for all offset, length pairs
        let mut offsets = buf.len();
        buf.resize(buf.len() + 58, 0);

        // [Hostname] The client machine name
        write_str(buf, &mut offsets, beg, self.hostname);

        // [UserName] The client user ID
        write_str(buf, &mut offsets, beg, self.username);

        // [Password] The password supplied by the client
        let password_start = buf.len();
        write_str(buf, &mut offsets, beg, self.password);

        // Before submitting a password from the client to the server, for every byte in the
        // password buffer starting with the position pointed to by ibPassword or
        // ibChangePassword, the client SHOULD first swap the four high bits with
        // the four low bits and then do a bit-XOR with 0xA5 (10100101).
        for i in password_start..buf.len() {
            let b = buf[i];
            buf[i] = ((b << 4) & 0xf0 | (b >> 4) & 0x0f) ^ 0xa5;
        }

        // [AppName] The client application name
        write_str(buf, &mut offsets, beg, self.app_name);

        // [ServerName] The server name
        write_str(buf, &mut offsets, beg, self.server_name);

        // [Extension] Points to an extension block.
        // TODO: Implement to get FeatureExt which should let us use UTF-8
        // (offset written, zero-length field: skip its length slot)
        write_offset(buf, &mut offsets, beg);
        offsets += 2;

        // [CltIntName] The interface library name
        write_str(buf, &mut offsets, beg, self.client_interface_name);

        // [Language] The initial language (overrides the user IDs language)
        write_str(buf, &mut offsets, beg, self.language);

        // [Database] The initial database (overrides the user IDs database)
        write_str(buf, &mut offsets, beg, self.database);

        // [ClientID] The unique client ID. Can be all zero.
        buf[offsets..(offsets + 6)].copy_from_slice(&self.client_id);
        offsets += 6;

        // [SSPI] SSPI data
        write_offset(buf, &mut offsets, beg);
        offsets += 2;

        // [AtchDBFile] The file name for a database that is to be attached
        write_offset(buf, &mut offsets, beg);
        offsets += 2;

        // [ChangePassword] New password for the specified login
        write_offset(buf, &mut offsets, beg);

        // Establish the length of the entire structure
        let len = buf.len();
        buf[beg..beg + 4].copy_from_slice(&((len - beg) as u32).to_le_bytes());
    }
}
// Patches the next u16 slot in the offset table with the current write
// position, relative to the beginning of the packet payload (`beg`), and
// advances the table cursor.
fn write_offset(buf: &mut Vec<u8>, offsets: &mut usize, beg: usize) {
    let rel = (buf.len() - beg) as u16;
    buf[*offsets..(*offsets + 2)].copy_from_slice(&rel.to_le_bytes());
    *offsets += 2;
}
// Writes one offset/length table entry for `s` and appends the string data.
fn write_str(buf: &mut Vec<u8>, offsets: &mut usize, beg: usize, s: &str) {
    // Write the offset
    write_offset(buf, offsets, beg);

    // Write the length, in UCS-2 (UTF-16) code units. Fix: this previously
    // used `s.len()`, the UTF-8 *byte* length, which over-counts for any
    // non-ASCII string and disagreed with the UTF-16 data written below.
    let len = s.encode_utf16().count() as u16;
    buf[*offsets..(*offsets + 2)].copy_from_slice(&len.to_le_bytes());
    *offsets += 2;

    // Encode the character sequence as UCS-2 (precursor to UTF16-LE)
    buf.put_utf16_str(s);
}
#[test]
fn test_encode_login() {
    let mut buf = Vec::new();

    let login = Login7 {
        version: 0x72090002,
        client_program_version: 0x07_00_00_00,
        client_pid: 0x0100,
        packet_size: 0x1000,
        hostname: "skostov1",
        username: "sa",
        password: "",
        app_name: "OSQL-32",
        server_name: "",
        client_interface_name: "ODBC",
        language: "",
        database: "",
        client_id: [0x00, 0x50, 0x8B, 0xE2, 0xB7, 0x8F],
    };

    // Adapted from v20191101 of MS-TDS
    #[rustfmt::skip]
    let expected = vec![
        // Packet Header
        /* 0x10, 0x01, 0x00, 0x90, 0x00, 0x00, 0x01, 0x00, */

        0x88, 0x00, 0x00, 0x00, // Length
        0x02, 0x00, 0x09, 0x72, // TDS Version = SQL Server 2005
        0x00, 0x10, 0x00, 0x00, // Packet Size = 0x1000 (4096)
        0x00, 0x00, 0x00, 0x07, // Client Program Version = 0x07000000
        0x00, 0x01, 0x00, 0x00, // Client PID = 0x0100
        0x00, 0x00, 0x00, 0x00, // Connection ID
        0xE0, // [OptionFlags1] 0b1110_0000
        0x03, // [OptionFlags2] 0b0000_0011
        0x00, // [TypeFlags]
        0x00, // [OptionFlags3]
        0x00, 0x00, 0x00, 0x00, // [ClientTimeZone]
        0x00, 0x00, 0x00, 0x00, // [ClientLCID]
        0x5E, 0x00, // [ibHostName]
        0x08, 0x00, // [cchHostName]
        0x6E, 0x00, // [ibUserName]
        0x02, 0x00, // [cchUserName]
        0x72, 0x00, // [ibPassword]
        0x00, 0x00, // [cchPassword]
        0x72, 0x00, // [ibAppName]
        0x07, 0x00, // [cchAppName]
        0x80, 0x00, // [ibServerName]
        0x00, 0x00, // [cchServerName]
        0x80, 0x00, // [ibUnused]
        0x00, 0x00, // [cbUnused]
        0x80, 0x00, // [ibCltIntName]
        0x04, 0x00, // [cchCltIntName]
        0x88, 0x00, // [ibLanguage]
        0x00, 0x00, // [cchLanguage]
        0x88, 0x00, // [ibDatabase]
        0x00, 0x00, // [chDatabase]
        0x00, 0x50, 0x8B, // [ClientID]
        0xE2, 0xB7, 0x8F,
        0x88, 0x00, // [ibSSPI]
        0x00, 0x00, // [cchSSPI]
        0x88, 0x00, // [ibAtchDBFile]
        0x00, 0x00, // [cchAtchDBFile]
        0x88, 0x00, // [ibChangePassword]
        0x00, 0x00, // [cchChangePassword]
        0x00, 0x00, 0x00, 0x00, // [cbSSPILong]
        0x73, 0x00, 0x6B, 0x00, 0x6F, 0x00, 0x73, 0x00, 0x74, 0x00, // [Data]
        0x6F, 0x00, 0x76, 0x00, 0x31, 0x00, 0x73, 0x00, 0x61, 0x00,
        0x4F, 0x00, 0x53, 0x00, 0x51, 0x00, 0x4C, 0x00, 0x2D, 0x00,
        0x33, 0x00, 0x32, 0x00, 0x4F, 0x00, 0x44, 0x00, 0x42, 0x00,
        0x43, 0x00,
    ];

    login.encode(&mut buf);

    assert_eq!(expected, buf);
}

View file

@ -1,60 +0,0 @@
use bytes::{Buf, Bytes};
use crate::error::Error;
use crate::mssql::io::MssqlBufExt;
use crate::mssql::protocol::pre_login::Version;
#[allow(dead_code)]
#[derive(Debug)]
pub(crate) struct LoginAck {
pub(crate) interface: u8,
pub(crate) tds_version: u32,
pub(crate) program_name: String,
pub(crate) program_version: Version,
}
impl LoginAck {
    /// Parses a LOGINACK token: a u16 (LE) length prefix followed by the body.
    pub(crate) fn get(buf: &mut Bytes) -> Result<Self, Error> {
        let token_len = buf.get_u16_le();
        let mut body = buf.split_to(token_len as usize);

        let interface = body.get_u8();
        let tds_version = body.get_u32_le();
        let program_name = body.get_b_varchar()?;

        // NOTE(review): the build number is read big-endian (`get_u16`) while
        // the rest of the token is little-endian — preserved as-is; the test
        // below depends on it.
        let major = body.get_u8();
        let minor = body.get_u8();
        let build = body.get_u16();

        Ok(Self {
            interface,
            tds_version,
            program_name,
            program_version: Version {
                major,
                minor,
                build,
                sub_build: 0,
            },
        })
    }
}
#[test]
fn test_get() {
    // Fixture fix: the byte previously written as decimal `51` ('3') amid hex
    // literals is corrected to `0x51` ('Q') — the program name in a LOGINACK
    // stream is "Microsoft SQL Server", and the decimal literal was a typo
    // that the assertion ("Microsoft S3L Server") had been bent to match.
    #[rustfmt::skip]
    let mut buf = Bytes::from_static(&[
        0x36, 0, 1, 0x74, 0, 0, 4, 0x16, 0x4d, 0, 0x69, 0, 0x63, 0, 0x72, 0, 0x6f, 0, 0x73, 0, 0x6f, 0, 0x66, 0, 0x74, 0, 0x20, 0, 0x53, 0, 0x51, 0, 0x4c, 0, 0x20, 0, 0x53, 0, 0x65, 0, 0x72, 0, 0x76, 0, 0x65, 0, 0x72, 0, 0, 0, 0, 0, 0xf, 0, 0x10, 0x7f, 0xe3, 0x13, 0, 4, 4, 0x34, 0, 0x30, 0, 0x39, 0, 0x36, 0, 4, 0x34, 0, 0x30, 0, 0x39, 0, 0x36, 0, 0xfd, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
    ]);

    let login_ack = LoginAck::get(&mut buf).unwrap();

    assert_eq!(login_ack.interface, 1);
    assert_eq!(login_ack.tds_version, 67108980);
    assert_eq!(login_ack.program_version.major, 15);
    assert_eq!(login_ack.program_version.minor, 0);
    assert_eq!(login_ack.program_version.build, 4223);
    assert_eq!(login_ack.program_version.sub_build, 0);
    assert_eq!(login_ack.program_name, "Microsoft SQL Server\0\0");
}

View file

@ -1,64 +0,0 @@
use bytes::{Buf, Bytes};
use crate::mssql::protocol::done::Done;
use crate::mssql::protocol::login_ack::LoginAck;
use crate::mssql::protocol::order::Order;
use crate::mssql::protocol::return_status::ReturnStatus;
use crate::mssql::protocol::return_value::ReturnValue;
use crate::mssql::protocol::row::Row;
/// A fully decoded token-stream message, carrying its parsed payload.
#[derive(Debug)]
pub(crate) enum Message {
    LoginAck(LoginAck),
    Done(Done),
    DoneInProc(Done),
    DoneProc(Done),
    Row(Row),
    ReturnStatus(ReturnStatus),
    ReturnValue(ReturnValue),
    Order(Order),
}

/// The kind of a token-stream message, decoded from its leading type byte
/// (see [`MessageType::get`] for the byte values).
#[derive(Debug)]
pub(crate) enum MessageType {
    Info,
    LoginAck,
    EnvChange,
    Done,
    DoneProc,
    DoneInProc,
    Row,
    NbcRow,
    Error,
    ColMetaData,
    ReturnStatus,
    ReturnValue,
    Order,
}
impl MessageType {
    /// Reads the token-type byte and maps it to a `MessageType`, failing on
    /// any value the driver does not recognize.
    pub(crate) fn get(buf: &mut Bytes) -> Result<Self, crate::error::Error> {
        // Arms ordered by token byte value.
        Ok(match buf.get_u8() {
            0x79 => MessageType::ReturnStatus,
            0x81 => MessageType::ColMetaData,
            0xa9 => MessageType::Order,
            0xaa => MessageType::Error,
            0xab => MessageType::Info,
            0xac => MessageType::ReturnValue,
            0xad => MessageType::LoginAck,
            0xd1 => MessageType::Row,
            0xd2 => MessageType::NbcRow,
            0xe3 => MessageType::EnvChange,
            0xfd => MessageType::Done,
            0xfe => MessageType::DoneProc,
            0xff => MessageType::DoneInProc,

            ty => {
                return Err(err_protocol!(
                    "unknown value `0x{:02x?}` for message type in token stream",
                    ty
                ));
            }
        })
    }
}

View file

@ -1,18 +0,0 @@
//! Implementation of the TDS wire protocol: packets and token streams.

pub(crate) mod col_meta_data;
pub(crate) mod done;
pub(crate) mod env_change;
pub(crate) mod error;
pub(crate) mod header;
pub(crate) mod info;
pub(crate) mod login;
pub(crate) mod login_ack;
pub(crate) mod message;
pub(crate) mod order;
pub(crate) mod packet;
pub(crate) mod pre_login;
pub(crate) mod return_status;
pub(crate) mod return_value;
pub(crate) mod row;
pub(crate) mod rpc;
pub(crate) mod sql_batch;
pub(crate) mod type_info;

View file

@ -1,18 +0,0 @@
use bytes::{Buf, Bytes};
use crate::error::Error;
/// An ORDER token: the column ordinals the result set is ordered by.
#[derive(Debug)]
pub(crate) struct Order {
    // Raw ordinal bytes; currently retained but not decoded further.
    #[allow(dead_code)]
    columns: Bytes,
}
impl Order {
    /// Parses an ORDER token: a u16 (LE) length prefix followed by the raw
    /// column-ordinal bytes, which are kept unparsed.
    pub(crate) fn get(buf: &mut Bytes) -> Result<Self, Error> {
        let data_len = buf.get_u16_le() as usize;

        Ok(Self {
            columns: buf.split_to(data_len),
        })
    }
}

View file

@ -1,138 +0,0 @@
use bitflags::bitflags;
use bytes::{Buf, Bytes};
use crate::error::Error;
use crate::io::{Decode, Encode};
/// The 8-byte header that prefixes every TDS packet.
#[derive(Debug)]
pub(crate) struct PacketHeader {
    // Type defines the type of message. Type is a 1-byte unsigned char.
    pub(crate) r#type: PacketType,

    // Status is a bit field used to indicate the message state. Status is a 1-byte unsigned char.
    pub(crate) status: Status,

    // Length is the size of the packet including the 8 bytes in the packet header.
    pub(crate) length: u16,

    // The process ID on the server, corresponding to the current connection.
    pub(crate) server_process_id: u16,

    // Packet ID is used for numbering message packets that contain data in addition to the packet
    // header. Packet ID is a 1-byte, unsigned char. Each time packet data is sent, the value of
    // PacketID is incremented by 1, modulo 256. This allows the receiver to track the sequence
    // of TDS packets for a given message. This value is currently ignored.
    pub(crate) packet_id: u8,
}

impl<'s> Encode<'s, &'s mut usize> for PacketHeader {
    // Writes the header; `offset` receives the buffer position of the length
    // field so the caller can patch in the final length afterwards.
    fn encode_with(&self, buf: &mut Vec<u8>, offset: &'s mut usize) {
        buf.push(self.r#type as u8);
        buf.push(self.status.bits());

        *offset = buf.len();

        // Multi-byte header fields are big-endian on the wire; note the
        // matching big-endian reads in `decode_with` below.
        buf.extend(&self.length.to_be_bytes());
        buf.extend(&self.server_process_id.to_be_bytes());
        buf.push(self.packet_id);

        // window, unused
        buf.push(0);
    }
}

impl Decode<'_> for PacketHeader {
    fn decode_with(mut buf: Bytes, _: ()) -> Result<Self, Error> {
        Ok(Self {
            r#type: PacketType::get(buf.get_u8())?,
            status: Status::from_bits_truncate(buf.get_u8()),
            // `get_u16` (no `_le` suffix) reads big-endian, mirroring the encoder.
            length: buf.get_u16(),
            server_process_id: buf.get_u16(),
            packet_id: buf.get_u8(),
        })
    }
}
/// The message type carried in a TDS packet header.
#[derive(Debug, Copy, PartialEq, Clone)]
pub(crate) enum PacketType {
    // Pre-login. Should always be #18 unless we decide to try and support pre 7.0 TDS
    PreTds7Login = 2,
    PreLogin = 18,

    SqlBatch = 1,
    Rpc = 3,
    AttentionSignal = 6,
    BulkLoadData = 7,
    FederatedAuthToken = 8,
    TransactionManagerRequest = 14,
    Tds7Login = 16,
    Sspi = 17,
    TabularResult = 4,
}

impl PacketType {
    /// Maps a raw header byte to a `PacketType`, failing on unknown values.
    pub fn get(value: u8) -> Result<Self, Error> {
        Ok(match value {
            1 => PacketType::SqlBatch,
            2 => PacketType::PreTds7Login,
            3 => PacketType::Rpc,
            4 => PacketType::TabularResult,
            6 => PacketType::AttentionSignal,
            7 => PacketType::BulkLoadData,
            8 => PacketType::FederatedAuthToken,
            14 => PacketType::TransactionManagerRequest,
            16 => PacketType::Tds7Login,
            17 => PacketType::Sspi,
            18 => PacketType::PreLogin,

            ty => {
                return Err(err_protocol!("unknown packet type: {}", ty));
            }
        })
    }
}
// Status is a bit field used to indicate the message state. Status is a 1-byte unsigned char.
// The following Status bit flags are defined.
bitflags! {
    /// Status bit flags from the TDS packet header.
    pub(crate) struct Status: u8 {
        // "Normal" message.
        const NORMAL = 0x00;

        // End of message (EOM). The packet is the last packet in the whole request.
        const END_OF_MESSAGE = 0x01;

        // (From client to server) Ignore this event (0x01 MUST also be set).
        const IGNORE_EVENT = 0x02;

        // RESETCONNECTION
        //
        // (Introduced in TDS 7.1)
        //
        // (From client to server) Reset this connection
        // before processing event. Only set for event types Batch, RPC, or Transaction Manager
        // request. If clients want to set this bit, it MUST be part of the first packet of the
        // message. This signals the server to clean up the environment state of the connection
        // back to the default environment setting, effectively simulating a logout and a
        // subsequent login, and provides server support for connection pooling. This bit SHOULD
        // be ignored if it is set in a packet that is not the first packet of the message.
        //
        // This status bit MUST NOT be set in conjunction with the RESETCONNECTIONSKIPTRAN bit.
        // Distributed transactions and isolation levels will not be reset.
        const RESET_CONN = 0x08;

        // RESETCONNECTIONSKIPTRAN
        //
        // (Introduced in TDS 7.3)
        //
        // (From client to server) Reset the
        // connection before processing event but do not modify the transaction state (the
        // state will remain the same before and after the reset). The transaction in the
        // session can be a local transaction that is started from the session or it can
        // be a distributed transaction in which the session is enlisted. This status bit
        // MUST NOT be set in conjunction with the RESETCONNECTION bit.
        // Otherwise identical to RESETCONNECTION.
        const RESET_CONN_SKIP_TRAN = 0x10;
    }
}

View file

@ -1,311 +0,0 @@
use std::fmt::{self, Display, Formatter};
use bitflags::bitflags;
use bytes::{Buf, Bytes};
use uuid::Uuid;
use crate::error::Error;
use crate::io::{Decode, Encode};
/// A message sent by the client to set up context for login. The server responds to a client
/// `PRELOGIN` message with a message of packet header type `0x04` and the packet data
/// containing a `PRELOGIN` structure.
#[derive(Debug, Default)]
pub(crate) struct PreLogin<'a> {
    // Sender version (VERSION option; required, must be first on the wire).
    pub(crate) version: Version,
    // Negotiated wire-encryption setting (ENCRYPTION option; required).
    pub(crate) encryption: Encrypt,
    // Optional named-instance to connect to (INSTOPT option).
    pub(crate) instance: Option<&'a str>,
    // Optional client thread id, for debugging (THREADID option).
    pub(crate) thread_id: Option<u32>,
    // Optional client trace/activity identifiers (TRACEID option).
    pub(crate) trace_id: Option<TraceId>,
    // Optional MARS negotiation flag (MARS option).
    pub(crate) multiple_active_result_sets: Option<bool>,
}
impl<'de> Decode<'de> for PreLogin<'de> {
    /// Parses a PRELOGIN response body.
    ///
    /// The body starts with an option table: repeated 5-byte entries of
    /// (token u8, offset u16, length u16), terminated by a single 0xFF byte.
    /// Each entry's offset/length point into the same buffer, where the
    /// option's data lives.
    ///
    /// Only the required VERSION and ENCRYPTION options are currently
    /// decoded; any other recognized token hits a `todo!`.
    fn decode_with(buf: Bytes, _: ()) -> Result<Self, Error> {
        let mut version = None;
        let mut encryption = None;
        // TODO: Decode the remainder of the structure
        // let mut instance = None;
        // let mut thread_id = None;
        // let mut trace_id = None;
        // let mut multiple_active_result_sets = None;
        // `offsets` walks the option table; `buf` is kept intact so the
        // absolute offsets in each entry can be used to slice option data.
        let mut offsets = buf.clone();
        loop {
            let token = offsets.get_u8();
            match PreLoginOptionToken::get(token) {
                Some(token) => {
                    let offset = offsets.get_u16() as usize;
                    let size = offsets.get_u16() as usize;
                    // NOTE(review): this indexing panics (rather than erroring)
                    // if the server sends an offset/length past the end of the
                    // packet — presumably trusted input; confirm.
                    let mut data = &buf[offset..offset + size];
                    match token {
                        PreLoginOptionToken::Version => {
                            // VERSION payload: major u8, minor u8,
                            // build u16 BE, sub_build u16 BE (6 bytes).
                            let major = data.get_u8();
                            let minor = data.get_u8();
                            let build = data.get_u16();
                            let sub_build = data.get_u16();
                            version = Some(Version {
                                major,
                                minor,
                                build,
                                sub_build,
                            });
                        }
                        PreLoginOptionToken::Encryption => {
                            // ENCRYPTION payload: a single flags byte.
                            encryption = Some(Encrypt::from_bits_truncate(data.get_u8()));
                        }
                        tok => todo!("{:?}", tok),
                    }
                }
                // 0xFF is the option-table terminator.
                None if token == 0xff => {
                    break;
                }
                None => {
                    return Err(err_protocol!(
                        "PRELOGIN: unexpected login option token: 0x{:02?}",
                        token
                    )
                    .into());
                }
            }
        }
        // Both VERSION and ENCRYPTION are mandatory per the PRELOGIN rules.
        let version =
            version.ok_or(err_protocol!("PRELOGIN: missing required `version` option"))?;
        let encryption = encryption.ok_or(err_protocol!(
            "PRELOGIN: missing required `encryption` option"
        ))?;
        Ok(Self {
            version,
            encryption,
            ..Default::default()
        })
    }
}
impl Encode<'_> for PreLogin<'_> {
    /// Serializes the PRELOGIN message body.
    ///
    /// NOTE: Packet headers are written in MssqlStream::write
    ///
    /// Rules (MS-TDS):
    ///   PRELOGIN        = (*PRELOGIN_OPTION *PL_OPTION_DATA) / SSL_PAYLOAD
    ///   PRELOGIN_OPTION = (PL_OPTION_TOKEN PL_OFFSET PL_OPTION_LENGTH) / TERMINATOR
    fn encode_with(&self, buf: &mut Vec<u8>, _: ()) {
        use PreLoginOptionToken::*;

        // Count the number of set options; VERSION and ENCRYPTION are always written.
        let num_options = 2
            + self.instance.map_or(0, |_| 1)
            + self.thread_id.map_or(0, |_| 1)
            + self.trace_id.as_ref().map_or(0, |_| 1)
            + self.multiple_active_result_sets.map_or(0, |_| 1);

        // Calculate the length of the option offset block. Each entry is 5 bytes
        // (token + u16 offset + u16 length) and the block ends in a 1-byte terminator.
        let len_offsets = (num_options * 5) + 1;

        // `offsets` is the write cursor inside the reserved offset block;
        // `offset` tracks where the next option's data will begin, relative
        // to the start of the PRELOGIN body.
        let mut offsets = buf.len();
        let mut offset = len_offsets as u16;

        // Reserve a chunk for the offset block and set the final terminator.
        buf.resize(buf.len() + len_offsets, 0);
        let end_offsets = buf.len() - 1;
        buf[end_offsets] = 0xff;

        // NOTE: VERSION is a required token, and it MUST be the first token.
        Version.put(buf, &mut offsets, &mut offset, 6);
        self.version.encode(buf);

        Encryption.put(buf, &mut offsets, &mut offset, 1);
        buf.push(self.encryption.bits());

        if let Some(name) = self.instance {
            // Instance name is written as a NUL-terminated byte string.
            Instance.put(buf, &mut offsets, &mut offset, name.len() as u16 + 1);
            buf.extend_from_slice(name.as_bytes());
            buf.push(b'\0');
        }

        if let Some(id) = self.thread_id {
            ThreadId.put(buf, &mut offsets, &mut offset, 4);
            buf.extend_from_slice(&id.to_le_bytes());
        }

        if let Some(trace) = &self.trace_id {
            // FIX: previously wrote the THREADID token (0x03) for this option;
            // trace data must be announced with the TRACEID token (0x05).
            // Payload: GUID_CONNID (16) + GUID_ActivityID (16) + ActivitySequence (4) = 36.
            TraceId.put(buf, &mut offsets, &mut offset, 36);
            buf.extend_from_slice(trace.connection_id.as_bytes());
            buf.extend_from_slice(trace.activity_id.as_bytes());
            buf.extend_from_slice(&trace.activity_seq.to_be_bytes());
        }

        if let Some(mars) = &self.multiple_active_result_sets {
            MultipleActiveResultSets.put(buf, &mut offsets, &mut offset, 1);
            buf.push(*mars as u8);
        }
    }
}
// token value representing the option (PL_OPTION_TOKEN)
#[derive(Debug, Copy, Clone)]
#[repr(u8)]
enum PreLoginOptionToken {
    Version = 0x00,
    Encryption = 0x01,
    Instance = 0x02,
    ThreadId = 0x03,
    // Multiple Active Result Sets (MARS)
    MultipleActiveResultSets = 0x04,
    TraceId = 0x05,
    // NOTE: 0xFF is the option-table TERMINATOR, not a token; it is handled
    // out-of-band by the decoder rather than as a variant here.
}
impl PreLoginOptionToken {
fn put(self, buf: &mut Vec<u8>, pos: &mut usize, offset: &mut u16, len: u16) {
buf[*pos] = self as u8;
*pos += 1;
buf[*pos..(*pos + 2)].copy_from_slice(&offset.to_be_bytes());
*pos += 2;
buf[*pos..(*pos + 2)].copy_from_slice(&len.to_be_bytes());
*pos += 2;
*offset += len;
}
fn get(b: u8) -> Option<Self> {
Some(match b {
0x00 => PreLoginOptionToken::Version,
0x01 => PreLoginOptionToken::Encryption,
0x02 => PreLoginOptionToken::Instance,
0x03 => PreLoginOptionToken::ThreadId,
0x04 => PreLoginOptionToken::MultipleActiveResultSets,
0x05 => PreLoginOptionToken::TraceId,
_ => {
return None;
}
})
}
}
/// Client trace identifiers carried in the PRELOGIN `TRACEID` option
/// (36 bytes on the wire: two GUIDs plus a u32 sequence).
#[derive(Debug)]
pub(crate) struct TraceId {
    // client application trace ID (GUID_CONNID)
    pub(crate) connection_id: Uuid,
    // client application activity ID (GUID_ActivityID)
    pub(crate) activity_id: Uuid,
    // client application activity sequence (ActivitySequence)
    pub(crate) activity_seq: u32,
}
/// Version of the sender (UL_VERSION); exchanged in the PRELOGIN
/// `VERSION` option (6 bytes on the wire).
#[derive(Debug, Default)]
pub(crate) struct Version {
    pub(crate) major: u8,
    pub(crate) minor: u8,
    pub(crate) build: u16,
    // Sub-build number of the sender (US_SUBBUILD)
    pub(crate) sub_build: u16,
}
impl Version {
    /// Appends the 6-byte wire form: major (u8), minor (u8),
    /// build (u16 BE), sub_build (u16 BE).
    fn encode(&self, buf: &mut Vec<u8>) {
        buf.extend_from_slice(&[self.major, self.minor]);
        buf.extend_from_slice(&self.build.to_be_bytes());
        buf.extend_from_slice(&self.sub_build.to_be_bytes());
    }
}
impl Display for Version {
fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
write!(f, "v{}.{}.{}", self.major, self.minor, self.build)
}
}
bitflags! {
    /// During the Pre-Login handshake, the client and the server negotiate the
    /// wire encryption to be used.
    ///
    /// NOTE: the value 0x00 (ENCRYPT_OFF, "available but off") is the absence
    /// of all flags and therefore has no named constant here.
    #[derive(Default)]
    pub(crate) struct Encrypt: u8 {
        /// Encryption is available and on.
        const ON = 0x01;
        /// Encryption is not available.
        const NOT_SUPPORTED = 0x02;
        /// Encryption is required.
        const REQUIRED = 0x03;
        /// The client certificate should be used to authenticate
        /// the user in place of a user/password.
        const CLIENT_CERT = 0x80;
    }
}
#[test]
fn test_encode_pre_login() {
    let mut buf = Vec::new();

    // A PRELOGIN with the required VERSION/ENCRYPTION options plus an empty
    // instance name, a thread id, and MARS enabled; trace_id left unset.
    let pre_login = PreLogin {
        version: Version {
            major: 9,
            minor: 0,
            build: 0,
            sub_build: 0,
        },
        encryption: Encrypt::ON,
        instance: Some(""),
        thread_id: Some(0x00000DB8),
        multiple_active_result_sets: Some(true),
        ..Default::default()
    };

    // From v20191101 of MS-TDS documentation
    #[rustfmt::skip]
    let expected = vec![
        0x00, 0x00, 0x1A, 0x00, 0x06, 0x01, 0x00, 0x20, 0x00, 0x01, 0x02, 0x00, 0x21, 0x00,
        0x01, 0x03, 0x00, 0x22, 0x00, 0x04, 0x04, 0x00, 0x26, 0x00, 0x01, 0xFF, 0x09, 0x00,
        0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0xB8, 0x0D, 0x00, 0x00, 0x01
    ];

    pre_login.encode(&mut buf);

    assert_eq!(expected, buf);
}
#[test]
fn test_decode_pre_login() {
    // A minimal server PRELOGIN response: option table (VERSION at offset 11,
    // len 6; ENCRYPTION at offset 17, len 1; 0xFF terminator) then the data.
    #[rustfmt::skip]
    let buffer = Bytes::from_static(&[
        0, 0, 11, 0, 6, 1, 0, 17, 0, 1, 255,
        14, 0, 12, 209, 0, 0, 0,
    ]);

    let pre_login = PreLogin::decode(buffer).unwrap();

    // v14.0.3281
    assert_eq!(pre_login.version.major, 14);
    assert_eq!(pre_login.version.minor, 0);
    assert_eq!(pre_login.version.build, 3281);
    assert_eq!(pre_login.version.sub_build, 0);

    // ENCRYPT_OFF
    assert_eq!(pre_login.encryption.bits(), 0);
}

View file

@ -1,17 +0,0 @@
use bytes::{Buf, Bytes};
use crate::error::Error;
/// The RETURNSTATUS token: the integer status returned by an RPC invocation.
#[derive(Debug)]
pub(crate) struct ReturnStatus {
    // Parsed but currently unused by the driver.
    #[allow(dead_code)]
    value: i32,
}
impl ReturnStatus {
    /// Reads a RETURNSTATUS token payload: a single little-endian i32.
    pub(crate) fn get(buf: &mut Bytes) -> Result<Self, Error> {
        Ok(Self {
            value: buf.get_i32_le(),
        })
    }
}

View file

@ -1,77 +0,0 @@
use bitflags::bitflags;
use bytes::{Buf, Bytes};
use crate::error::Error;
use crate::mssql::io::MssqlBufExt;
use crate::mssql::protocol::col_meta_data::Flags;
#[cfg(test)]
use crate::mssql::protocol::type_info::DataType;
use crate::mssql::protocol::type_info::TypeInfo;
/// The RETURNVALUE token: an OUTPUT parameter or UDF return value sent back
/// after an RPC invocation.
#[allow(dead_code)]
#[derive(Debug)]
pub(crate) struct ReturnValue {
    // 1-based ordinal of the parameter in the original request.
    param_ordinal: u16,
    // Parameter name (B_VARCHAR; may be empty).
    param_name: String,
    status: ReturnValueStatus,
    user_type: u32,
    flags: Flags,
    pub(crate) type_info: TypeInfo,
    // Raw value bytes; `None` when the value is SQL NULL.
    pub(crate) value: Option<Bytes>,
}
bitflags! {
    // Status bits describing how the RETURNVALUE originated.
    pub(crate) struct ReturnValueStatus: u8 {
        // If ReturnValue corresponds to OUTPUT parameter of a stored procedure invocation
        const OUTPUT_PARAM = 0x01;
        // If ReturnValue corresponds to return value of User Defined Function.
        const USER_DEFINED = 0x02;
    }
}
impl ReturnValue {
    /// Parses one RETURNVALUE token from `buf`.
    ///
    /// Fields are consumed in wire order: ordinal (u16 LE), name (B_VARCHAR),
    /// status (u8), user type (u32 LE), flags (u16 LE), type info, then the
    /// value bytes as described by that type info.
    pub(crate) fn get(buf: &mut Bytes) -> Result<Self, Error> {
        let param_ordinal = buf.get_u16_le();
        let param_name = buf.get_b_varchar()?;
        let status = ReturnValueStatus::from_bits_truncate(buf.get_u8());
        let user_type = buf.get_u32_le();
        let flags = Flags::from_bits_truncate(buf.get_u16_le());
        let type_info = TypeInfo::get(buf)?;
        let value = type_info.get_value(buf);

        Ok(Self {
            param_ordinal,
            param_name,
            status,
            user_type,
            flags,
            type_info,
            value,
        })
    }
}
#[test]
fn test_get() {
    // A captured RETURNVALUE token: ordinal 0, empty name, OUTPUT_PARAM
    // status, IntN(4) type info, and a 4-byte value of 1.
    #[rustfmt::skip]
    let mut buf = Bytes::from_static(&[
        0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0x26, 4, 4, 1, 0, 0, 0, 0xfe, 0, 0, 0xe0, 0, 0, 0, 0, 0, 0, 0, 0, 0
    ]);

    let return_value = ReturnValue::get(&mut buf).unwrap();

    assert_eq!(return_value.param_ordinal, 0);
    assert_eq!(return_value.param_name, "");
    assert_eq!(
        return_value.status,
        ReturnValueStatus::from_bits_truncate(1)
    );
    assert_eq!(return_value.user_type, 0);
    assert_eq!(return_value.flags, Flags::from_bits_truncate(0));
    assert_eq!(return_value.type_info, TypeInfo::new(DataType::IntN, 4));
    assert_eq!(
        return_value.value,
        Some(Bytes::from_static(&[0x01, 0, 0, 0]))
    );
}

View file

@ -1,44 +0,0 @@
use bytes::Bytes;
use crate::error::Error;
use crate::io::BufExt;
use crate::mssql::{MssqlColumn, MssqlTypeInfo};
/// One decoded ROW (or NBCROW) token: per-column type info and raw values.
#[derive(Debug)]
pub(crate) struct Row {
    // Type info for each column, cloned from the column metadata.
    pub(crate) column_types: Vec<MssqlTypeInfo>,
    // Raw value bytes per column; `None` for SQL NULL.
    pub(crate) values: Vec<Option<Bytes>>,
}
impl Row {
    /// Parses one row from `buf` using the previously-received column metadata.
    ///
    /// When `nullable` is true the token is an NBCROW: a leading null bitmap
    /// (one bit per column, LSB-first, padded to whole bytes) marks NULL
    /// columns, whose values are then omitted from the wire entirely.
    pub(crate) fn get(
        buf: &mut Bytes,
        nullable: bool,
        columns: &[MssqlColumn],
    ) -> Result<Self, Error> {
        let mut values = Vec::with_capacity(columns.len());
        let mut column_types = Vec::with_capacity(columns.len());

        // ceil(columns / 8) bytes of null bitmap, only present for NBCROW.
        let nulls = if nullable {
            buf.get_bytes((columns.len() + 7) / 8)
        } else {
            Bytes::from_static(b"")
        };

        for (i, column) in columns.iter().enumerate() {
            column_types.push(column.type_info.clone());

            // Read a value only when the column's declared type is non-null
            // AND (for NBCROW) the bitmap does not flag this column as NULL;
            // otherwise no bytes were sent for it.
            if !(column.type_info.0.is_null() || (nullable && (nulls[i / 8] & (1 << (i % 8))) != 0))
            {
                values.push(column.type_info.0.get_value(buf));
            } else {
                values.push(None);
            }
        }

        Ok(Self {
            values,
            column_types,
        })
    }
}

View file

@ -1,93 +0,0 @@
use bitflags::bitflags;
use either::Either;
use crate::io::Encode;
use crate::mssql::io::MssqlBufMutExt;
use crate::mssql::protocol::header::{AllHeaders, Header};
use crate::mssql::MssqlArguments;
/// A client RPC request packet: invokes either a named stored procedure or a
/// well-known built-in procedure by id.
pub(crate) struct RpcRequest<'a> {
    pub(crate) transaction_descriptor: u64,
    // the procedure can be encoded as a u16 of a built-in or the name for a custom one
    pub(crate) procedure: Either<&'a str, Procedure>,
    pub(crate) options: OptionFlags,
    // Pre-encoded parameter data, appended verbatim after the flags.
    pub(crate) arguments: &'a MssqlArguments,
}
// Well-known built-in procedure ids (PROCID); sent on the wire as a u16
// after the 0xFFFF name-length marker.
#[derive(Debug, Copy, Clone)]
#[repr(u16)]
#[allow(dead_code)]
pub(crate) enum Procedure {
    Cursor = 1,
    CursorOpen = 2,
    CursorPrepare = 3,
    CursorExecute = 4,
    CursorPrepareExecute = 5,
    CursorUnprepare = 6,
    CursorFetch = 7,
    CursorOption = 8,
    CursorClose = 9,
    ExecuteSql = 10,
    Prepare = 11,
    Execute = 12,
    PrepareExecute = 13,
    PrepareExecuteRpc = 14,
    Unprepare = 15,
}
bitflags! {
    // Request-level option flags for an RPC invocation.
    pub(crate) struct OptionFlags: u16 {
        const WITH_RECOMPILE = 1;
        // The server sends NoMetaData only if fNoMetadata is set to 1 in the request
        const NO_META_DATA = 2;
        // 1 if the metadata has not changed from the previous call and the server SHOULD reuse
        // its cached metadata (the metadata MUST still be sent).
        const REUSE_META_DATA = 4;
    }
}
bitflags! {
    // Per-parameter status flags for RPC parameter data.
    pub(crate) struct StatusFlags: u8 {
        // 1 if the parameter is passed by reference (OUTPUT parameter) or
        // 0 if parameter is passed by value
        const BY_REF_VALUE = 1;
        // 1 if the parameter being passed is to be the default value
        const DEFAULT_VALUE = 2;
        // 1 if the parameter that is being passed is encrypted. This flag is valid
        // only when the column encryption feature is negotiated by client and server
        // and is turned on
        const ENCRYPTED = 8;
    }
}
impl Encode<'_> for RpcRequest<'_> {
    /// Serializes the RPC request: ALL_HEADERS (transaction descriptor),
    /// then the procedure (UTF-16 name with u16 length prefix, or the 0xFFFF
    /// marker followed by a built-in procedure id), then the option flags and
    /// the pre-encoded argument data.
    fn encode_with(&self, buf: &mut Vec<u8>, _: ()) {
        let headers = AllHeaders(&[Header::TransactionDescriptor {
            outstanding_request_count: 1,
            transaction_descriptor: self.transaction_descriptor,
        }]);
        headers.encode(buf);

        match &self.procedure {
            Either::Left(name) => {
                // Named (custom) procedure: length-prefixed UTF-16 string.
                buf.extend(&(name.len() as u16).to_le_bytes());
                buf.put_utf16_str(name);
            }
            Either::Right(id) => {
                // Built-in procedure: 0xFFFF sentinel, then its u16 id.
                buf.extend(&(0xffff_u16).to_le_bytes());
                buf.extend(&(*id as u16).to_le_bytes());
            }
        }

        buf.extend(&self.options.bits.to_le_bytes());
        buf.extend(&self.arguments.data);
    }
}
// TODO: Test serialization of this?

Some files were not shown because too many files have changed in this diff Show more