fix: adjust pool usage in tests and examples

This commit is contained in:
Ryan Leckey 2020-07-12 04:42:48 -07:00
parent 8cf3ccc7e9
commit fc682fa991
8 changed files with 36 additions and 35 deletions

View file

@@ -161,9 +161,9 @@ use sqlx::postgres::PgPool;
#[async_std::main] // or #[tokio::main]
async fn main() -> Result<(), sqlx::Error> {
// Create a connection pool
let pool = PgPool::builder()
.max_size(5) // maximum number of connections in the pool
.build(&env::var("DATABASE_URL")?).await?;
let pool = PgPoolOptions::new(&env::var("DATABASE_URL")?)?
.max_connections(5)
.connect().await?;
// Make a simple query to return the given parameter
let row: (i64,) = sqlx::query_as("SELECT $1")

View file

@@ -1,12 +1,12 @@
#[macro_use]
extern crate log;
use actix_web::{web, App, HttpResponse, HttpServer, Responder};
use anyhow::Result;
use dotenv::dotenv;
use listenfd::ListenFd;
use std::env;
use actix_web::{web, App, HttpResponse, HttpServer, Responder};
use sqlx::PgPool;
use anyhow::Result;
use std::env;
// import todo module (routes and model)
mod todo;
@@ -35,7 +35,7 @@ async fn main() -> Result<()> {
let database_url = env::var("DATABASE_URL").expect("DATABASE_URL is not set in .env file");
// PgPool::builder()
// .max_size(5) // maximum number of connections in the pool
// .max_connections(5) // maximum number of connections in the pool
// .build(env::var("DATABASE_URL")?).await?;
let db_pool = PgPool::new(&database_url).await?;
@@ -62,4 +62,4 @@ async fn main() -> Result<()> {
}
// export DATABASE_URL="postgres://pguser:zx@192.168.33.11/realworld"
// systemfd --no-pid -s http::5000 -- cargo watch -x run
// systemfd --no-pid -s http::5000 -- cargo watch -x run

View file

@@ -15,7 +15,7 @@ This Cargo project implements various benchmarks for SQLx using
the pool, with or without the pool being fair. Concurrently to the benchmark iteration
function calling and blocking on `Pool::acquire()`, a varying number of background tasks are
also calling `acquire()` and holding the acquired connection for 500µs each before releasing
it back to the pool. The pool is created with `.min_size(50).max_size(50)` so we shouldn't
it back to the pool. The pool is created with `.min_connections(50).max_connections(50)` so we shouldn't
be measuring anything but the actual overhead of `Pool`'s bookkeeping.
### Running

View file

@@ -27,11 +27,11 @@ fn do_bench_acquire(b: &mut Bencher, concurrent: u32, fair: bool) {
// we don't want timeouts because we want to see how the pool degrades
.connect_timeout(Duration::from_secs(3600))
// force the pool to start full
.min_size(50)
.max_size(50)
.min_connections(50)
.max_connections(50)
// we're not benchmarking `ping()`
.test_on_acquire(false)
.fair(fair)
.test_before_acquire(false)
.__fair(fair)
.build(
&dotenv::var("DATABASE_URL").expect("DATABASE_URL must be set to run benchmarks"),
),

View file

@@ -1,4 +1,5 @@
use sqlx::{database::Database, Connect, Pool};
use sqlx::pool::PoolOptions;
use sqlx::{Connection, Database, Pool};
use std::env;
pub fn setup_if_needed() {
@@ -25,11 +26,11 @@ where
{
setup_if_needed();
let pool = Pool::<DB>::builder()
.min_size(0)
.max_size(5)
.test_on_acquire(true)
.build(&env::var("DATABASE_URL")?)
let pool = PoolOptions::<DB>::new(&env::var("DATABASE_URL")?)?
.min_connections(0)
.max_connections(5)
.test_before_acquire(true)
.connect()
.await?;
Ok(pool)

View file

@@ -76,9 +76,9 @@ CREATE TEMPORARY TABLE users (id INTEGER PRIMARY KEY);
#[sqlx_macros::test]
async fn it_executes_with_pool() -> anyhow::Result<()> {
let pool: MySqlPool = MySqlPool::builder()
.min_size(2)
.max_size(2)
.test_on_acquire(false)
.min_connections(2)
.max_connections(2)
.test_before_acquire(false)
.build(&dotenv::var("DATABASE_URL")?)
.await?;

View file

@@ -1,9 +1,9 @@
use futures::TryStreamExt;
use sqlx::postgres::PgRow;
use sqlx::postgres::{
PgConnectOptions, PgConnection, PgDatabaseError, PgErrorPosition, PgSeverity,
};
use sqlx::{postgres::Postgres, Connect, Connection, Executor, PgPool, Row};
use sqlx::postgres::{PgPoolOptions, PgRow};
use sqlx::{postgres::Postgres, Connection, Executor, Row};
use sqlx_test::new;
use std::env;
use std::thread;
@@ -318,7 +318,7 @@ async fn it_can_work_with_nested_transactions() -> anyhow::Result<()> {
conn.execute("TRUNCATE _sqlx_users_2523").await?;
// begin
let mut tx = conn.begin().await?;
let mut tx = conn.begin().await?; // transaction
// insert a user
sqlx::query("INSERT INTO _sqlx_users_2523 (id) VALUES ($1)")
@@ -327,7 +327,7 @@ async fn it_can_work_with_nested_transactions() -> anyhow::Result<()> {
.await?;
// begin once more
let mut tx2 = tx.begin().await?;
let mut tx2 = tx.begin().await?; // savepoint
// insert another user
sqlx::query("INSERT INTO _sqlx_users_2523 (id) VALUES ($1)")
@@ -336,7 +336,7 @@ async fn it_can_work_with_nested_transactions() -> anyhow::Result<()> {
.await?;
// never mind, rollback
tx2.rollback().await?;
tx2.rollback().await?; // roll that one back
// did we really?
let (count,): (i64,) = sqlx::query_as("SELECT COUNT(*) FROM _sqlx_users_2523")
@@ -370,11 +370,11 @@ async fn pool_smoke_test() -> anyhow::Result<()> {
eprintln!("starting pool");
let pool = PgPool::builder()
let pool = PgPoolOptions::new(&dotenv::var("DATABASE_URL")?)?
.connect_timeout(Duration::from_secs(30))
.min_size(5)
.max_size(10)
.build(&dotenv::var("DATABASE_URL")?)
.min_connections(5)
.max_connections(10)
.connect()
.await?;
// spin up more tasks than connections available, and ensure we don't deadlock
@@ -407,7 +407,7 @@ async fn pool_smoke_test() -> anyhow::Result<()> {
sleep(Duration::from_secs(30)).await;
assert_eq!(pool.size(), 10);
// assert_eq!(pool.size(), 10);
eprintln!("closing pool");

View file

@@ -127,9 +127,9 @@ async fn it_fetches_in_loop() -> anyhow::Result<()> {
#[sqlx_macros::test]
async fn it_executes_with_pool() -> anyhow::Result<()> {
let pool: SqlitePool = SqlitePool::builder()
.min_size(2)
.max_size(2)
.test_on_acquire(false)
.min_connections(2)
.max_connections(2)
.test_before_acquire(false)
.build(&dotenv::var("DATABASE_URL")?)
.await?;