chore: run cargo fmt

Austin Bonander 2024-08-15 22:15:38 -07:00
parent d8cb7a2cd6
commit 26c85240fc
8 changed files with 43 additions and 36 deletions

@@ -274,6 +274,10 @@ impl DerefMut for PgArgumentBuffer {
 }
 
 pub(crate) fn value_size_int4_checked(size: usize) -> Result<i32, String> {
-    i32::try_from(size)
-        .map_err(|_| format!("value size would overflow in the binary protocol encoding: {size} > {}", i32::MAX))
+    i32::try_from(size).map_err(|_| {
+        format!(
+            "value size would overflow in the binary protocol encoding: {size} > {}",
+            i32::MAX
+        )
+    })
 }
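
For context: value_size_int4_checked guards the 4-byte length prefix that Postgres' binary protocol attaches to every value. A minimal standalone sketch of the check (the function body is copied from the hunk above; the main harness is illustrative only):

fn value_size_int4_checked(size: usize) -> Result<i32, String> {
    i32::try_from(size).map_err(|_| {
        format!(
            "value size would overflow in the binary protocol encoding: {size} > {}",
            i32::MAX
        )
    })
}

fn main() {
    // Sizes that fit in i32 pass through unchanged...
    assert_eq!(value_size_int4_checked(1024), Ok(1024));
    // ...while anything past i32::MAX (2^31 - 1 bytes) is rejected.
    assert!(value_size_int4_checked(usize::MAX).is_err());
}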

@@ -2,7 +2,7 @@ use crate::error::Error;
 use crate::ext::ustr::UStr;
 use crate::message::{ParameterDescription, RowDescription};
 use crate::query_as::query_as;
-use crate::query_scalar::{query_scalar};
+use crate::query_scalar::query_scalar;
 use crate::statement::PgStatementMetadata;
 use crate::type_info::{PgArrayOf, PgCustomType, PgType, PgTypeKind};
 use crate::types::Json;
@@ -11,8 +11,8 @@ use crate::HashMap;
 use crate::{PgColumn, PgConnection, PgTypeInfo};
 use futures_core::future::BoxFuture;
 use smallvec::SmallVec;
-use std::sync::Arc;
 use sqlx_core::query_builder::QueryBuilder;
+use std::sync::Arc;
 
 /// Describes the type of the `pg_type.typtype` column
 ///
@@ -426,7 +426,7 @@ WHERE rngtypid = $1
         if meta.columns.len() * 3 > 65535 {
             tracing::debug!(
                 ?stmt_id,
-                num_columns=meta.columns.len(),
+                num_columns = meta.columns.len(),
                 "number of columns in query is too large to pull nullability for"
             );
         }
@@ -436,19 +436,18 @@ WHERE rngtypid = $1
         // This will include columns that don't have a `relation_id` (are not from a table);
         // assuming those are a minority of columns, it's less code to _not_ work around it
         // and just let Postgres return `NULL`.
-        let mut nullable_query = QueryBuilder::new(
-            "SELECT NOT pg_attribute.attnotnull FROM ( "
-        );
+        let mut nullable_query = QueryBuilder::new("SELECT NOT pg_attribute.attnotnull FROM ( ");
 
-        nullable_query.push_values(
-            meta.columns.iter().zip(0i32..),
-            |mut tuple, (column, i)| {
-                // ({i}::int4, {column.relation_id}::int4, {column.relation_attribute_no}::int2)
-                tuple.push_bind(i).push_unseparated("::int4");
-                tuple.push_bind(column.relation_id).push_unseparated("::int4");
-                tuple.push_bind(column.relation_attribute_no).push_bind_unseparated("::int2");
-            },
-        );
+        nullable_query.push_values(meta.columns.iter().zip(0i32..), |mut tuple, (column, i)| {
+            // ({i}::int4, {column.relation_id}::int4, {column.relation_attribute_no}::int2)
+            tuple.push_bind(i).push_unseparated("::int4");
+            tuple
+                .push_bind(column.relation_id)
+                .push_unseparated("::int4");
+            tuple
+                .push_bind(column.relation_attribute_no)
+                .push_bind_unseparated("::int2");
+        });
 
         nullable_query.push(
             ") as col(idx, table_id, col_idx) \

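For context: the push_values call above emits one `($n::int4, $n::int4, $n::int2)` tuple per column. A minimal sketch of the same pattern with hypothetical stand-in data, using sqlx's QueryBuilder; note the sketch uses push_unseparated for all three casts, whereas the final call in the diff is push_bind_unseparated, which binds its argument as another parameter rather than appending raw SQL:

use sqlx::{Execute, Postgres, QueryBuilder};

fn main() {
    // Hypothetical stand-ins for `meta.columns`: (relation_id, relation_attribute_no).
    let columns = [(Some(16384_i32), 1_i16), (Some(16384), 2)];

    let mut nullable_query: QueryBuilder<Postgres> =
        QueryBuilder::new("SELECT NOT pg_attribute.attnotnull FROM ( ");

    // One bind-parameter tuple per column, mirroring the loop body in the diff.
    nullable_query.push_values(columns.iter().zip(0i32..), |mut tuple, ((rel_id, attr_no), i)| {
        tuple.push_bind(i).push_unseparated("::int4");
        tuple.push_bind(*rel_id).push_unseparated("::int4");
        tuple.push_bind(*attr_no).push_unseparated("::int2");
    });

    nullable_query.push(") as col(idx, table_id, col_idx)");

    // Prints the generated SQL, e.g.:
    // SELECT NOT pg_attribute.attnotnull FROM ( VALUES ($1::int4, $2::int4, $3::int2),
    //   ($4::int4, $5::int4, $6::int2) ) as col(idx, table_id, col_idx)
    println!("{}", nullable_query.build().sql());
}
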
@@ -1,3 +1,4 @@
+use crate::arguments::value_size_int4_checked;
 use crate::{
     decode::Decode,
     encode::{Encode, IsNull},
@@ -8,7 +9,6 @@ use crate::{
 use bit_vec::BitVec;
 use sqlx_core::bytes::Buf;
 use std::{io, mem};
-use crate::arguments::value_size_int4_checked;
 
 impl Type<Postgres> for BitVec {
     fn type_info() -> PgTypeInfo {
@@ -26,7 +26,9 @@ impl Encode<'_, Postgres> for NaiveDate {
         let days: i32 = (*self - postgres_epoch_date())
             .num_days()
             .try_into()
-            .map_err(|_| format!("value {self:?} would overflow binary encoding for Postgres DATE"))?;
+            .map_err(|_| {
+                format!("value {self:?} would overflow binary encoding for Postgres DATE")
+            })?;
 
         Encode::<Postgres>::encode(days, buf)
     }
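
For context: the DATE encoding in this hunk is a signed count of whole days from the Postgres epoch, 2000-01-01. A quick sketch of the same arithmetic with chrono (the dates are arbitrary, chosen to make the count easy to verify):

use chrono::NaiveDate;

fn main() {
    // Postgres' binary DATE format is days relative to 2000-01-01.
    let postgres_epoch = NaiveDate::from_ymd_opt(2000, 1, 1).unwrap();
    let date = NaiveDate::from_ymd_opt(2024, 8, 15).unwrap();

    // Same shape as the encode path: signed day count, checked narrowing to i32.
    let days: i32 = (date - postgres_epoch)
        .num_days()
        .try_into()
        .expect("value would overflow binary encoding for Postgres DATE");

    // 24 years (including 6 leap days) plus 227 days into 2024.
    assert_eq!(days, 8993);
}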

@@ -147,7 +147,7 @@ impl PgCube {
                     bytes.len()
                 ),
             )
-                .into());
+            .into());
         }
 
         match (header.is_point, header.dimensions) {
@@ -193,15 +193,9 @@ mod tests {
     fn zero() {
         let zero: Decimal = "0".parse().unwrap();
 
-        assert_eq!(
-            PgNumeric::from(&zero),
-            PgNumeric::ZERO,
-        );
+        assert_eq!(PgNumeric::from(&zero), PgNumeric::ZERO,);
 
-        assert_eq!(
-            Decimal::try_from(&PgNumeric::ZERO).unwrap(),
-            Decimal::ZERO
-        );
+        assert_eq!(Decimal::try_from(&PgNumeric::ZERO).unwrap(), Decimal::ZERO);
     }
 
     #[test]
@@ -384,7 +378,10 @@ mod tests {
         let actual_decimal = Decimal::try_from(expected_numeric).unwrap();
         assert_eq!(actual_decimal, Decimal::MAX);
         // Value split by 10,000's to match the expected digits[]
-        assert_eq!(actual_decimal.mantissa(), 7_9228_1625_1426_4337_5935_4395_0335);
+        assert_eq!(
+            actual_decimal.mantissa(),
+            7_9228_1625_1426_4337_5935_4395_0335
+        );
         assert_eq!(actual_decimal.scale(), 0);
     }
@@ -406,7 +403,10 @@ mod tests {
         let actual_decimal = Decimal::try_from(expected_numeric).unwrap();
         assert_eq!(actual_decimal, max_value_max_scale);
-        assert_eq!(actual_decimal.mantissa(), 79_228_162_514_264_337_593_543_950_335);
+        assert_eq!(
+            actual_decimal.mantissa(),
+            79_228_162_514_264_337_593_543_950_335
+        );
         assert_eq!(actual_decimal.scale(), 28);
     }
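
For context: the unusual underscore grouping 7_9228_1625_... in the first test mirrors how NUMERIC stores base-10,000 digits, per the "split by 10,000's" comment in the diff. A small illustrative sketch of that decomposition (not sqlx API; i16 is used only because NUMERIC digits fit comfortably in it):

fn main() {
    // Decimal::MAX's mantissa is 2^96 - 1.
    let mut n: u128 = 79_228_162_514_264_337_593_543_950_335;
    assert_eq!(n, (1u128 << 96) - 1);

    // Peel off base-10_000 digits from the least significant end.
    let mut digits: Vec<i16> = Vec::new();
    while n > 0 {
        digits.push((n % 10_000) as i16);
        n /= 10_000;
    }
    digits.reverse();

    // Matches the grouping 7_9228_1625_1426_4337_5935_4395_0335.
    assert_eq!(digits, vec![7, 9228, 1625, 1426, 4337, 5935, 4395, 335]);
}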

@@ -23,10 +23,9 @@ impl PgHasArrayType for Date {
 impl Encode<'_, Postgres> for Date {
     fn encode_by_ref(&self, buf: &mut PgArgumentBuffer) -> Result<IsNull, BoxDynError> {
         // DATE is encoded as number of days since epoch (2000-01-01)
-        let days: i32 = (*self - PG_EPOCH)
-            .whole_days()
-            .try_into()
-            .map_err(|_| format!("value {self:?} would overflow binary encoding for Postgres DATE"))?;
+        let days: i32 = (*self - PG_EPOCH).whole_days().try_into().map_err(|_| {
+            format!("value {self:?} would overflow binary encoding for Postgres DATE")
+        })?;
 
         Encode::<Postgres>::encode(days, buf)
     }

@@ -37,9 +37,12 @@ impl PgHasArrayType for OffsetDateTime {
 impl Encode<'_, Postgres> for PrimitiveDateTime {
     fn encode_by_ref(&self, buf: &mut PgArgumentBuffer) -> Result<IsNull, BoxDynError> {
         // TIMESTAMP is encoded as the microseconds since the epoch
-        let micros: i64 = (*self - PG_EPOCH.midnight()).whole_microseconds()
+        let micros: i64 = (*self - PG_EPOCH.midnight())
+            .whole_microseconds()
             .try_into()
-            .map_err(|_| format!("value {self:?} would overflow binary encoding for Postgres TIME"))?;
+            .map_err(|_| {
+                format!("value {self:?} would overflow binary encoding for Postgres TIME")
+            })?;
 
         Encode::<Postgres>::encode(micros, buf)
     }
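
For context: TIMESTAMP in this hunk is a count of microseconds from 2000-01-01 00:00:00 (PG_EPOCH.midnight() in the diff). A short sketch of the same computation with the time crate (requires its `macros` feature; the values are arbitrary):

use time::macros::datetime;

fn main() {
    // Postgres' binary TIMESTAMP format is microseconds relative to
    // 2000-01-01 00:00:00.
    let pg_epoch = datetime!(2000-01-01 00:00:00);
    let ts = datetime!(2000-01-01 00:00:01.5);

    // whole_microseconds() yields i128, so the checked narrowing to i64
    // is what catches out-of-range values.
    let micros: i64 = (ts - pg_epoch)
        .whole_microseconds()
        .try_into()
        .expect("value would overflow binary encoding for Postgres TIMESTAMP");

    assert_eq!(micros, 1_500_000); // 1.5 seconds
}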