Added basic benchmarks for overloaded functions

This commit is contained in:
Gino Valente 2024-09-04 14:52:21 -07:00
parent 77ae7cf822
commit b3b6671d9c

View file

@@ -1,7 +1,7 @@
use bevy_reflect::func::{ArgList, IntoFunction, IntoFunctionMut, TypedFunction}; use bevy_reflect::func::{ArgList, IntoFunction, IntoFunctionMut, TypedFunction};
use criterion::{criterion_group, criterion_main, BatchSize, Criterion}; use criterion::{criterion_group, criterion_main, BatchSize, Criterion};
criterion_group!(benches, typed, into, call, clone); criterion_group!(benches, typed, into, call, overload, clone);
criterion_main!(benches); criterion_main!(benches);
fn add(a: i32, b: i32) -> i32 { fn add(a: i32, b: i32) -> i32 {
@@ -79,6 +79,148 @@ fn call(c: &mut Criterion) {
}); });
} }
fn overload(c: &mut Criterion) {
fn add<T: std::ops::Add<Output = T>>(a: T, b: T) -> T {
a + b
}
c.benchmark_group("with_overload")
.bench_function("01_overload", |b| {
b.iter_batched(
|| add::<i8>.into_function(),
|func| func.with_overload(add::<i16>),
BatchSize::SmallInput,
);
})
.bench_function("03_overload", |b| {
b.iter_batched(
|| add::<i8>.into_function(),
|func| {
func.with_overload(add::<i16>)
.with_overload(add::<i32>)
.with_overload(add::<i64>)
},
BatchSize::SmallInput,
);
})
.bench_function("10_overload", |b| {
b.iter_batched(
|| add::<i8>.into_function(),
|func| {
func.with_overload(add::<i16>)
.with_overload(add::<i32>)
.with_overload(add::<i64>)
.with_overload(add::<i128>)
.with_overload(add::<u8>)
.with_overload(add::<u16>)
.with_overload(add::<u32>)
.with_overload(add::<u64>)
.with_overload(add::<u128>)
},
BatchSize::SmallInput,
);
})
.bench_function("01_nested_overload", |b| {
b.iter_batched(
|| add::<i8>.into_function(),
|func| func.with_overload(add::<i16>),
BatchSize::SmallInput,
);
})
.bench_function("03_nested_overload", |b| {
b.iter_batched(
|| add::<i8>.into_function(),
|func| {
func.with_overload(
add::<i16>
.into_function()
.with_overload(add::<i32>.into_function().with_overload(add::<i64>)),
)
},
BatchSize::SmallInput,
);
})
.bench_function("10_nested_overload", |b| {
b.iter_batched(
|| add::<i8>.into_function(),
|func| {
func.with_overload(
add::<i16>.into_function().with_overload(
add::<i32>.into_function().with_overload(
add::<i64>.into_function().with_overload(
add::<i128>.into_function().with_overload(
add::<u8>.into_function().with_overload(
add::<u16>.into_function().with_overload(
add::<u32>.into_function().with_overload(
add::<u64>
.into_function()
.with_overload(add::<u128>),
),
),
),
),
),
),
),
)
},
BatchSize::SmallInput,
);
});
c.benchmark_group("call_overload")
.bench_function("01_overload", |b| {
b.iter_batched(
|| {
(
add::<i8>.into_function().with_overload(add::<i16>),
ArgList::new().push_owned(75_i8).push_owned(25_i8),
)
},
|(func, args)| func.call(args),
BatchSize::SmallInput,
);
})
.bench_function("03_overload", |b| {
b.iter_batched(
|| {
(
add::<i8>
.into_function()
.with_overload(add::<i16>)
.with_overload(add::<i32>)
.with_overload(add::<i64>),
ArgList::new().push_owned(75_i32).push_owned(25_i32),
)
},
|(func, args)| func.call(args),
BatchSize::SmallInput,
);
})
.bench_function("10_overload", |b| {
b.iter_batched(
|| {
(
add::<i8>
.into_function()
.with_overload(add::<i16>)
.with_overload(add::<i32>)
.with_overload(add::<i64>)
.with_overload(add::<i128>)
.with_overload(add::<u8>)
.with_overload(add::<u16>)
.with_overload(add::<u32>)
.with_overload(add::<u64>)
.with_overload(add::<u128>),
ArgList::new().push_owned(75_u8).push_owned(25_u8),
)
},
|(func, args)| func.call(args),
BatchSize::SmallInput,
);
});
}
fn clone(c: &mut Criterion) { fn clone(c: &mut Criterion) {
c.benchmark_group("clone").bench_function("function", |b| { c.benchmark_group("clone").bench_function("function", |b| {
let add = add.into_function(); let add = add.into_function();