# Save an instruction in EntityHasher (#10648)
# Objective

Keep essentially the same structure of `EntityHasher` from #9903, but rephrase the multiplication slightly to save an instruction.

cc @superdump

Discord thread: https://discord.com/channels/691052431525675048/1172033156845674507/1174969772522356756

## Solution

Today, the hash is

```rust
self.hash = i | (i.wrapping_mul(FRAC_U64MAX_PI) << 32);
```

with `i` being `(generation << 32) | index`.

Expanding things out, we get

```rust
i | ( (i * CONST) << 32 )
  = (generation << 32) | index | ((((generation << 32) | index) * CONST) << 32)
  = (generation << 32) | index | ((index * CONST) << 32) // because the generation overflowed
  = (index * CONST | generation) << 32 | index
```

What if we do the same thing, but with `+` instead of `|`? That's almost the same thing, except that it has carries, which are actually often better in a hash function anyway, since `+` doesn't saturate. (`|` can be dangerous, since once something becomes `-1` it'll stay that way, and there's no mixing available.)

```rust
(index * CONST + generation) << 32 + index
  = (CONST << 32 + 1) * index + generation << 32
  = (CONST << 32 + 1) * index + (WHATEVER << 32 + generation) << 32 // because the extra overflows and thus can be anything
  = (CONST << 32 + 1) * index + ((CONST * generation) << 32 + generation) << 32 // pick "whatever" to be something convenient
  = (CONST << 32 + 1) * index + ((CONST << 32 + 1) * generation) << 32
  = (CONST << 32 + 1) * index + (CONST << 32 + 1) * (generation << 32)
  = (CONST << 32 + 1) * (index + generation << 32)
  = (CONST << 32 + 1) * (generation << 32 | index)
  = (CONST << 32 + 1) * i
```

So we can do essentially the same thing using a single multiplication instead of doing multiply-shift-or.

LLVM was already smart enough to merge the shifting into a multiplication, but this saves the extra `or`:

![image](https://github.com/bevyengine/bevy/assets/18526288/d9396614-2326-4730-abbe-4908c01b5ace)

<https://rust.godbolt.org/z/MEvbz4eo4>

It's a very small change, and often will disappear in load latency anyway, but it's a couple percent faster in lookups:

![image](https://github.com/bevyengine/bevy/assets/18526288/c365ec85-6adc-4f6d-8fa6-a65146f55a75)

(There was more of an improvement here before #10558, but with `to_bits` being a single `qword` load now, keeping things mostly as they are turned out to be better than the bigger changes I'd tried in #10605.)

---

## Changelog

(Probably skip it)

## Migration Guide

(none needed)
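As a sanity check on the derivation above, here is a standalone sketch (mine, not part of the PR) comparing the old multiply-shift-or formulation against a single multiplication. `old_hash`, `new_hash`, `OLD_CONST`, and `NEW_CONST` are names I picked; the constants mirror `FRAC_U64MAX_PI` and `UPPER_PHI` from the diff below. The two are not bit-identical (the PR also swaps the constant), but both leave the index untouched in the low 32 bits, which is the property the hasher relies on for slot selection:

```rust
// Standalone sketch, not part of this commit.
const OLD_CONST: u64 = 0x517c_c1b7_2722_0a95; // mirrors FRAC_U64MAX_PI
const NEW_CONST: u64 = 0x9e37_79b9_0000_0001; // mirrors UPPER_PHI, i.e. (CONST << 32) + 1

fn old_hash(i: u64) -> u64 {
    // multiply, shift, or
    i | (i.wrapping_mul(OLD_CONST) << 32)
}

fn new_hash(i: u64) -> u64 {
    // single multiply
    i.wrapping_mul(NEW_CONST)
}

fn main() {
    for index in [0u64, 1, 42, 0x00C0_FFEE] {
        for generation in [0u64, 1, 7] {
            let i = (generation << 32) | index;
            // Both formulations pass the index through in the low 32 bits;
            // all of the mixing ends up in the high bits.
            assert_eq!(old_hash(i) as u32, index as u32);
            assert_eq!(new_hash(i) as u32, index as u32);
        }
    }
}
```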
Parent: d95d20f4b1
Commit: a902ea6f85

2 changed files with 69 additions and 12 deletions
```diff
@@ -998,4 +998,36 @@ mod tests {
         assert!(Entity::new(2, 2) > Entity::new(1, 2));
         assert!(Entity::new(2, 2) >= Entity::new(1, 2));
     }
+
+    // Feel free to change this test if needed, but it seemed like an important
+    // part of the best-case performance changes in PR#9903.
+    #[test]
+    fn entity_hash_keeps_similar_ids_together() {
+        use std::hash::BuildHasher;
+        let hash = bevy_utils::EntityHash;
+
+        let first_id = 0xC0FFEE << 8;
+        let first_hash = hash.hash_one(Entity::from_raw(first_id));
+
+        for i in 1..=255 {
+            let id = first_id + i;
+            let hash = hash.hash_one(Entity::from_raw(id));
+            assert_eq!(hash.wrapping_sub(first_hash) as u32, i);
+        }
+    }
+
+    #[test]
+    fn entity_hash_id_bitflip_affects_high_7_bits() {
+        use std::hash::BuildHasher;
+        let hash = bevy_utils::EntityHash;
+
+        let first_id = 0xC0FFEE;
+        let first_hash = hash.hash_one(Entity::from_raw(first_id)) >> 57;
+
+        for bit in 0..u32::BITS {
+            let id = first_id ^ (1 << bit);
+            let hash = hash.hash_one(Entity::from_raw(id)) >> 57;
+            assert_ne!(hash, first_hash);
+        }
+    }
 }
```
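The arithmetic behind `entity_hash_keeps_similar_ids_together` is straightforward once the new hash is written as a single multiplication: the multiplier is congruent to 1 modulo 2³², so hashes of nearby ids differ by exactly the id delta in their low 32 bits. A minimal standalone check of that property (my own sketch using the raw formula rather than `EntityHash` itself):

```rust
// Standalone check, not part of this commit: because UPPER_PHI ≡ 1 (mod 2^32),
// hash(bits + i) - hash(bits) ≡ i (mod 2^32), which is what the test above asserts.
const UPPER_PHI: u64 = 0x9e37_79b9_0000_0001;

fn hash(bits: u64) -> u64 {
    bits.wrapping_mul(UPPER_PHI)
}

fn main() {
    let first_id: u64 = 0xC0FFEE << 8;
    let first_hash = hash(first_id);
    for i in 1..=255u64 {
        let delta = hash(first_id + i).wrapping_sub(first_hash);
        assert_eq!(delta as u32, i as u32);
    }
}
```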
```diff
@@ -267,29 +267,54 @@ impl BuildHasher for EntityHash {
 /// A very fast hash that is only designed to work on generational indices
 /// like `Entity`. It will panic if attempting to hash a type containing
 /// non-u64 fields.
+///
+/// This is heavily optimized for typical cases, where you have mostly live
+/// entities, and works particularly well for contiguous indices.
+///
+/// If you have an unusual case -- say all your indices are multiples of 256
+/// or most of the entities are dead generations -- then you might want also to
+/// try [`AHasher`] for a slower hash computation but fewer lookup conflicts.
 #[derive(Debug, Default)]
 pub struct EntityHasher {
     hash: u64,
 }
 
-// This value comes from rustc-hash (also known as FxHasher) which in turn got
-// it from Firefox. It is something like `u64::MAX / N` for an N that gives a
-// value close to π and works well for distributing bits for hashing when using
-// with a wrapping multiplication.
-const FRAC_U64MAX_PI: u64 = 0x517cc1b727220a95;
-
 impl Hasher for EntityHasher {
     fn write(&mut self, _bytes: &[u8]) {
         panic!("can only hash u64 using EntityHasher");
     }
 
     #[inline]
-    fn write_u64(&mut self, i: u64) {
-        // Apparently hashbrown's hashmap uses the upper 7 bits for some SIMD
-        // optimisation that uses those bits for binning. This hash function
-        // was faster than i | (i << (64 - 7)) in the worst cases, and was
-        // faster than PassHasher for all cases tested.
-        self.hash = i | (i.wrapping_mul(FRAC_U64MAX_PI) << 32);
+    fn write_u64(&mut self, bits: u64) {
+        // SwissTable (and thus `hashbrown`) cares about two things from the hash:
+        // - H1: low bits (masked by `2ⁿ-1`) to pick the slot in which to store the item
+        // - H2: high 7 bits are used to SIMD optimize hash collision probing
+        // For more see <https://abseil.io/about/design/swisstables#metadata-layout>
+
+        // This hash function assumes that the entity ids are still well-distributed,
+        // so for H1 leaves the entity id alone in the low bits so that id locality
+        // will also give memory locality for things spawned together.
+        // For H2, take advantage of the fact that while multiplication doesn't
+        // spread entropy to the low bits, it's incredibly good at spreading it
+        // upward, which is exactly where we need it the most.
+
+        // While this does include the generation in the output, it doesn't do so
+        // *usefully*. H1 won't care until you have over 3 billion entities in
+        // the table, and H2 won't care until something hits generation 33 million.
+        // Thus the comment suggesting that this is best for live entities,
+        // where there won't be generation conflicts where it would matter.
+
+        // The high 32 bits of this are ⅟φ for Fibonacci hashing. That works
+        // particularly well for hashing for the same reason as described in
+        // <https://extremelearning.com.au/unreasonable-effectiveness-of-quasirandom-sequences/>
+        // It loses no information because it has a modular inverse.
+        // (Specifically, `0x144c_bc89_u32 * 0x9e37_79b9_u32 == 1`.)
+        //
+        // The low 32 bits make that part of the product just a pass-through.
+        const UPPER_PHI: u64 = 0x9e37_79b9_0000_0001;
+
+        // This is `(MAGIC * index + generation) << 32 + index`, in a single instruction.
+        self.hash = bits.wrapping_mul(UPPER_PHI);
     }
 
     #[inline]
```
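One claim in the comments above is easy to verify independently: the top-32-bit constant has a multiplicative inverse modulo 2³², so the multiplication loses no information. A minimal check (my own sketch, reusing the constants quoted in the diff):

```rust
// Standalone check, not part of this commit.
fn main() {
    let phi_frac: u32 = 0x9e37_79b9; // high 32 bits of UPPER_PHI (the ⅟φ Fibonacci-hashing constant)
    let inverse: u32 = 0x144c_bc89; // its multiplicative inverse mod 2^32

    // The specific identity quoted in the comment above.
    assert_eq!(phi_frac.wrapping_mul(inverse), 1);

    // Hence multiplying by it is reversible: no information is lost.
    let x: u32 = 0xDEAD_BEEF;
    assert_eq!(x.wrapping_mul(phi_frac).wrapping_mul(inverse), x);
}
```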