From 185ed988d240e380c63c7ff809622a270f670637 Mon Sep 17 00:00:00 2001 From: Amanieu d'Antras Date: Thu, 7 Feb 2019 12:08:37 +0100 Subject: [PATCH 01/11] Remove the Recover trait for HashSet --- src/libstd/collections/hash/map.rs | 40 ------------------------------ src/libstd/collections/hash/mod.rs | 8 ------ src/libstd/collections/hash/set.rs | 13 +++++++--- 3 files changed, 9 insertions(+), 52 deletions(-) diff --git a/src/libstd/collections/hash/map.rs b/src/libstd/collections/hash/map.rs index ac3cfde47b520..e24a824e38807 100644 --- a/src/libstd/collections/hash/map.rs +++ b/src/libstd/collections/hash/map.rs @@ -2937,13 +2937,6 @@ impl<'a, K, V> OccupiedEntry<'a, K, V> { pop_internal(self.elem).1 } - /// Returns a key that was used for search. - /// - /// The key was retained for further use. - fn take_key(&mut self) -> Option { - self.key.take() - } - /// Replaces the entry, returning the old key and value. The new key in the hash map will be /// the key used to create this entry. /// @@ -3262,39 +3255,6 @@ impl fmt::Debug for RandomState { } } -impl super::Recover for HashMap - where K: Eq + Hash + Borrow, - S: BuildHasher, - Q: Eq + Hash -{ - type Key = K; - - #[inline] - fn get(&self, key: &Q) -> Option<&K> { - self.search(key).map(|bucket| bucket.into_refs().0) - } - - fn take(&mut self, key: &Q) -> Option { - self.search_mut(key).map(|bucket| pop_internal(bucket).0) - } - - #[inline] - fn replace(&mut self, key: K) -> Option { - self.reserve(1); - - match self.entry(key) { - Occupied(mut occupied) => { - let key = occupied.take_key().unwrap(); - Some(mem::replace(occupied.elem.read_mut().0, key)) - } - Vacant(vacant) => { - vacant.insert(()); - None - } - } - } -} - #[allow(dead_code)] fn assert_covariance() { fn map_key<'new>(v: HashMap<&'static str, u8>) -> HashMap<&'new str, u8> { diff --git a/src/libstd/collections/hash/mod.rs b/src/libstd/collections/hash/mod.rs index 0d1bbb590db9e..56585477f1c17 100644 --- a/src/libstd/collections/hash/mod.rs +++ b/src/libstd/collections/hash/mod.rs @@ -4,11 +4,3 @@ mod bench; mod table; pub mod map; pub mod set; - -trait Recover { - type Key; - - fn get(&self, key: &Q) -> Option<&Self::Key>; - fn take(&mut self, key: &Q) -> Option; - fn replace(&mut self, key: Self::Key) -> Option; -} diff --git a/src/libstd/collections/hash/set.rs b/src/libstd/collections/hash/set.rs index b9fcc2365fa7c..0fbf374fa496a 100644 --- a/src/libstd/collections/hash/set.rs +++ b/src/libstd/collections/hash/set.rs @@ -4,7 +4,6 @@ use crate::hash::{Hash, BuildHasher}; use crate::iter::{Chain, FromIterator, FusedIterator}; use crate::ops::{BitOr, BitAnd, BitXor, Sub}; -use super::Recover; use super::map::{self, HashMap, Keys, RandomState}; // Future Optimization (FIXME!) @@ -579,7 +578,7 @@ impl HashSet where T: Borrow, Q: Hash + Eq { - Recover::get(&self.map, value) + self.map.get_key_value(value).map(|(k, _)| k) } /// Returns `true` if `self` has no elements in common with `other`. @@ -699,7 +698,13 @@ impl HashSet /// ``` #[stable(feature = "set_recovery", since = "1.9.0")] pub fn replace(&mut self, value: T) -> Option { - Recover::replace(&mut self.map, value) + match self.map.entry(value) { + map::Entry::Occupied(occupied) => Some(occupied.replace_key()), + map::Entry::Vacant(vacant) => { + vacant.insert(()); + None + } + } } /// Removes a value from the set. 
Returns whether the value was @@ -754,7 +759,7 @@ impl HashSet where T: Borrow, Q: Hash + Eq { - Recover::take(&mut self.map, value) + self.map.remove_entry(value).map(|(k, _)| k) } /// Retains only the elements specified by the predicate. From a533504ca12ed93fec3cfb1d42add1a32bbc27cf Mon Sep 17 00:00:00 2001 From: Amanieu d'Antras Date: Thu, 7 Feb 2019 12:28:27 +0100 Subject: [PATCH 02/11] Add try_reserve to HashSet --- src/libstd/collections/hash/set.rs | 24 ++++++++++++++++++++++++ 1 file changed, 24 insertions(+) diff --git a/src/libstd/collections/hash/set.rs b/src/libstd/collections/hash/set.rs index 0fbf374fa496a..f8a93b680f762 100644 --- a/src/libstd/collections/hash/set.rs +++ b/src/libstd/collections/hash/set.rs @@ -1,4 +1,5 @@ use crate::borrow::Borrow; +use crate::collections::CollectionAllocErr; use crate::fmt; use crate::hash::{Hash, BuildHasher}; use crate::iter::{Chain, FromIterator, FusedIterator}; @@ -357,6 +358,29 @@ impl HashSet self.map.reserve(additional) } + /// Tries to reserve capacity for at least `additional` more elements to be inserted + /// in the given `HashSet`. The collection may reserve more space to avoid + /// frequent reallocations. + /// + /// # Errors + /// + /// If the capacity overflows, or the allocator reports a failure, then an error + /// is returned. + /// + /// # Examples + /// + /// ``` + /// #![feature(try_reserve)] + /// use std::collections::HashSet; + /// let mut set: HashSet = HashSet::new(); + /// set.try_reserve(10).expect("why is the test harness OOMing on 10 bytes?"); + /// ``` + #[inline] + #[unstable(feature = "try_reserve", reason = "new API", issue="48043")] + pub fn try_reserve(&mut self, additional: usize) -> Result<(), CollectionAllocErr> { + self.map.try_reserve(additional) + } + /// Shrinks the capacity of the set as much as possible. It will drop /// down as much as possible while maintaining the internal rules /// and possibly leaving some space in accordance with the resize policy. 
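The `Recover` trait removed in the first patch existed only so that `HashSet` could pull stored keys back out of its internal `HashMap<T, ()>`; the replacement relies on `HashMap`'s own key-recovering methods instead. A minimal standalone sketch of that pattern follows; the `SetLike` wrapper is a hypothetical name used only for illustration, and `get_key_value` was still unstable (behind `map_get_key_value`) when this patch was written.

```
use std::collections::HashMap;
use std::hash::Hash;

// Hypothetical wrapper illustrating the pattern the patch adopts: a set stored
// as HashMap<T, ()> can recover its stored keys without a helper trait.
struct SetLike<T> {
    map: HashMap<T, ()>,
}

impl<T: Hash + Eq> SetLike<T> {
    // Recover a reference to the stored key that compares equal to `value`.
    fn get(&self, value: &T) -> Option<&T> {
        self.map.get_key_value(value).map(|(k, _)| k)
    }

    // Remove and return the stored key, if present.
    fn take(&mut self, value: &T) -> Option<T> {
        self.map.remove_entry(value).map(|(k, _)| k)
    }
}

fn main() {
    let mut s = SetLike { map: HashMap::new() };
    s.map.insert(String::from("key"), ());
    assert_eq!(s.get(&String::from("key")).map(String::as_str), Some("key"));
    assert_eq!(s.take(&String::from("key")), Some(String::from("key")));
}
```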
From 556fc40a95be2ea20e33ad34c32598cf2e3edf72 Mon Sep 17 00:00:00 2001 From: Amanieu d'Antras Date: Thu, 7 Feb 2019 12:28:38 +0100 Subject: [PATCH 03/11] Mark HashSet functions with #[inline] --- src/libstd/collections/hash/set.rs | 45 ++++++++++++++++++++++++++++++ 1 file changed, 45 insertions(+) diff --git a/src/libstd/collections/hash/set.rs b/src/libstd/collections/hash/set.rs index f8a93b680f762..b610e09ae749e 100644 --- a/src/libstd/collections/hash/set.rs +++ b/src/libstd/collections/hash/set.rs @@ -181,6 +181,7 @@ impl HashSet { /// println!("{}", x); /// } /// ``` + #[inline] #[stable(feature = "rust1", since = "1.0.0")] pub fn iter(&self) -> Iter<'_, T> { Iter { iter: self.map.keys() } @@ -198,6 +199,7 @@ impl HashSet { /// v.insert(1); /// assert_eq!(v.len(), 1); /// ``` + #[inline] #[stable(feature = "rust1", since = "1.0.0")] pub fn len(&self) -> usize { self.map.len() @@ -215,6 +217,7 @@ impl HashSet { /// v.insert(1); /// assert!(!v.is_empty()); /// ``` + #[inline] #[stable(feature = "rust1", since = "1.0.0")] pub fn is_empty(&self) -> bool { self.map.is_empty() @@ -255,6 +258,7 @@ impl HashSet { /// v.clear(); /// assert!(v.is_empty()); /// ``` + #[inline] #[stable(feature = "rust1", since = "1.0.0")] pub fn clear(&mut self) { self.map.clear() @@ -332,6 +336,7 @@ impl HashSet /// let set: HashSet = HashSet::with_hasher(hasher); /// let hasher: &RandomState = set.hasher(); /// ``` + #[inline] #[stable(feature = "hashmap_public_hasher", since = "1.9.0")] pub fn hasher(&self) -> &S { self.map.hasher() @@ -353,6 +358,7 @@ impl HashSet /// set.reserve(10); /// assert!(set.capacity() >= 10); /// ``` + #[inline] #[stable(feature = "rust1", since = "1.0.0")] pub fn reserve(&mut self, additional: usize) { self.map.reserve(additional) @@ -397,6 +403,7 @@ impl HashSet /// set.shrink_to_fit(); /// assert!(set.capacity() >= 2); /// ``` + #[inline] #[stable(feature = "rust1", since = "1.0.0")] pub fn shrink_to_fit(&mut self) { self.map.shrink_to_fit() @@ -453,6 +460,7 @@ impl HashSet /// let diff: HashSet<_> = b.difference(&a).collect(); /// assert_eq!(diff, [4].iter().collect()); /// ``` + #[inline] #[stable(feature = "rust1", since = "1.0.0")] pub fn difference<'a>(&'a self, other: &'a HashSet) -> Difference<'a, T, S> { Difference { @@ -482,6 +490,7 @@ impl HashSet /// assert_eq!(diff1, diff2); /// assert_eq!(diff1, [1, 4].iter().collect()); /// ``` + #[inline] #[stable(feature = "rust1", since = "1.0.0")] pub fn symmetric_difference<'a>(&'a self, other: &'a HashSet) @@ -507,6 +516,7 @@ impl HashSet /// let intersection: HashSet<_> = a.intersection(&b).collect(); /// assert_eq!(intersection, [2, 3].iter().collect()); /// ``` + #[inline] #[stable(feature = "rust1", since = "1.0.0")] pub fn intersection<'a>(&'a self, other: &'a HashSet) -> Intersection<'a, T, S> { if self.len() <= other.len() { @@ -540,6 +550,7 @@ impl HashSet /// let union: HashSet<_> = a.union(&b).collect(); /// assert_eq!(union, [1, 2, 3, 4].iter().collect()); /// ``` + #[inline] #[stable(feature = "rust1", since = "1.0.0")] pub fn union<'a>(&'a self, other: &'a HashSet) -> Union<'a, T, S> { if self.len() <= other.len() { @@ -571,6 +582,7 @@ impl HashSet /// /// [`Eq`]: ../../std/cmp/trait.Eq.html /// [`Hash`]: ../../std/hash/trait.Hash.html + #[inline] #[stable(feature = "rust1", since = "1.0.0")] pub fn contains(&self, value: &Q) -> bool where T: Borrow, @@ -597,6 +609,7 @@ impl HashSet /// /// [`Eq`]: ../../std/cmp/trait.Eq.html /// [`Hash`]: ../../std/hash/trait.Hash.html + #[inline] #[stable(feature = 
"set_recovery", since = "1.9.0")] pub fn get(&self, value: &Q) -> Option<&T> where T: Borrow, @@ -700,6 +713,7 @@ impl HashSet /// assert_eq!(set.insert(2), false); /// assert_eq!(set.len(), 1); /// ``` + #[inline] #[stable(feature = "rust1", since = "1.0.0")] pub fn insert(&mut self, value: T) -> bool { self.map.insert(value, ()).is_none() @@ -720,6 +734,7 @@ impl HashSet /// set.replace(Vec::with_capacity(10)); /// assert_eq!(set.get(&[][..]).unwrap().capacity(), 10); /// ``` + #[inline] #[stable(feature = "set_recovery", since = "1.9.0")] pub fn replace(&mut self, value: T) -> Option { match self.map.entry(value) { @@ -752,6 +767,7 @@ impl HashSet /// /// [`Eq`]: ../../std/cmp/trait.Eq.html /// [`Hash`]: ../../std/hash/trait.Hash.html + #[inline] #[stable(feature = "rust1", since = "1.0.0")] pub fn remove(&mut self, value: &Q) -> bool where T: Borrow, @@ -778,6 +794,7 @@ impl HashSet /// /// [`Eq`]: ../../std/cmp/trait.Eq.html /// [`Hash`]: ../../std/hash/trait.Hash.html + #[inline] #[stable(feature = "set_recovery", since = "1.9.0")] pub fn take(&mut self, value: &Q) -> Option where T: Borrow, @@ -844,6 +861,7 @@ impl FromIterator for HashSet where T: Eq + Hash, S: BuildHasher + Default { + #[inline] fn from_iter>(iter: I) -> HashSet { let mut set = HashSet::with_hasher(Default::default()); set.extend(iter); @@ -856,6 +874,7 @@ impl Extend for HashSet where T: Eq + Hash, S: BuildHasher { + #[inline] fn extend>(&mut self, iter: I) { self.map.extend(iter.into_iter().map(|k| (k, ()))); } @@ -866,6 +885,7 @@ impl<'a, T, S> Extend<&'a T> for HashSet where T: 'a + Eq + Hash + Copy, S: BuildHasher { + #[inline] fn extend>(&mut self, iter: I) { self.extend(iter.into_iter().cloned()); } @@ -877,6 +897,7 @@ impl Default for HashSet S: BuildHasher + Default { /// Creates an empty `HashSet` with the `Default` value for the hasher. 
+ #[inline] fn default() -> HashSet { HashSet { map: HashMap::default() } } @@ -1105,6 +1126,7 @@ impl<'a, T, S> IntoIterator for &'a HashSet { type Item = &'a T; type IntoIter = Iter<'a, T>; + #[inline] fn into_iter(self) -> Iter<'a, T> { self.iter() } @@ -1135,6 +1157,7 @@ impl IntoIterator for HashSet { /// println!("{}", x); /// } /// ``` + #[inline] fn into_iter(self) -> IntoIter { IntoIter { iter: self.map.into_iter() } } @@ -1142,6 +1165,7 @@ impl IntoIterator for HashSet { #[stable(feature = "rust1", since = "1.0.0")] impl Clone for Iter<'_, K> { + #[inline] fn clone(&self) -> Self { Iter { iter: self.iter.clone() } } @@ -1150,15 +1174,18 @@ impl Clone for Iter<'_, K> { impl<'a, K> Iterator for Iter<'a, K> { type Item = &'a K; + #[inline] fn next(&mut self) -> Option<&'a K> { self.iter.next() } + #[inline] fn size_hint(&self) -> (usize, Option) { self.iter.size_hint() } } #[stable(feature = "rust1", since = "1.0.0")] impl ExactSizeIterator for Iter<'_, K> { + #[inline] fn len(&self) -> usize { self.iter.len() } @@ -1177,15 +1204,18 @@ impl fmt::Debug for Iter<'_, K> { impl Iterator for IntoIter { type Item = K; + #[inline] fn next(&mut self) -> Option { self.iter.next().map(|(k, _)| k) } + #[inline] fn size_hint(&self) -> (usize, Option) { self.iter.size_hint() } } #[stable(feature = "rust1", since = "1.0.0")] impl ExactSizeIterator for IntoIter { + #[inline] fn len(&self) -> usize { self.iter.len() } @@ -1208,15 +1238,18 @@ impl fmt::Debug for IntoIter { impl<'a, K> Iterator for Drain<'a, K> { type Item = K; + #[inline] fn next(&mut self) -> Option { self.iter.next().map(|(k, _)| k) } + #[inline] fn size_hint(&self) -> (usize, Option) { self.iter.size_hint() } } #[stable(feature = "rust1", since = "1.0.0")] impl ExactSizeIterator for Drain<'_, K> { + #[inline] fn len(&self) -> usize { self.iter.len() } @@ -1237,6 +1270,7 @@ impl fmt::Debug for Drain<'_, K> { #[stable(feature = "rust1", since = "1.0.0")] impl Clone for Intersection<'_, T, S> { + #[inline] fn clone(&self) -> Self { Intersection { iter: self.iter.clone(), ..*self } } @@ -1249,6 +1283,7 @@ impl<'a, T, S> Iterator for Intersection<'a, T, S> { type Item = &'a T; + #[inline] fn next(&mut self) -> Option<&'a T> { loop { let elt = self.iter.next()?; @@ -1258,6 +1293,7 @@ impl<'a, T, S> Iterator for Intersection<'a, T, S> } } + #[inline] fn size_hint(&self) -> (usize, Option) { let (_, upper) = self.iter.size_hint(); (0, upper) @@ -1283,6 +1319,7 @@ impl FusedIterator for Intersection<'_, T, S> #[stable(feature = "rust1", since = "1.0.0")] impl Clone for Difference<'_, T, S> { + #[inline] fn clone(&self) -> Self { Difference { iter: self.iter.clone(), ..*self } } @@ -1295,6 +1332,7 @@ impl<'a, T, S> Iterator for Difference<'a, T, S> { type Item = &'a T; + #[inline] fn next(&mut self) -> Option<&'a T> { loop { let elt = self.iter.next()?; @@ -1304,6 +1342,7 @@ impl<'a, T, S> Iterator for Difference<'a, T, S> } } + #[inline] fn size_hint(&self) -> (usize, Option) { let (_, upper) = self.iter.size_hint(); (0, upper) @@ -1329,6 +1368,7 @@ impl fmt::Debug for Difference<'_, T, S> #[stable(feature = "rust1", since = "1.0.0")] impl Clone for SymmetricDifference<'_, T, S> { + #[inline] fn clone(&self) -> Self { SymmetricDifference { iter: self.iter.clone() } } @@ -1341,9 +1381,11 @@ impl<'a, T, S> Iterator for SymmetricDifference<'a, T, S> { type Item = &'a T; + #[inline] fn next(&mut self) -> Option<&'a T> { self.iter.next() } + #[inline] fn size_hint(&self) -> (usize, Option) { self.iter.size_hint() } @@ -1368,6 +1410,7 @@ impl 
fmt::Debug for SymmetricDifference<'_, T, S> #[stable(feature = "rust1", since = "1.0.0")] impl Clone for Union<'_, T, S> { + #[inline] fn clone(&self) -> Self { Union { iter: self.iter.clone() } } @@ -1397,9 +1440,11 @@ impl<'a, T, S> Iterator for Union<'a, T, S> { type Item = &'a T; + #[inline] fn next(&mut self) -> Option<&'a T> { self.iter.next() } + #[inline] fn size_hint(&self) -> (usize, Option) { self.iter.size_hint() } From 1fa7a21534bde7315bc78be970a342262ddf7a58 Mon Sep 17 00:00:00 2001 From: Amanieu d'Antras Date: Thu, 7 Feb 2019 12:08:05 +0100 Subject: [PATCH 04/11] Make libstd depend on the hashbrown crate --- Cargo.lock | 19 +++++++++++++++++++ Cargo.toml | 1 + src/libstd/Cargo.toml | 1 + .../rustc-std-workspace-alloc/Cargo.toml | 14 ++++++++++++++ src/tools/rustc-std-workspace-alloc/lib.rs | 9 +++++++++ 5 files changed, 44 insertions(+) create mode 100644 src/tools/rustc-std-workspace-alloc/Cargo.toml create mode 100644 src/tools/rustc-std-workspace-alloc/lib.rs diff --git a/Cargo.lock b/Cargo.lock index b62c7ff90970e..6c5951f08da2f 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1066,6 +1066,16 @@ dependencies = [ "walkdir 2.2.7 (registry+https://github.com/rust-lang/crates.io-index)", ] +[[package]] +name = "hashbrown" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "compiler_builtins 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", + "rustc-std-workspace-alloc 1.0.0", + "rustc-std-workspace-core 1.0.0", +] + [[package]] name = "heck" version = "0.3.0" @@ -2529,6 +2539,13 @@ name = "rustc-serialize" version = "0.3.24" source = "registry+https://github.com/rust-lang/crates.io-index" +[[package]] +name = "rustc-std-workspace-alloc" +version = "1.0.0" +dependencies = [ + "alloc 0.0.0", +] + [[package]] name = "rustc-std-workspace-core" version = "1.0.0" @@ -3260,6 +3277,7 @@ dependencies = [ "core 0.0.0", "dlmalloc 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)", "fortanix-sgx-abi 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)", + "hashbrown 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", "libc 0.2.51 (registry+https://github.com/rust-lang/crates.io-index)", "panic_abort 0.0.0", "panic_unwind 0.0.0", @@ -4103,6 +4121,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" "checksum globset 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)" = "4743617a7464bbda3c8aec8558ff2f9429047e025771037df561d383337ff865" "checksum handlebars 0.32.4 (registry+https://github.com/rust-lang/crates.io-index)" = "d89ec99d1594f285d4590fc32bac5f75cdab383f1123d504d27862c644a807dd" "checksum handlebars 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "d82e5750d8027a97b9640e3fefa66bbaf852a35228e1c90790efd13c4b09c166" +"checksum hashbrown 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "58623735fa622916205f9e0a52a031b25b0e251ddaef47f7cb288444c4410beb" "checksum heck 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "ea04fa3ead4e05e51a7c806fc07271fdbde4e246a6c6d1efd52e72230b771b82" "checksum hex 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)" = "805026a5d0141ffc30abb3be3173848ad46a1b1664fe632428479619a3644d77" "checksum home 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)" = "80dff82fb58cfbbc617fb9a9184b010be0529201553cda50ad04372bc2333aff" diff --git a/Cargo.toml b/Cargo.toml index cb3c0ee194fe2..ccd7e8b7654a6 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -67,6 +67,7 @@ 
rustc-workspace-hack = { path = 'src/tools/rustc-workspace-hack' } # See comments in `tools/rustc-std-workspace-core/README.md` for what's going on # here rustc-std-workspace-core = { path = 'src/tools/rustc-std-workspace-core' } +rustc-std-workspace-alloc = { path = 'src/tools/rustc-std-workspace-alloc' } [patch."https://github.com/rust-lang/rust-clippy"] clippy_lints = { path = "src/tools/clippy/clippy_lints" } diff --git a/src/libstd/Cargo.toml b/src/libstd/Cargo.toml index 86ad334d88603..83aee507f32f2 100644 --- a/src/libstd/Cargo.toml +++ b/src/libstd/Cargo.toml @@ -22,6 +22,7 @@ libc = { version = "0.2.51", default-features = false, features = ['rustc-dep-of compiler_builtins = { version = "0.1.9" } profiler_builtins = { path = "../libprofiler_builtins", optional = true } unwind = { path = "../libunwind" } +hashbrown = { version = "0.2.0", features = ['rustc-dep-of-std'] } rustc-demangle = { version = "0.1.10", features = ['rustc-dep-of-std'] } backtrace-sys = { version = "0.1.24", features = ["rustc-dep-of-std"], optional = true } diff --git a/src/tools/rustc-std-workspace-alloc/Cargo.toml b/src/tools/rustc-std-workspace-alloc/Cargo.toml new file mode 100644 index 0000000000000..98578914963db --- /dev/null +++ b/src/tools/rustc-std-workspace-alloc/Cargo.toml @@ -0,0 +1,14 @@ +[package] +name = "rustc-std-workspace-alloc" +version = "1.0.0" +authors = ["Alex Crichton "] +license = 'MIT/Apache-2.0' +description = """ +Hack for the compiler's own build system +""" + +[lib] +path = "lib.rs" + +[dependencies] +alloc = { path = "../../liballoc" } diff --git a/src/tools/rustc-std-workspace-alloc/lib.rs b/src/tools/rustc-std-workspace-alloc/lib.rs new file mode 100644 index 0000000000000..50294e6cbad5a --- /dev/null +++ b/src/tools/rustc-std-workspace-alloc/lib.rs @@ -0,0 +1,9 @@ +#![feature(no_core, alloc)] +#![no_core] + +// See rustc-std-workspace-core for why this crate is needed. + +// Rename the crate to avoid conflicting with the alloc module in liballoc. 
+extern crate alloc as foo; + +pub use foo::*; From cf46bd5037a86237b9be4eb218d57d71bb8e2ccf Mon Sep 17 00:00:00 2001 From: Amanieu d'Antras Date: Wed, 20 Feb 2019 11:13:35 +0000 Subject: [PATCH 05/11] Replace the robin-hood hash table with hashbrown --- src/libstd/collections/hash/map.rs | 1549 +++++++------------------- src/libstd/collections/hash/mod.rs | 1 - src/libstd/collections/hash/set.rs | 2 - src/libstd/collections/hash/table.rs | 1131 ------------------- 4 files changed, 405 insertions(+), 2278 deletions(-) delete mode 100644 src/libstd/collections/hash/table.rs diff --git a/src/libstd/collections/hash/map.rs b/src/libstd/collections/hash/map.rs index e24a824e38807..ecb08314f6d05 100644 --- a/src/libstd/collections/hash/map.rs +++ b/src/libstd/collections/hash/map.rs @@ -1,222 +1,18 @@ use self::Entry::*; -use self::VacantEntryState::*; -use crate::intrinsics::unlikely; -use crate::collections::CollectionAllocErr; -use crate::cell::Cell; +use hashbrown::hash_map as base; + use crate::borrow::Borrow; -use crate::cmp::max; +use crate::cell::Cell; +use crate::collections::CollectionAllocErr; use crate::fmt::{self, Debug}; #[allow(deprecated)] -use crate::hash::{Hash, Hasher, BuildHasher, SipHasher13}; +use crate::hash::{BuildHasher, Hash, Hasher, SipHasher13}; use crate::iter::{FromIterator, FusedIterator}; -use crate::mem::{self, replace}; -use crate::ops::{Deref, DerefMut, Index}; +use crate::ops::Index; use crate::sys; -use super::table::{self, Bucket, EmptyBucket, Fallibility, FullBucket, FullBucketMut, RawTable, - SafeHash}; -use super::table::BucketState::{Empty, Full}; -use super::table::Fallibility::{Fallible, Infallible}; - -const MIN_NONZERO_RAW_CAPACITY: usize = 32; // must be a power of two - -/// The default behavior of HashMap implements a maximum load factor of 90.9%. -#[derive(Clone)] -struct DefaultResizePolicy; - -impl DefaultResizePolicy { - #[inline] - fn new() -> DefaultResizePolicy { - DefaultResizePolicy - } - - /// A hash map's "capacity" is the number of elements it can hold without - /// being resized. Its "raw capacity" is the number of slots required to - /// provide that capacity, accounting for maximum loading. The raw capacity - /// is always zero or a power of two. - #[inline] - fn try_raw_capacity(&self, len: usize) -> Result { - if len == 0 { - Ok(0) - } else { - // 1. Account for loading: `raw_capacity >= len * 1.1`. - // 2. Ensure it is a power of two. - // 3. Ensure it is at least the minimum size. - let mut raw_cap = len.checked_mul(11) - .map(|l| l / 10) - .and_then(|l| l.checked_next_power_of_two()) - .ok_or(CollectionAllocErr::CapacityOverflow)?; - - raw_cap = max(MIN_NONZERO_RAW_CAPACITY, raw_cap); - Ok(raw_cap) - } - } - - #[inline] - fn raw_capacity(&self, len: usize) -> usize { - self.try_raw_capacity(len).expect("raw_capacity overflow") - } - - /// The capacity of the given raw capacity. - #[inline] - fn capacity(&self, raw_cap: usize) -> usize { - // This doesn't have to be checked for overflow since allocation size - // in bytes will overflow earlier than multiplication by 10. - // - // As per https://github.com/rust-lang/rust/pull/30991 this is updated - // to be: (raw_cap * den + den - 1) / num - (raw_cap * 10 + 10 - 1) / 11 - } -} - -// The main performance trick in this hashmap is called Robin Hood Hashing. 
-// It gains its excellent performance from one essential operation: -// -// If an insertion collides with an existing element, and that element's -// "probe distance" (how far away the element is from its ideal location) -// is higher than how far we've already probed, swap the elements. -// -// This massively lowers variance in probe distance, and allows us to get very -// high load factors with good performance. The 90% load factor I use is rather -// conservative. -// -// > Why a load factor of approximately 90%? -// -// In general, all the distances to initial buckets will converge on the mean. -// At a load factor of α, the odds of finding the target bucket after k -// probes is approximately 1-α^k. If we set this equal to 50% (since we converge -// on the mean) and set k=8 (64-byte cache line / 8-byte hash), α=0.92. I round -// this down to make the math easier on the CPU and avoid its FPU. -// Since on average we start the probing in the middle of a cache line, this -// strategy pulls in two cache lines of hashes on every lookup. I think that's -// pretty good, but if you want to trade off some space, it could go down to one -// cache line on average with an α of 0.84. -// -// > Wait, what? Where did you get 1-α^k from? -// -// On the first probe, your odds of a collision with an existing element is α. -// The odds of doing this twice in a row is approximately α^2. For three times, -// α^3, etc. Therefore, the odds of colliding k times is α^k. The odds of NOT -// colliding after k tries is 1-α^k. -// -// The paper from 1986 cited below mentions an implementation which keeps track -// of the distance-to-initial-bucket histogram. This approach is not suitable -// for modern architectures because it requires maintaining an internal data -// structure. This allows very good first guesses, but we are most concerned -// with guessing entire cache lines, not individual indexes. Furthermore, array -// accesses are no longer linear and in one direction, as we have now. There -// is also memory and cache pressure that this would entail that would be very -// difficult to properly see in a microbenchmark. -// -// ## Future Improvements (FIXME!) -// -// Allow the load factor to be changed dynamically and/or at initialization. -// -// Also, would it be possible for us to reuse storage when growing the -// underlying table? This is exactly the use case for 'realloc', and may -// be worth exploring. -// -// ## Future Optimizations (FIXME!) -// -// Another possible design choice that I made without any real reason is -// parameterizing the raw table over keys and values. Technically, all we need -// is the size and alignment of keys and values, and the code should be just as -// efficient (well, we might need one for power-of-two size and one for not...). -// This has the potential to reduce code bloat in rust executables, without -// really losing anything except 4 words (key size, key alignment, val size, -// val alignment) which can be passed in to every call of a `RawTable` function. -// This would definitely be an avenue worth exploring if people start complaining -// about the size of rust executables. -// -// Annotate exceedingly likely branches in `table::make_hash` -// and `search_hashed` to reduce instruction cache pressure -// and mispredictions once it becomes possible (blocked on issue #11092). -// -// Shrinking the table could simply reallocate in place after moving buckets -// to the first half. 
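The Robin Hood scheme described in the comments being removed here boils down to one rule: while probing, an incoming entry evicts any resident entry that sits closer to its ideal bucket than the incoming entry has already travelled. A simplified standalone illustration of that insertion rule is below; the flat `Vec`-based table and helper function are invented for this sketch and are not the std implementation.

```
// Insert `(hash, value)` into an open-addressed table using the Robin Hood
// rule: steal the slot of any resident entry that is closer to its ideal
// bucket ("richer") than the incoming entry currently is.
fn robin_hood_insert(table: &mut Vec<Option<(u64, u32)>>, mut hash: u64, mut value: u32) {
    let mask = table.len() as u64 - 1; // assumes capacity is a power of two
    let mut dist = 0u64; // how far the incoming entry has probed so far
    let mut idx = (hash & mask) as usize;

    loop {
        let slot = table[idx]; // Option<(u64, u32)> is Copy
        match slot {
            None => {
                // Found an empty slot: place the entry we are currently carrying.
                table[idx] = Some((hash, value));
                return;
            }
            Some((resident_hash, resident_value)) => {
                // Displacement of the resident entry from its own ideal bucket.
                let resident_dist = (idx as u64).wrapping_sub(resident_hash & mask) & mask;
                if resident_dist < dist {
                    // The resident is richer: swap it out and carry it onward.
                    table[idx] = Some((hash, value));
                    hash = resident_hash;
                    value = resident_value;
                    dist = resident_dist;
                }
            }
        }
        idx = (idx + 1) & (mask as usize);
        dist += 1;
    }
}

fn main() {
    let mut table: Vec<Option<(u64, u32)>> = vec![None; 8];
    for (h, v) in [(1u64, 10u32), (9, 20), (17, 30)].iter().copied() {
        robin_hood_insert(&mut table, h, v); // all three hash to bucket 1
    }
    assert_eq!(table.iter().filter(|slot| slot.is_some()).count(), 3);
}
```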
-// -// The growth algorithm (fragment of the Proof of Correctness) -// -------------------- -// -// The growth algorithm is basically a fast path of the naive reinsertion- -// during-resize algorithm. Other paths should never be taken. -// -// Consider growing a robin hood hashtable of capacity n. Normally, we do this -// by allocating a new table of capacity `2n`, and then individually reinsert -// each element in the old table into the new one. This guarantees that the -// new table is a valid robin hood hashtable with all the desired statistical -// properties. Remark that the order we reinsert the elements in should not -// matter. For simplicity and efficiency, we will consider only linear -// reinsertions, which consist of reinserting all elements in the old table -// into the new one by increasing order of index. However we will not be -// starting our reinsertions from index 0 in general. If we start from index -// i, for the purpose of reinsertion we will consider all elements with real -// index j < i to have virtual index n + j. -// -// Our hash generation scheme consists of generating a 64-bit hash and -// truncating the most significant bits. When moving to the new table, we -// simply introduce a new bit to the front of the hash. Therefore, if an -// element has ideal index i in the old table, it can have one of two ideal -// locations in the new table. If the new bit is 0, then the new ideal index -// is i. If the new bit is 1, then the new ideal index is n + i. Intuitively, -// we are producing two independent tables of size n, and for each element we -// independently choose which table to insert it into with equal probability. -// However, rather than wrapping around themselves on overflowing their -// indexes, the first table overflows into the second, and the second into the -// first. Visually, our new table will look something like: -// -// [yy_xxx_xxxx_xxx|xx_yyy_yyyy_yyy] -// -// Where x's are elements inserted into the first table, y's are elements -// inserted into the second, and _'s are empty sections. We now define a few -// key concepts that we will use later. Note that this is a very abstract -// perspective of the table. A real resized table would be at least half -// empty. -// -// Theorem: A linear robin hood reinsertion from the first ideal element -// produces identical results to a linear naive reinsertion from the same -// element. -// -// FIXME(Gankro, pczarn): review the proof and put it all in a separate README.md -// -// Adaptive early resizing -// ---------------------- -// To protect against degenerate performance scenarios (including DOS attacks), -// the implementation includes an adaptive behavior that can resize the map -// early (before its capacity is exceeded) when suspiciously long probe sequences -// are encountered. -// -// With this algorithm in place it would be possible to turn a CPU attack into -// a memory attack due to the aggressive resizing. To prevent that the -// adaptive behavior only triggers when the map is at least half full. -// This reduces the effectiveness of the algorithm but also makes it completely safe. -// -// The previous safety measure also prevents degenerate interactions with -// really bad quality hash algorithms that can make normal inputs look like a -// DOS attack. -// -const DISPLACEMENT_THRESHOLD: usize = 128; -// -// The threshold of 128 is chosen to minimize the chance of exceeding it. -// In particular, we want that chance to be less than 10^-8 with a load of 90%. 
-// For displacement, the smallest constant that fits our needs is 90, -// so we round that up to 128. -// -// At a load factor of α, the odds of finding the target bucket after exactly n -// unsuccessful probes[1] are -// -// Pr_α{displacement = n} = -// (1 - α) / α * ∑_{k≥1} e^(-kα) * (kα)^(k+n) / (k + n)! * (1 - kα / (k + n + 1)) -// -// We use this formula to find the probability of triggering the adaptive behavior -// -// Pr_0.909{displacement > 128} = 1.601 * 10^-11 -// -// 1. Alfredo Viola (2005). Distributional analysis of Robin Hood linear probing -// hashing with buckets. - -/// A hash map implemented with linear probing and Robin Hood bucket stealing. +/// A hash map implemented with quadratic probing and SIMD lookup. /// /// By default, `HashMap` uses a hashing algorithm selected to provide /// resistance against HashDoS attacks. The algorithm is randomly seeded, and a @@ -254,13 +50,13 @@ const DISPLACEMENT_THRESHOLD: usize = 128; /// the [`Eq`] trait, changes while it is in the map. This is normally only /// possible through [`Cell`], [`RefCell`], global state, I/O, or unsafe code. /// -/// Relevant papers/articles: +/// The hash table implementation is a Rust port of Google's [SwissTable]. +/// The original C++ version of SwissTable can be found [here], and this +/// [CppCon talk] gives an overview of how the algorithm works. /// -/// 1. Pedro Celis. ["Robin Hood Hashing"](https://cs.uwaterloo.ca/research/tr/1986/CS-86-14.pdf) -/// 2. Emmanuel Goossaert. ["Robin Hood -/// hashing"](http://codecapsule.com/2013/11/11/robin-hood-hashing/) -/// 3. Emmanuel Goossaert. ["Robin Hood hashing: backward shift -/// deletion"](http://codecapsule.com/2013/11/17/robin-hood-hashing-backward-shift-deletion/) +/// [SwissTable]: https://abseil.io/blog/20180927-swisstables +/// [here]: https://github.com/abseil/abseil-cpp/blob/master/absl/container/internal/raw_hash_set.h +/// [CppCon talk]: https://www.youtube.com/watch?v=ncHmEUmJZf4 /// /// # Examples /// @@ -407,277 +203,7 @@ const DISPLACEMENT_THRESHOLD: usize = 128; #[derive(Clone)] #[stable(feature = "rust1", since = "1.0.0")] pub struct HashMap { - // All hashes are keyed on these values, to prevent hash collision attacks. - hash_builder: S, - - table: RawTable, - - resize_policy: DefaultResizePolicy, -} - -/// Search for a pre-hashed key. -/// If you don't already know the hash, use search or search_mut instead -#[inline] -fn search_hashed(table: M, hash: SafeHash, is_match: F) -> InternalEntry - where M: Deref>, - F: FnMut(&K) -> bool -{ - // This is the only function where capacity can be zero. To avoid - // undefined behavior when Bucket::new gets the raw bucket in this - // case, immediately return the appropriate search result. - if table.capacity() == 0 { - return InternalEntry::TableIsEmpty; - } - - search_hashed_nonempty(table, hash, is_match, true) -} - -/// Search for a pre-hashed key when the hash map is known to be non-empty. -#[inline] -fn search_hashed_nonempty(table: M, hash: SafeHash, mut is_match: F, - compare_hashes: bool) - -> InternalEntry - where M: Deref>, - F: FnMut(&K) -> bool -{ - // Do not check the capacity as an extra branch could slow the lookup. - - let size = table.size(); - let mut probe = Bucket::new(table, hash); - let mut displacement = 0; - - loop { - let full = match probe.peek() { - Empty(bucket) => { - // Found a hole! 
- return InternalEntry::Vacant { - hash, - elem: NoElem(bucket, displacement), - }; - } - Full(bucket) => bucket, - }; - - let probe_displacement = full.displacement(); - - if probe_displacement < displacement { - // Found a luckier bucket than me. - // We can finish the search early if we hit any bucket - // with a lower distance to initial bucket than we've probed. - return InternalEntry::Vacant { - hash, - elem: NeqElem(full, probe_displacement), - }; - } - - // If the hash doesn't match, it can't be this one.. - if !compare_hashes || hash == full.hash() { - // If the key doesn't match, it can't be this one.. - if is_match(full.read().0) { - return InternalEntry::Occupied { elem: full }; - } - } - displacement += 1; - probe = full.next(); - debug_assert!(displacement <= size); - } -} - -/// Same as `search_hashed_nonempty` but for mutable access. -#[inline] -fn search_hashed_nonempty_mut(table: M, hash: SafeHash, mut is_match: F, - compare_hashes: bool) - -> InternalEntry - where M: DerefMut>, - F: FnMut(&K) -> bool -{ - // Do not check the capacity as an extra branch could slow the lookup. - - let size = table.size(); - let mut probe = Bucket::new(table, hash); - let mut displacement = 0; - - loop { - let mut full = match probe.peek() { - Empty(bucket) => { - // Found a hole! - return InternalEntry::Vacant { - hash, - elem: NoElem(bucket, displacement), - }; - } - Full(bucket) => bucket, - }; - - let probe_displacement = full.displacement(); - - if probe_displacement < displacement { - // Found a luckier bucket than me. - // We can finish the search early if we hit any bucket - // with a lower distance to initial bucket than we've probed. - return InternalEntry::Vacant { - hash, - elem: NeqElem(full, probe_displacement), - }; - } - - // If the hash doesn't match, it can't be this one.. - if hash == full.hash() || !compare_hashes { - // If the key doesn't match, it can't be this one.. - if is_match(full.read_mut().0) { - return InternalEntry::Occupied { elem: full }; - } - } - displacement += 1; - probe = full.next(); - debug_assert!(displacement <= size); - } -} - -fn pop_internal(starting_bucket: FullBucketMut<'_, K, V>) - -> (K, V, &mut RawTable) -{ - let (empty, retkey, retval) = starting_bucket.take(); - let mut gap = match empty.gap_peek() { - Ok(b) => b, - Err(b) => return (retkey, retval, b.into_table()), - }; - - while gap.full().displacement() != 0 { - gap = match gap.shift() { - Ok(b) => b, - Err(b) => { - return (retkey, retval, b.into_table()); - }, - }; - } - - // Now we've done all our shifting. Return the value we grabbed earlier. - (retkey, retval, gap.into_table()) -} - -/// Performs robin hood bucket stealing at the given `bucket`. You must -/// also pass that bucket's displacement so we don't have to recalculate it. -/// -/// `hash`, `key`, and `val` are the elements to "robin hood" into the hashtable. -fn robin_hood<'a, K: 'a, V: 'a>(bucket: FullBucketMut<'a, K, V>, - mut displacement: usize, - mut hash: SafeHash, - mut key: K, - mut val: V) - -> FullBucketMut<'a, K, V> { - let size = bucket.table().size(); - let raw_capacity = bucket.table().capacity(); - // There can be at most `size - dib` buckets to displace, because - // in the worst case, there are `size` elements and we already are - // `displacement` buckets away from the initial one. - let idx_end = (bucket.index() + size - bucket.displacement()) % raw_capacity; - // Save the *starting point*. 
- let mut bucket = bucket.stash(); - - loop { - let (old_hash, old_key, old_val) = bucket.replace(hash, key, val); - hash = old_hash; - key = old_key; - val = old_val; - - loop { - displacement += 1; - let probe = bucket.next(); - debug_assert!(probe.index() != idx_end); - - let full_bucket = match probe.peek() { - Empty(bucket) => { - // Found a hole! - let bucket = bucket.put(hash, key, val); - // Now that it's stolen, just read the value's pointer - // right out of the table! Go back to the *starting point*. - // - // This use of `into_table` is misleading. It turns the - // bucket, which is a FullBucket on top of a - // FullBucketMut, into just one FullBucketMut. The "table" - // refers to the inner FullBucketMut in this context. - return bucket.into_table(); - } - Full(bucket) => bucket, - }; - - let probe_displacement = full_bucket.displacement(); - - bucket = full_bucket; - - // Robin hood! Steal the spot. - if probe_displacement < displacement { - displacement = probe_displacement; - break; - } - } - } -} - -impl HashMap - where K: Eq + Hash, - S: BuildHasher -{ - fn make_hash(&self, x: &X) -> SafeHash - where X: Hash - { - table::make_hash(&self.hash_builder, x) - } - - /// Search for a key, yielding the index if it's found in the hashtable. - /// If you already have the hash for the key lying around, or if you need an - /// InternalEntry, use search_hashed or search_hashed_nonempty. - #[inline] - fn search<'a, Q: ?Sized>(&'a self, q: &Q) - -> Option>> - where K: Borrow, - Q: Eq + Hash - { - if self.is_empty() { - return None; - } - - let hash = self.make_hash(q); - search_hashed_nonempty(&self.table, hash, |k| q.eq(k.borrow()), true) - .into_occupied_bucket() - } - - #[inline] - fn search_mut<'a, Q: ?Sized>(&'a mut self, q: &Q) - -> Option>> - where K: Borrow, - Q: Eq + Hash - { - if self.is_empty() { - return None; - } - - let hash = self.make_hash(q); - search_hashed_nonempty(&mut self.table, hash, |k| q.eq(k.borrow()), true) - .into_occupied_bucket() - } - - // The caller should ensure that invariants by Robin Hood Hashing hold - // and that there's space in the underlying table. - fn insert_hashed_ordered(&mut self, hash: SafeHash, k: K, v: V) { - let mut buckets = Bucket::new(&mut self.table, hash); - let start_index = buckets.index(); - - loop { - // We don't need to compare hashes for value swap. - // Not even DIBs for Robin Hood. - buckets = match buckets.peek() { - Empty(empty) => { - empty.put(hash, k, v); - return; - } - Full(b) => b.into_bucket(), - }; - buckets.next(); - debug_assert!(buckets.index() != start_index); - } - } + base: base::HashMap, } impl HashMap { @@ -732,13 +258,7 @@ impl HashMap { #[inline] #[stable(feature = "rust1", since = "1.0.0")] pub fn capacity(&self) -> usize { - self.resize_policy.capacity(self.raw_capacity()) - } - - /// Returns the hash map's raw capacity. - #[inline] - fn raw_capacity(&self) -> usize { - self.table.capacity() + self.base.capacity() } /// An iterator visiting all keys in arbitrary order. @@ -831,7 +351,7 @@ impl HashMap { /// ``` #[stable(feature = "rust1", since = "1.0.0")] pub fn iter(&self) -> Iter<'_, K, V> { - Iter { inner: self.table.iter() } + Iter { base: self.base.iter() } } /// An iterator visiting all key-value pairs in arbitrary order, @@ -859,7 +379,7 @@ impl HashMap { /// ``` #[stable(feature = "rust1", since = "1.0.0")] pub fn iter_mut(&mut self) -> IterMut<'_, K, V> { - IterMut { inner: self.table.iter_mut() } + IterMut { base: self.base.iter_mut() } } /// Returns the number of elements in the map. 
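The new table replaces Robin Hood probing with the SwissTable design mentioned in the doc comment above: each slot has a one-byte control word holding a 7-bit hash fragment, and a whole group of control bytes is compared per probe step, with SIMD where available. A heavily simplified scalar sketch of that group match follows; the constants and helper are illustrative only, not hashbrown's actual internals.

```
// Scalar illustration of a SwissTable-style group match: find which slots in a
// 16-byte control group carry the same 7-bit hash fragment as the lookup key.
// The real implementation compares all 16 bytes at once with SSE2; the EMPTY
// sentinel value used here is only illustrative.
const EMPTY: u8 = 0xFF;

fn match_group(group: &[u8; 16], h2: u8) -> Vec<usize> {
    group
        .iter()
        .enumerate()
        .filter(|&(_, &ctrl)| ctrl == h2)
        .map(|(i, _)| i)
        .collect()
}

fn main() {
    let mut group = [EMPTY; 16];
    group[3] = 0x2a; // two slots whose stored hash fragment matches the query
    group[9] = 0x2a;
    assert_eq!(match_group(&group, 0x2a), vec![3, 9]);
}
```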
@@ -876,7 +396,7 @@ impl HashMap { /// ``` #[stable(feature = "rust1", since = "1.0.0")] pub fn len(&self) -> usize { - self.table.size() + self.base.len() } /// Returns `true` if the map contains no elements. @@ -894,7 +414,7 @@ impl HashMap { #[inline] #[stable(feature = "rust1", since = "1.0.0")] pub fn is_empty(&self) -> bool { - self.len() == 0 + self.base.is_empty() } /// Clears the map, returning all key-value pairs as an iterator. Keeps the @@ -919,7 +439,7 @@ impl HashMap { #[inline] #[stable(feature = "drain", since = "1.6.0")] pub fn drain(&mut self) -> Drain<'_, K, V> { - Drain { inner: self.table.drain() } + Drain { base: self.base.drain() } } /// Clears the map, removing all key-value pairs. Keeps the allocated memory @@ -938,13 +458,14 @@ impl HashMap { #[stable(feature = "rust1", since = "1.0.0")] #[inline] pub fn clear(&mut self) { - self.drain(); + self.base.clear(); } } impl HashMap - where K: Eq + Hash, - S: BuildHasher +where + K: Eq + Hash, + S: BuildHasher, { /// Creates an empty `HashMap` which will use the given hash builder to hash /// keys. @@ -970,9 +491,7 @@ impl HashMap #[stable(feature = "hashmap_build_hasher", since = "1.7.0")] pub fn with_hasher(hash_builder: S) -> HashMap { HashMap { - hash_builder, - resize_policy: DefaultResizePolicy::new(), - table: RawTable::new(0), + base: base::HashMap::with_hasher(hash_builder), } } @@ -1000,12 +519,8 @@ impl HashMap #[inline] #[stable(feature = "hashmap_build_hasher", since = "1.7.0")] pub fn with_capacity_and_hasher(capacity: usize, hash_builder: S) -> HashMap { - let resize_policy = DefaultResizePolicy::new(); - let raw_cap = resize_policy.raw_capacity(capacity); HashMap { - hash_builder, - resize_policy, - table: RawTable::new(raw_cap), + base: base::HashMap::with_capacity_and_hasher(capacity, hash_builder), } } @@ -1023,9 +538,10 @@ impl HashMap /// let map: HashMap = HashMap::with_hasher(hasher); /// let hasher: &RandomState = map.hasher(); /// ``` + #[inline] #[stable(feature = "hashmap_public_hasher", since = "1.9.0")] pub fn hasher(&self) -> &S { - &self.hash_builder + self.base.hasher() } /// Reserves capacity for at least `additional` more elements to be inserted @@ -1048,11 +564,7 @@ impl HashMap #[inline] #[stable(feature = "rust1", since = "1.0.0")] pub fn reserve(&mut self, additional: usize) { - match self.reserve_internal(additional, Infallible) { - Err(CollectionAllocErr::CapacityOverflow) => panic!("capacity overflow"), - Err(CollectionAllocErr::AllocErr) => unreachable!(), - Ok(()) => { /* yay */ } - } + self.base.reserve(additional) } /// Tries to reserve capacity for at least `additional` more elements to be inserted @@ -1072,92 +584,12 @@ impl HashMap /// let mut map: HashMap<&str, isize> = HashMap::new(); /// map.try_reserve(10).expect("why is the test harness OOMing on 10 bytes?"); /// ``` - #[unstable(feature = "try_reserve", reason = "new API", issue="48043")] - pub fn try_reserve(&mut self, additional: usize) -> Result<(), CollectionAllocErr> { - self.reserve_internal(additional, Fallible) - } - #[inline] - fn reserve_internal(&mut self, additional: usize, fallibility: Fallibility) - -> Result<(), CollectionAllocErr> { - - let remaining = self.capacity() - self.len(); // this can't overflow - if remaining < additional { - let min_cap = self.len() - .checked_add(additional) - .ok_or(CollectionAllocErr::CapacityOverflow)?; - let raw_cap = self.resize_policy.try_raw_capacity(min_cap)?; - self.try_resize(raw_cap, fallibility)?; - } else if self.table.tag() && remaining <= self.len() { - // 
Probe sequence is too long and table is half full, - // resize early to reduce probing length. - let new_capacity = self.table.capacity() * 2; - self.try_resize(new_capacity, fallibility)?; - } - Ok(()) - } - - /// Resizes the internal vectors to a new capacity. It's your - /// responsibility to: - /// 1) Ensure `new_raw_cap` is enough for all the elements, accounting - /// for the load factor. - /// 2) Ensure `new_raw_cap` is a power of two or zero. - #[inline(never)] - #[cold] - fn try_resize( - &mut self, - new_raw_cap: usize, - fallibility: Fallibility, - ) -> Result<(), CollectionAllocErr> { - assert!(self.table.size() <= new_raw_cap); - assert!(new_raw_cap.is_power_of_two() || new_raw_cap == 0); - - let mut old_table = replace( - &mut self.table, - match fallibility { - Infallible => RawTable::new(new_raw_cap), - Fallible => RawTable::try_new(new_raw_cap)?, - } - ); - let old_size = old_table.size(); - - if old_table.size() == 0 { - return Ok(()); - } - - let mut bucket = Bucket::head_bucket(&mut old_table); - - // This is how the buckets might be laid out in memory: - // ($ marks an initialized bucket) - // ________________ - // |$$$_$$$$$$_$$$$$| - // - // But we've skipped the entire initial cluster of buckets - // and will continue iteration in this order: - // ________________ - // |$$$$$$_$$$$$ - // ^ wrap around once end is reached - // ________________ - // $$$_____________| - // ^ exit once table.size == 0 - loop { - bucket = match bucket.peek() { - Full(bucket) => { - let h = bucket.hash(); - let (b, k, v) = bucket.take(); - self.insert_hashed_ordered(h, k, v); - if b.table().size() == 0 { - break; - } - b.into_bucket() - } - Empty(b) => b.into_bucket(), - }; - bucket.next(); - } - - assert_eq!(self.table.size(), old_size); - Ok(()) + #[unstable(feature = "try_reserve", reason = "new API", issue = "48043")] + pub fn try_reserve(&mut self, additional: usize) -> Result<(), CollectionAllocErr> { + self.base + .try_reserve(additional) + .map_err(map_collection_alloc_err) } /// Shrinks the capacity of the map as much as possible. It will drop @@ -1176,20 +608,10 @@ impl HashMap /// map.shrink_to_fit(); /// assert!(map.capacity() >= 2); /// ``` + #[inline] #[stable(feature = "rust1", since = "1.0.0")] pub fn shrink_to_fit(&mut self) { - let new_raw_cap = self.resize_policy.raw_capacity(self.len()); - if self.raw_capacity() != new_raw_cap { - let old_table = replace(&mut self.table, RawTable::new(new_raw_cap)); - let old_size = old_table.size(); - - // Shrink the table. Naive algorithm for resizing: - for (h, k, v) in old_table.into_iter() { - self.insert_hashed_nocheck(h, k, v); - } - - debug_assert_eq!(self.table.size(), old_size); - } + self.base.shrink_to_fit(); } /// Shrinks the capacity of the map with a lower limit. It will drop @@ -1214,40 +636,14 @@ impl HashMap /// map.shrink_to(0); /// assert!(map.capacity() >= 2); /// ``` - #[unstable(feature = "shrink_to", reason = "new API", issue="56431")] + #[inline] + #[unstable(feature = "shrink_to", reason = "new API", issue = "56431")] pub fn shrink_to(&mut self, min_capacity: usize) { - assert!(self.capacity() >= min_capacity, "Tried to shrink to a larger capacity"); - - let new_raw_cap = self.resize_policy.raw_capacity(max(self.len(), min_capacity)); - if self.raw_capacity() != new_raw_cap { - let old_table = replace(&mut self.table, RawTable::new(new_raw_cap)); - let old_size = old_table.size(); - - // Shrink the table. 
Naive algorithm for resizing: - for (h, k, v) in old_table.into_iter() { - self.insert_hashed_nocheck(h, k, v); - } - - debug_assert_eq!(self.table.size(), old_size); - } - } - - /// Insert a pre-hashed key-value pair, without first checking - /// that there's enough room in the buckets. Returns a reference to the - /// newly insert value. - /// - /// If the key already exists, the hashtable will be returned untouched - /// and a reference to the existing element will be returned. - fn insert_hashed_nocheck(&mut self, hash: SafeHash, k: K, v: V) -> Option { - let entry = search_hashed(&mut self.table, hash, |key| *key == k).into_entry(k); - match entry { - Some(Occupied(mut elem)) => Some(elem.insert(v)), - Some(Vacant(elem)) => { - elem.insert(v); - None - } - None => unreachable!(), - } + assert!( + self.capacity() >= min_capacity, + "Tried to shrink to a larger capacity" + ); + self.base.shrink_to(min_capacity); } /// Gets the given key's corresponding entry in the map for in-place manipulation. @@ -1269,13 +665,10 @@ impl HashMap /// assert_eq!(letters[&'u'], 1); /// assert_eq!(letters.get(&'y'), None); /// ``` + #[inline] #[stable(feature = "rust1", since = "1.0.0")] pub fn entry(&mut self, key: K) -> Entry<'_, K, V> { - // Gotta resize now. - self.reserve(1); - let hash = self.make_hash(&key); - search_hashed(&mut self.table, hash, |q| q.eq(&key)) - .into_entry(key).expect("unreachable") + map_entry(self.base.rustc_entry(key)) } /// Returns a reference to the value corresponding to the key. @@ -1300,10 +693,11 @@ impl HashMap #[stable(feature = "rust1", since = "1.0.0")] #[inline] pub fn get(&self, k: &Q) -> Option<&V> - where K: Borrow, - Q: Hash + Eq + where + K: Borrow, + Q: Hash + Eq, { - self.search(k).map(|bucket| bucket.into_refs().1) + self.base.get(k) } /// Returns the key-value pair corresponding to the supplied key. @@ -1327,11 +721,13 @@ impl HashMap /// assert_eq!(map.get_key_value(&2), None); /// ``` #[unstable(feature = "map_get_key_value", issue = "49347")] + #[inline] pub fn get_key_value(&self, k: &Q) -> Option<(&K, &V)> - where K: Borrow, - Q: Hash + Eq + where + K: Borrow, + Q: Hash + Eq, { - self.search(k).map(|bucket| bucket.into_refs()) + self.base.get_key_value(k) } /// Returns `true` if the map contains a value for the specified key. @@ -1354,11 +750,13 @@ impl HashMap /// assert_eq!(map.contains_key(&2), false); /// ``` #[stable(feature = "rust1", since = "1.0.0")] + #[inline] pub fn contains_key(&self, k: &Q) -> bool - where K: Borrow, - Q: Hash + Eq + where + K: Borrow, + Q: Hash + Eq, { - self.search(k).is_some() + self.base.contains_key(k) } /// Returns a mutable reference to the value corresponding to the key. @@ -1383,11 +781,13 @@ impl HashMap /// assert_eq!(map[&1], "b"); /// ``` #[stable(feature = "rust1", since = "1.0.0")] + #[inline] pub fn get_mut(&mut self, k: &Q) -> Option<&mut V> - where K: Borrow, - Q: Hash + Eq + where + K: Borrow, + Q: Hash + Eq, { - self.search_mut(k).map(|bucket| bucket.into_mut_refs().1) + self.base.get_mut(k) } /// Inserts a key-value pair into the map. 
@@ -1416,10 +816,9 @@ impl HashMap /// assert_eq!(map[&37], "c"); /// ``` #[stable(feature = "rust1", since = "1.0.0")] + #[inline] pub fn insert(&mut self, k: K, v: V) -> Option { - let hash = self.make_hash(&k); - self.reserve(1); - self.insert_hashed_nocheck(hash, k, v) + self.base.insert(k, v) } /// Removes a key from the map, returning the value at the key if the key @@ -1443,11 +842,13 @@ impl HashMap /// assert_eq!(map.remove(&1), None); /// ``` #[stable(feature = "rust1", since = "1.0.0")] + #[inline] pub fn remove(&mut self, k: &Q) -> Option - where K: Borrow, - Q: Hash + Eq + where + K: Borrow, + Q: Hash + Eq, { - self.search_mut(k).map(|bucket| pop_internal(bucket).1) + self.base.remove(k) } /// Removes a key from the map, returning the stored key and value if the @@ -1473,15 +874,13 @@ impl HashMap /// # } /// ``` #[stable(feature = "hash_map_remove_entry", since = "1.27.0")] + #[inline] pub fn remove_entry(&mut self, k: &Q) -> Option<(K, V)> - where K: Borrow, - Q: Hash + Eq + where + K: Borrow, + Q: Hash + Eq, { - self.search_mut(k) - .map(|bucket| { - let (k, v, _) = pop_internal(bucket); - (k, v) - }) + self.base.remove_entry(k) } /// Retains only the elements specified by the predicate. @@ -1498,45 +897,18 @@ impl HashMap /// assert_eq!(map.len(), 4); /// ``` #[stable(feature = "retain_hash_collection", since = "1.18.0")] - pub fn retain(&mut self, mut f: F) - where F: FnMut(&K, &mut V) -> bool + #[inline] + pub fn retain(&mut self, f: F) + where + F: FnMut(&K, &mut V) -> bool, { - if self.table.size() == 0 { - return; - } - let mut elems_left = self.table.size(); - let mut bucket = Bucket::head_bucket(&mut self.table); - bucket.prev(); - let start_index = bucket.index(); - while elems_left != 0 { - bucket = match bucket.peek() { - Full(mut full) => { - elems_left -= 1; - let should_remove = { - let (k, v) = full.read_mut(); - !f(k, v) - }; - if should_remove { - let prev_raw = full.raw(); - let (_, _, t) = pop_internal(full); - Bucket::new_from(prev_raw, t) - } else { - full.into_bucket() - } - }, - Empty(b) => { - b.into_bucket() - } - }; - bucket.prev(); // reverse iteration - debug_assert!(elems_left == 0 || bucket.index() != start_index); - } + self.base.retain(f) } } impl HashMap - where K: Eq + Hash, - S: BuildHasher +where + S: BuildHasher, { /// Creates a raw entry builder for the HashMap. /// @@ -1569,10 +941,9 @@ impl HashMap /// so that the map now contains keys which compare equal, search may start /// acting erratically, with two keys randomly masking each other. Implementations /// are free to assume this doesn't happen (within the limits of memory-safety). - #[inline(always)] + #[inline] #[unstable(feature = "hash_raw_entry", issue = "56167")] pub fn raw_entry_mut(&mut self) -> RawEntryBuilderMut<'_, K, V, S> { - self.reserve(1); RawEntryBuilderMut { map: self } } @@ -1591,6 +962,7 @@ impl HashMap /// `get` should be preferred. /// /// Immutable raw entries have very limited use; you might instead want `raw_entry_mut`. 
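The raw entry API these builders expose is aimed at callers that want to compute a key's hash once and reuse it across operations. A small sketch of how it might be used is below; this was nightly-only at the time, behind the `hash_raw_entry` feature, and the method names are those of that unstable snapshot.

```
#![feature(hash_raw_entry)]

use std::collections::HashMap;
use std::hash::{BuildHasher, Hash, Hasher};

fn main() {
    let mut counts: HashMap<String, u32> = HashMap::new();
    let word = "hello";

    // Hash the borrowed key once with the map's own hasher...
    let mut hasher = counts.hasher().build_hasher();
    word.hash(&mut hasher);
    let hash = hasher.finish();

    // ...then reuse that hash for the lookup, only allocating an owned key
    // if the entry turns out to be vacant.
    let (_, count) = counts
        .raw_entry_mut()
        .from_key_hashed_nocheck(hash, word)
        .or_insert_with(|| (word.to_string(), 0));
    *count += 1;

    assert_eq!(counts["hello"], 1);
}
```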
+ #[inline] #[unstable(feature = "hash_raw_entry", issue = "56167")] pub fn raw_entry(&self) -> RawEntryBuilder<'_, K, V, S> { RawEntryBuilder { map: self } @@ -1599,32 +971,36 @@ impl HashMap #[stable(feature = "rust1", since = "1.0.0")] impl PartialEq for HashMap - where K: Eq + Hash, - V: PartialEq, - S: BuildHasher +where + K: Eq + Hash, + V: PartialEq, + S: BuildHasher, { fn eq(&self, other: &HashMap) -> bool { if self.len() != other.len() { return false; } - self.iter().all(|(key, value)| other.get(key).map_or(false, |v| *value == *v)) + self.iter() + .all(|(key, value)| other.get(key).map_or(false, |v| *value == *v)) } } #[stable(feature = "rust1", since = "1.0.0")] impl Eq for HashMap - where K: Eq + Hash, - V: Eq, - S: BuildHasher +where + K: Eq + Hash, + V: Eq, + S: BuildHasher, { } #[stable(feature = "rust1", since = "1.0.0")] impl Debug for HashMap - where K: Eq + Hash + Debug, - V: Debug, - S: BuildHasher +where + K: Eq + Hash + Debug, + V: Debug, + S: BuildHasher, { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_map().entries(self.iter()).finish() @@ -1633,10 +1009,12 @@ impl Debug for HashMap #[stable(feature = "rust1", since = "1.0.0")] impl Default for HashMap - where K: Eq + Hash, - S: BuildHasher + Default +where + K: Eq + Hash, + S: BuildHasher + Default, { /// Creates an empty `HashMap`, with the `Default` value for the hasher. + #[inline] fn default() -> HashMap { HashMap::with_hasher(Default::default()) } @@ -1644,9 +1022,10 @@ impl Default for HashMap #[stable(feature = "rust1", since = "1.0.0")] impl Index<&Q> for HashMap - where K: Eq + Hash + Borrow, - Q: Eq + Hash, - S: BuildHasher +where + K: Eq + Hash + Borrow, + Q: Eq + Hash, + S: BuildHasher, { type Output = V; @@ -1670,23 +1049,24 @@ impl Index<&Q> for HashMap /// [`HashMap`]: struct.HashMap.html #[stable(feature = "rust1", since = "1.0.0")] pub struct Iter<'a, K: 'a, V: 'a> { - inner: table::Iter<'a, K, V>, + base: base::Iter<'a, K, V>, } // FIXME(#26925) Remove in favor of `#[derive(Clone)]` #[stable(feature = "rust1", since = "1.0.0")] impl Clone for Iter<'_, K, V> { + #[inline] fn clone(&self) -> Self { - Iter { inner: self.inner.clone() } + Iter { + base: self.base.clone(), + } } } #[stable(feature = "std_debug", since = "1.16.0")] impl fmt::Debug for Iter<'_, K, V> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_list() - .entries(self.clone()) - .finish() + f.debug_list().entries(self.clone()).finish() } } @@ -1699,7 +1079,17 @@ impl fmt::Debug for Iter<'_, K, V> { /// [`HashMap`]: struct.HashMap.html #[stable(feature = "rust1", since = "1.0.0")] pub struct IterMut<'a, K: 'a, V: 'a> { - inner: table::IterMut<'a, K, V>, + base: base::IterMut<'a, K, V>, +} + +impl<'a, K, V> IterMut<'a, K, V> { + /// Returns a iterator of references over the remaining items. + #[inline] + pub(super) fn iter(&self) -> Iter<'_, K, V> { + Iter { + base: self.base.rustc_iter(), + } + } } /// An owning iterator over the entries of a `HashMap`. @@ -1711,7 +1101,17 @@ pub struct IterMut<'a, K: 'a, V: 'a> { /// [`HashMap`]: struct.HashMap.html #[stable(feature = "rust1", since = "1.0.0")] pub struct IntoIter { - pub(super) inner: table::IntoIter, + base: base::IntoIter, +} + +impl IntoIter { + /// Returns a iterator of references over the remaining items. + #[inline] + pub(super) fn iter(&self) -> Iter<'_, K, V> { + Iter { + base: self.base.rustc_iter(), + } + } } /// An iterator over the keys of a `HashMap`. 
@@ -1729,17 +1129,18 @@ pub struct Keys<'a, K: 'a, V: 'a> { // FIXME(#26925) Remove in favor of `#[derive(Clone)]` #[stable(feature = "rust1", since = "1.0.0")] impl Clone for Keys<'_, K, V> { + #[inline] fn clone(&self) -> Self { - Keys { inner: self.inner.clone() } + Keys { + inner: self.inner.clone(), + } } } #[stable(feature = "std_debug", since = "1.16.0")] impl fmt::Debug for Keys<'_, K, V> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_list() - .entries(self.clone()) - .finish() + f.debug_list().entries(self.clone()).finish() } } @@ -1758,17 +1159,18 @@ pub struct Values<'a, K: 'a, V: 'a> { // FIXME(#26925) Remove in favor of `#[derive(Clone)]` #[stable(feature = "rust1", since = "1.0.0")] impl Clone for Values<'_, K, V> { + #[inline] fn clone(&self) -> Self { - Values { inner: self.inner.clone() } + Values { + inner: self.inner.clone(), + } } } #[stable(feature = "std_debug", since = "1.16.0")] impl fmt::Debug for Values<'_, K, V> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_list() - .entries(self.clone()) - .finish() + f.debug_list().entries(self.clone()).finish() } } @@ -1781,7 +1183,17 @@ impl fmt::Debug for Values<'_, K, V> { /// [`HashMap`]: struct.HashMap.html #[stable(feature = "drain", since = "1.6.0")] pub struct Drain<'a, K: 'a, V: 'a> { - pub(super) inner: table::Drain<'a, K, V>, + base: base::Drain<'a, K, V>, +} + +impl<'a, K, V> Drain<'a, K, V> { + /// Returns a iterator of references over the remaining items. + #[inline] + pub(super) fn iter(&self) -> Iter<'_, K, V> { + Iter { + base: self.base.rustc_iter(), + } + } } /// A mutable iterator over the values of a `HashMap`. @@ -1796,47 +1208,6 @@ pub struct ValuesMut<'a, K: 'a, V: 'a> { inner: IterMut<'a, K, V>, } -enum InternalEntry { - Occupied { elem: FullBucket }, - Vacant { - hash: SafeHash, - elem: VacantEntryState, - }, - TableIsEmpty, -} - -impl InternalEntry { - #[inline] - fn into_occupied_bucket(self) -> Option> { - match self { - InternalEntry::Occupied { elem } => Some(elem), - _ => None, - } - } -} - -impl<'a, K, V> InternalEntry> { - #[inline] - fn into_entry(self, key: K) -> Option> { - match self { - InternalEntry::Occupied { elem } => { - Some(Occupied(OccupiedEntry { - key: Some(key), - elem, - })) - } - InternalEntry::Vacant { hash, elem } => { - Some(Vacant(VacantEntry { - hash, - key, - elem, - })) - } - InternalEntry::TableIsEmpty => None, - } - } -} - /// A builder for computing where in a HashMap a key-value pair would be stored. /// /// See the [`HashMap::raw_entry_mut`] docs for usage examples. @@ -1871,7 +1242,7 @@ pub enum RawEntryMut<'a, K: 'a, V: 'a, S: 'a> { /// [`RawEntryMut`]: enum.RawEntryMut.html #[unstable(feature = "hash_raw_entry", issue = "56167")] pub struct RawOccupiedEntryMut<'a, K: 'a, V: 'a> { - elem: FullBucket>, + base: base::RawOccupiedEntryMut<'a, K, V>, } /// A view into a vacant entry in a `HashMap`. @@ -1880,8 +1251,7 @@ pub struct RawOccupiedEntryMut<'a, K: 'a, V: 'a> { /// [`RawEntryMut`]: enum.RawEntryMut.html #[unstable(feature = "hash_raw_entry", issue = "56167")] pub struct RawVacantEntryMut<'a, K: 'a, V: 'a, S: 'a> { - elem: VacantEntryState>, - hash_builder: &'a S, + base: base::RawVacantEntryMut<'a, K, V, S>, } /// A builder for computing where in a HashMap a key-value pair would be stored. 
@@ -1895,128 +1265,81 @@ pub struct RawEntryBuilder<'a, K: 'a, V: 'a, S: 'a> { } impl<'a, K, V, S> RawEntryBuilderMut<'a, K, V, S> - where S: BuildHasher, - K: Eq + Hash, +where + S: BuildHasher, { /// Creates a `RawEntryMut` from the given key. + #[inline] #[unstable(feature = "hash_raw_entry", issue = "56167")] pub fn from_key(self, k: &Q) -> RawEntryMut<'a, K, V, S> - where K: Borrow, - Q: Hash + Eq + where + K: Borrow, + Q: Hash + Eq, { - let mut hasher = self.map.hash_builder.build_hasher(); - k.hash(&mut hasher); - self.from_key_hashed_nocheck(hasher.finish(), k) + map_raw_entry(self.map.base.raw_entry_mut().from_key(k)) } /// Creates a `RawEntryMut` from the given key and its hash. #[inline] #[unstable(feature = "hash_raw_entry", issue = "56167")] pub fn from_key_hashed_nocheck(self, hash: u64, k: &Q) -> RawEntryMut<'a, K, V, S> - where K: Borrow, - Q: Eq + where + K: Borrow, + Q: Eq, { - self.from_hash(hash, |q| q.borrow().eq(k)) + map_raw_entry( + self.map + .base + .raw_entry_mut() + .from_key_hashed_nocheck(hash, k), + ) } - #[inline] - fn search(self, hash: u64, is_match: F, compare_hashes: bool) -> RawEntryMut<'a, K, V, S> - where for<'b> F: FnMut(&'b K) -> bool, - { - match search_hashed_nonempty_mut(&mut self.map.table, - SafeHash::new(hash), - is_match, - compare_hashes) { - InternalEntry::Occupied { elem } => { - RawEntryMut::Occupied(RawOccupiedEntryMut { elem }) - } - InternalEntry::Vacant { elem, .. } => { - RawEntryMut::Vacant(RawVacantEntryMut { - elem, - hash_builder: &self.map.hash_builder, - }) - } - InternalEntry::TableIsEmpty => { - unreachable!() - } - } - } /// Creates a `RawEntryMut` from the given hash. #[inline] #[unstable(feature = "hash_raw_entry", issue = "56167")] pub fn from_hash(self, hash: u64, is_match: F) -> RawEntryMut<'a, K, V, S> - where for<'b> F: FnMut(&'b K) -> bool, + where + for<'b> F: FnMut(&'b K) -> bool, { - self.search(hash, is_match, true) - } - - /// Search possible locations for an element with hash `hash` until `is_match` returns true for - /// one of them. There is no guarantee that all keys passed to `is_match` will have the provided - /// hash. - #[unstable(feature = "hash_raw_entry", issue = "56167")] - pub fn search_bucket(self, hash: u64, is_match: F) -> RawEntryMut<'a, K, V, S> - where for<'b> F: FnMut(&'b K) -> bool, - { - self.search(hash, is_match, false) + map_raw_entry(self.map.base.raw_entry_mut().from_hash(hash, is_match)) } } impl<'a, K, V, S> RawEntryBuilder<'a, K, V, S> - where S: BuildHasher, +where + S: BuildHasher, { /// Access an entry by key. + #[inline] #[unstable(feature = "hash_raw_entry", issue = "56167")] pub fn from_key(self, k: &Q) -> Option<(&'a K, &'a V)> - where K: Borrow, - Q: Hash + Eq + where + K: Borrow, + Q: Hash + Eq, { - let mut hasher = self.map.hash_builder.build_hasher(); - k.hash(&mut hasher); - self.from_key_hashed_nocheck(hasher.finish(), k) + self.map.base.raw_entry().from_key(k) } /// Access an entry by a key and its hash. 
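// Editorial sketch, not part of the patch: a lookup in the hash-plus-closure
// style of the from_hash accessors in this hunk (the immutable builder
// variant follows just below), where the caller supplies both the hash and
// the key-matching logic. Assumes a nightly toolchain with
// #![feature(hash_raw_entry)]; the data is made up for illustration.
#![feature(hash_raw_entry)]
use std::collections::HashMap;
use std::hash::{BuildHasher, Hash, Hasher};

fn main() {
    let mut map: HashMap<String, u32> = HashMap::new();
    map.insert("poneyland".to_string(), 3);

    // Hash the lookup key with the map's hasher, then match on &K directly.
    let mut hasher = map.hasher().build_hasher();
    "poneyland".hash(&mut hasher);
    let found = map
        .raw_entry()
        .from_hash(hasher.finish(), |k| k == "poneyland");
    assert_eq!(found, Some((&"poneyland".to_string(), &3)));
}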
+ #[inline] #[unstable(feature = "hash_raw_entry", issue = "56167")] pub fn from_key_hashed_nocheck(self, hash: u64, k: &Q) -> Option<(&'a K, &'a V)> - where K: Borrow, - Q: Hash + Eq - + where + K: Borrow, + Q: Hash + Eq, { - self.from_hash(hash, |q| q.borrow().eq(k)) - } - - fn search(self, hash: u64, is_match: F, compare_hashes: bool) -> Option<(&'a K, &'a V)> - where F: FnMut(&K) -> bool - { - if unsafe { unlikely(self.map.table.size() == 0) } { - return None; - } - match search_hashed_nonempty(&self.map.table, - SafeHash::new(hash), - is_match, - compare_hashes) { - InternalEntry::Occupied { elem } => Some(elem.into_refs()), - InternalEntry::Vacant { .. } => None, - InternalEntry::TableIsEmpty => unreachable!(), - } + self.map.base.raw_entry().from_key_hashed_nocheck(hash, k) } /// Access an entry by hash. + #[inline] #[unstable(feature = "hash_raw_entry", issue = "56167")] pub fn from_hash(self, hash: u64, is_match: F) -> Option<(&'a K, &'a V)> - where F: FnMut(&K) -> bool - { - self.search(hash, is_match, true) - } - - /// Search possible locations for an element with hash `hash` until `is_match` returns true for - /// one of them. There is no guarantee that all keys passed to `is_match` will have the provided - /// hash. - #[unstable(feature = "hash_raw_entry", issue = "56167")] - pub fn search_bucket(self, hash: u64, is_match: F) -> Option<(&'a K, &'a V)> - where F: FnMut(&K) -> bool + where + F: FnMut(&K) -> bool, { - self.search(hash, is_match, false) + self.map.base.raw_entry().from_hash(hash, is_match) } } @@ -2038,10 +1361,12 @@ impl<'a, K, V, S> RawEntryMut<'a, K, V, S> { /// *map.raw_entry_mut().from_key("poneyland").or_insert("poneyland", 10).1 *= 2; /// assert_eq!(map["poneyland"], 6); /// ``` + #[inline] #[unstable(feature = "hash_raw_entry", issue = "56167")] pub fn or_insert(self, default_key: K, default_val: V) -> (&'a mut K, &'a mut V) - where K: Hash, - S: BuildHasher, + where + K: Hash, + S: BuildHasher, { match self { RawEntryMut::Occupied(entry) => entry.into_key_value(), @@ -2066,11 +1391,13 @@ impl<'a, K, V, S> RawEntryMut<'a, K, V, S> { /// /// assert_eq!(map["poneyland"], "hoho".to_string()); /// ``` + #[inline] #[unstable(feature = "hash_raw_entry", issue = "56167")] pub fn or_insert_with(self, default: F) -> (&'a mut K, &'a mut V) - where F: FnOnce() -> (K, V), - K: Hash, - S: BuildHasher, + where + F: FnOnce() -> (K, V), + K: Hash, + S: BuildHasher, { match self { RawEntryMut::Occupied(entry) => entry.into_key_value(), @@ -2104,9 +1431,11 @@ impl<'a, K, V, S> RawEntryMut<'a, K, V, S> { /// .or_insert("poneyland", 0); /// assert_eq!(map["poneyland"], 43); /// ``` + #[inline] #[unstable(feature = "hash_raw_entry", issue = "56167")] pub fn and_modify(self, f: F) -> Self - where F: FnOnce(&mut K, &mut V) + where + F: FnOnce(&mut K, &mut V), { match self { RawEntryMut::Occupied(mut entry) => { @@ -2115,7 +1444,7 @@ impl<'a, K, V, S> RawEntryMut<'a, K, V, S> { f(k, v); } RawEntryMut::Occupied(entry) - }, + } RawEntryMut::Vacant(entry) => RawEntryMut::Vacant(entry), } } @@ -2123,130 +1452,130 @@ impl<'a, K, V, S> RawEntryMut<'a, K, V, S> { impl<'a, K, V> RawOccupiedEntryMut<'a, K, V> { /// Gets a reference to the key in the entry. + #[inline] #[unstable(feature = "hash_raw_entry", issue = "56167")] pub fn key(&self) -> &K { - self.elem.read().0 + self.base.key() } /// Gets a mutable reference to the key in the entry. 
+ #[inline] #[unstable(feature = "hash_raw_entry", issue = "56167")] pub fn key_mut(&mut self) -> &mut K { - self.elem.read_mut().0 + self.base.key_mut() } /// Converts the entry into a mutable reference to the key in the entry /// with a lifetime bound to the map itself. + #[inline] #[unstable(feature = "hash_raw_entry", issue = "56167")] pub fn into_key(self) -> &'a mut K { - self.elem.into_mut_refs().0 + self.base.into_key() } /// Gets a reference to the value in the entry. + #[inline] #[unstable(feature = "hash_raw_entry", issue = "56167")] pub fn get(&self) -> &V { - self.elem.read().1 + self.base.get() } /// Converts the OccupiedEntry into a mutable reference to the value in the entry /// with a lifetime bound to the map itself. + #[inline] #[unstable(feature = "hash_raw_entry", issue = "56167")] pub fn into_mut(self) -> &'a mut V { - self.elem.into_mut_refs().1 + self.base.into_mut() } /// Gets a mutable reference to the value in the entry. + #[inline] #[unstable(feature = "hash_raw_entry", issue = "56167")] pub fn get_mut(&mut self) -> &mut V { - self.elem.read_mut().1 + self.base.get_mut() } /// Gets a reference to the key and value in the entry. + #[inline] #[unstable(feature = "hash_raw_entry", issue = "56167")] pub fn get_key_value(&mut self) -> (&K, &V) { - self.elem.read() + self.base.get_key_value() } /// Gets a mutable reference to the key and value in the entry. + #[inline] #[unstable(feature = "hash_raw_entry", issue = "56167")] pub fn get_key_value_mut(&mut self) -> (&mut K, &mut V) { - self.elem.read_mut() + self.base.get_key_value_mut() } /// Converts the OccupiedEntry into a mutable reference to the key and value in the entry /// with a lifetime bound to the map itself. + #[inline] #[unstable(feature = "hash_raw_entry", issue = "56167")] pub fn into_key_value(self) -> (&'a mut K, &'a mut V) { - self.elem.into_mut_refs() + self.base.into_key_value() } /// Sets the value of the entry, and returns the entry's old value. + #[inline] #[unstable(feature = "hash_raw_entry", issue = "56167")] pub fn insert(&mut self, value: V) -> V { - mem::replace(self.get_mut(), value) + self.base.insert(value) } /// Sets the value of the entry, and returns the entry's old value. + #[inline] #[unstable(feature = "hash_raw_entry", issue = "56167")] pub fn insert_key(&mut self, key: K) -> K { - mem::replace(self.key_mut(), key) + self.base.insert_key(key) } /// Takes the value out of the entry, and returns it. + #[inline] #[unstable(feature = "hash_raw_entry", issue = "56167")] pub fn remove(self) -> V { - pop_internal(self.elem).1 + self.base.remove() } /// Take the ownership of the key and value from the map. + #[inline] #[unstable(feature = "hash_raw_entry", issue = "56167")] pub fn remove_entry(self) -> (K, V) { - let (k, v, _) = pop_internal(self.elem); - (k, v) + self.base.remove_entry() } } impl<'a, K, V, S> RawVacantEntryMut<'a, K, V, S> { /// Sets the value of the entry with the VacantEntry's key, /// and returns a mutable reference to it. + #[inline] #[unstable(feature = "hash_raw_entry", issue = "56167")] pub fn insert(self, key: K, value: V) -> (&'a mut K, &'a mut V) - where K: Hash, - S: BuildHasher, + where + K: Hash, + S: BuildHasher, { - let mut hasher = self.hash_builder.build_hasher(); - key.hash(&mut hasher); - self.insert_hashed_nocheck(hasher.finish(), key, value) + self.base.insert(key, value) } /// Sets the value of the entry with the VacantEntry's key, /// and returns a mutable reference to it. 
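// Editorial sketch, not part of the patch: inserting through the raw vacant
// entry above lets a caller defer building an owned key until a miss is
// confirmed. Assumes a nightly toolchain with #![feature(hash_raw_entry)];
// the key and counter are made up for illustration.
#![feature(hash_raw_entry)]
use std::collections::hash_map::{HashMap, RawEntryMut};

fn main() {
    let mut map: HashMap<String, u32> = HashMap::new();
    match map.raw_entry_mut().from_key("poneyland") {
        RawEntryMut::Occupied(mut o) => {
            *o.get_mut() += 1;
        }
        RawEntryMut::Vacant(v) => {
            // Allocate the owned String key only on a miss.
            v.insert("poneyland".to_string(), 1);
        }
    }
    assert_eq!(map["poneyland"], 1);
}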
#[inline] #[unstable(feature = "hash_raw_entry", issue = "56167")] - pub fn insert_hashed_nocheck(self, hash: u64, key: K, value: V) -> (&'a mut K, &'a mut V) { - let hash = SafeHash::new(hash); - let b = match self.elem { - NeqElem(mut bucket, disp) => { - if disp >= DISPLACEMENT_THRESHOLD { - bucket.table_mut().set_tag(true); - } - robin_hood(bucket, disp, hash, key, value) - }, - NoElem(mut bucket, disp) => { - if disp >= DISPLACEMENT_THRESHOLD { - bucket.table_mut().set_tag(true); - } - bucket.put(hash, key, value) - }, - }; - b.into_mut_refs() + pub fn insert_hashed_nocheck(self, hash: u64, key: K, value: V) -> (&'a mut K, &'a mut V) + where + K: Hash, + S: BuildHasher, + { + self.base.insert_hashed_nocheck(hash, key, value) } } #[unstable(feature = "hash_raw_entry", issue = "56167")] impl Debug for RawEntryBuilderMut<'_, K, V, S> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("RawEntryBuilder") - .finish() + f.debug_struct("RawEntryBuilder").finish() } } @@ -2254,16 +1583,8 @@ impl Debug for RawEntryBuilderMut<'_, K, V, S> { impl Debug for RawEntryMut<'_, K, V, S> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { match *self { - RawEntryMut::Vacant(ref v) => { - f.debug_tuple("RawEntry") - .field(v) - .finish() - } - RawEntryMut::Occupied(ref o) => { - f.debug_tuple("RawEntry") - .field(o) - .finish() - } + RawEntryMut::Vacant(ref v) => f.debug_tuple("RawEntry").field(v).finish(), + RawEntryMut::Occupied(ref o) => f.debug_tuple("RawEntry").field(o).finish(), } } } @@ -2272,25 +1593,23 @@ impl Debug for RawEntryMut<'_, K, V, S> { impl Debug for RawOccupiedEntryMut<'_, K, V> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("RawOccupiedEntryMut") - .field("key", self.key()) - .field("value", self.get()) - .finish() + .field("key", self.key()) + .field("value", self.get()) + .finish() } } #[unstable(feature = "hash_raw_entry", issue = "56167")] impl Debug for RawVacantEntryMut<'_, K, V, S> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("RawVacantEntryMut") - .finish() + f.debug_struct("RawVacantEntryMut").finish() } } #[unstable(feature = "hash_raw_entry", issue = "56167")] impl Debug for RawEntryBuilder<'_, K, V, S> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("RawEntryBuilder") - .finish() + f.debug_struct("RawEntryBuilder").finish() } } @@ -2304,29 +1623,19 @@ impl Debug for RawEntryBuilder<'_, K, V, S> { pub enum Entry<'a, K: 'a, V: 'a> { /// An occupied entry. #[stable(feature = "rust1", since = "1.0.0")] - Occupied(#[stable(feature = "rust1", since = "1.0.0")] - OccupiedEntry<'a, K, V>), + Occupied(#[stable(feature = "rust1", since = "1.0.0")] OccupiedEntry<'a, K, V>), /// A vacant entry. 
#[stable(feature = "rust1", since = "1.0.0")] - Vacant(#[stable(feature = "rust1", since = "1.0.0")] - VacantEntry<'a, K, V>), + Vacant(#[stable(feature = "rust1", since = "1.0.0")] VacantEntry<'a, K, V>), } -#[stable(feature= "debug_hash_map", since = "1.12.0")] +#[stable(feature = "debug_hash_map", since = "1.12.0")] impl Debug for Entry<'_, K, V> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { match *self { - Vacant(ref v) => { - f.debug_tuple("Entry") - .field(v) - .finish() - } - Occupied(ref o) => { - f.debug_tuple("Entry") - .field(o) - .finish() - } + Vacant(ref v) => f.debug_tuple("Entry").field(v).finish(), + Occupied(ref o) => f.debug_tuple("Entry").field(o).finish(), } } } @@ -2337,16 +1646,10 @@ impl Debug for Entry<'_, K, V> { /// [`Entry`]: enum.Entry.html #[stable(feature = "rust1", since = "1.0.0")] pub struct OccupiedEntry<'a, K: 'a, V: 'a> { - key: Option, - elem: FullBucket>, + base: base::RustcOccupiedEntry<'a, K, V>, } -#[stable(feature = "rust1", since = "1.0.0")] -unsafe impl<'a, K: 'a + Send, V: 'a + Send> Send for OccupiedEntry<'a, K, V> {} -#[stable(feature = "rust1", since = "1.0.0")] -unsafe impl<'a, K: 'a + Sync, V: 'a + Sync> Sync for OccupiedEntry<'a, K, V> {} - -#[stable(feature= "debug_hash_map", since = "1.12.0")] +#[stable(feature = "debug_hash_map", since = "1.12.0")] impl Debug for OccupiedEntry<'_, K, V> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("OccupiedEntry") @@ -2362,39 +1665,22 @@ impl Debug for OccupiedEntry<'_, K, V> { /// [`Entry`]: enum.Entry.html #[stable(feature = "rust1", since = "1.0.0")] pub struct VacantEntry<'a, K: 'a, V: 'a> { - hash: SafeHash, - key: K, - elem: VacantEntryState>, + base: base::RustcVacantEntry<'a, K, V>, } -#[stable(feature = "rust1", since = "1.0.0")] -unsafe impl<'a, K: 'a + Send, V: 'a + Send> Send for VacantEntry<'a, K, V> {} -#[stable(feature = "rust1", since = "1.0.0")] -unsafe impl<'a, K: 'a + Sync, V: 'a + Sync> Sync for VacantEntry<'a, K, V> {} - -#[stable(feature= "debug_hash_map", since = "1.12.0")] +#[stable(feature = "debug_hash_map", since = "1.12.0")] impl Debug for VacantEntry<'_, K, V> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_tuple("VacantEntry") - .field(self.key()) - .finish() + f.debug_tuple("VacantEntry").field(self.key()).finish() } } -/// Possible states of a VacantEntry. -enum VacantEntryState { - /// The index is occupied, but the key to insert has precedence, - /// and will kick the current one out on insertion. - NeqElem(FullBucket, usize), - /// The index is genuinely vacant. 
- NoElem(EmptyBucket, usize), -} - #[stable(feature = "rust1", since = "1.0.0")] impl<'a, K, V, S> IntoIterator for &'a HashMap { type Item = (&'a K, &'a V); type IntoIter = Iter<'a, K, V>; + #[inline] fn into_iter(self) -> Iter<'a, K, V> { self.iter() } @@ -2405,6 +1691,7 @@ impl<'a, K, V, S> IntoIterator for &'a mut HashMap { type Item = (&'a K, &'a mut V); type IntoIter = IterMut<'a, K, V>; + #[inline] fn into_iter(self) -> IterMut<'a, K, V> { self.iter_mut() } @@ -2432,8 +1719,11 @@ impl IntoIterator for HashMap { /// // Not possible with .iter() /// let vec: Vec<(&str, i32)> = map.into_iter().collect(); /// ``` + #[inline] fn into_iter(self) -> IntoIter { - IntoIter { inner: self.table.into_iter() } + IntoIter { + base: self.base.into_iter(), + } } } @@ -2443,18 +1733,18 @@ impl<'a, K, V> Iterator for Iter<'a, K, V> { #[inline] fn next(&mut self) -> Option<(&'a K, &'a V)> { - self.inner.next() + self.base.next() } #[inline] fn size_hint(&self) -> (usize, Option) { - self.inner.size_hint() + self.base.size_hint() } } #[stable(feature = "rust1", since = "1.0.0")] impl ExactSizeIterator for Iter<'_, K, V> { #[inline] fn len(&self) -> usize { - self.inner.len() + self.base.len() } } @@ -2467,18 +1757,18 @@ impl<'a, K, V> Iterator for IterMut<'a, K, V> { #[inline] fn next(&mut self) -> Option<(&'a K, &'a mut V)> { - self.inner.next() + self.base.next() } #[inline] fn size_hint(&self) -> (usize, Option) { - self.inner.size_hint() + self.base.size_hint() } } #[stable(feature = "rust1", since = "1.0.0")] impl ExactSizeIterator for IterMut<'_, K, V> { #[inline] fn len(&self) -> usize { - self.inner.len() + self.base.len() } } #[stable(feature = "fused", since = "1.26.0")] @@ -2486,13 +1776,12 @@ impl FusedIterator for IterMut<'_, K, V> {} #[stable(feature = "std_debug", since = "1.16.0")] impl fmt::Debug for IterMut<'_, K, V> - where K: fmt::Debug, - V: fmt::Debug, +where + K: fmt::Debug, + V: fmt::Debug, { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_list() - .entries(self.inner.iter()) - .finish() + f.debug_list().entries(self.iter()).finish() } } @@ -2502,18 +1791,18 @@ impl Iterator for IntoIter { #[inline] fn next(&mut self) -> Option<(K, V)> { - self.inner.next().map(|(_, k, v)| (k, v)) + self.base.next() } #[inline] fn size_hint(&self) -> (usize, Option) { - self.inner.size_hint() + self.base.size_hint() } } #[stable(feature = "rust1", since = "1.0.0")] impl ExactSizeIterator for IntoIter { #[inline] fn len(&self) -> usize { - self.inner.len() + self.base.len() } } #[stable(feature = "fused", since = "1.26.0")] @@ -2522,9 +1811,7 @@ impl FusedIterator for IntoIter {} #[stable(feature = "std_debug", since = "1.16.0")] impl fmt::Debug for IntoIter { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_list() - .entries(self.inner.iter()) - .finish() + f.debug_list().entries(self.iter()).finish() } } @@ -2599,13 +1886,12 @@ impl FusedIterator for ValuesMut<'_, K, V> {} #[stable(feature = "std_debug", since = "1.16.0")] impl fmt::Debug for ValuesMut<'_, K, V> - where K: fmt::Debug, - V: fmt::Debug, +where + K: fmt::Debug, + V: fmt::Debug, { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_list() - .entries(self.inner.inner.iter()) - .finish() + f.debug_list().entries(self.inner.iter()).finish() } } @@ -2615,18 +1901,18 @@ impl<'a, K, V> Iterator for Drain<'a, K, V> { #[inline] fn next(&mut self) -> Option<(K, V)> { - self.inner.next().map(|(_, k, v)| (k, v)) + self.base.next() } #[inline] fn size_hint(&self) -> (usize, Option) { - 
self.inner.size_hint() + self.base.size_hint() } } #[stable(feature = "drain", since = "1.6.0")] impl ExactSizeIterator for Drain<'_, K, V> { #[inline] fn len(&self) -> usize { - self.inner.len() + self.base.len() } } #[stable(feature = "fused", since = "1.26.0")] @@ -2634,13 +1920,12 @@ impl FusedIterator for Drain<'_, K, V> {} #[stable(feature = "std_debug", since = "1.16.0")] impl fmt::Debug for Drain<'_, K, V> - where K: fmt::Debug, - V: fmt::Debug, +where + K: fmt::Debug, + V: fmt::Debug, { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_list() - .entries(self.inner.iter()) - .finish() + f.debug_list().entries(self.iter()).finish() } } @@ -2662,6 +1947,7 @@ impl<'a, K, V> Entry<'a, K, V> { /// *map.entry("poneyland").or_insert(10) *= 2; /// assert_eq!(map["poneyland"], 6); /// ``` + #[inline] pub fn or_insert(self, default: V) -> &'a mut V { match self { Occupied(entry) => entry.into_mut(), @@ -2685,6 +1971,7 @@ impl<'a, K, V> Entry<'a, K, V> { /// /// assert_eq!(map["poneyland"], "hoho".to_string()); /// ``` + #[inline] pub fn or_insert_with V>(self, default: F) -> &'a mut V { match self { Occupied(entry) => entry.into_mut(), @@ -2702,6 +1989,7 @@ impl<'a, K, V> Entry<'a, K, V> { /// let mut map: HashMap<&str, u32> = HashMap::new(); /// assert_eq!(map.entry("poneyland").key(), &"poneyland"); /// ``` + #[inline] #[stable(feature = "map_entry_keys", since = "1.10.0")] pub fn key(&self) -> &K { match *self { @@ -2730,19 +2018,20 @@ impl<'a, K, V> Entry<'a, K, V> { /// .or_insert(42); /// assert_eq!(map["poneyland"], 43); /// ``` + #[inline] #[stable(feature = "entry_and_modify", since = "1.26.0")] pub fn and_modify(self, f: F) -> Self - where F: FnOnce(&mut V) + where + F: FnOnce(&mut V), { match self { Occupied(mut entry) => { f(entry.get_mut()); Occupied(entry) - }, + } Vacant(entry) => Vacant(entry), } } - } impl<'a, K, V: Default> Entry<'a, K, V> { @@ -2762,6 +2051,7 @@ impl<'a, K, V: Default> Entry<'a, K, V> { /// assert_eq!(map["poneyland"], None); /// # } /// ``` + #[inline] pub fn or_default(self) -> &'a mut V { match self { Occupied(entry) => entry.into_mut(), @@ -2782,9 +2072,10 @@ impl<'a, K, V> OccupiedEntry<'a, K, V> { /// map.entry("poneyland").or_insert(12); /// assert_eq!(map.entry("poneyland").key(), &"poneyland"); /// ``` + #[inline] #[stable(feature = "map_entry_keys", since = "1.10.0")] pub fn key(&self) -> &K { - self.elem.read().0 + self.base.key() } /// Take the ownership of the key and value from the map. @@ -2805,10 +2096,10 @@ impl<'a, K, V> OccupiedEntry<'a, K, V> { /// /// assert_eq!(map.contains_key("poneyland"), false); /// ``` + #[inline] #[stable(feature = "map_entry_recover_keys2", since = "1.12.0")] pub fn remove_entry(self) -> (K, V) { - let (k, v, _) = pop_internal(self.elem); - (k, v) + self.base.remove_entry() } /// Gets a reference to the value in the entry. @@ -2826,9 +2117,10 @@ impl<'a, K, V> OccupiedEntry<'a, K, V> { /// assert_eq!(o.get(), &12); /// } /// ``` + #[inline] #[stable(feature = "rust1", since = "1.0.0")] pub fn get(&self) -> &V { - self.elem.read().1 + self.base.get() } /// Gets a mutable reference to the value in the entry. 
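// Editorial sketch, not part of the patch: the Entry API above composes into
// common idioms such as grouping values under a derived key; the words and
// grouping rule here are made up for illustration.
use std::collections::HashMap;

fn main() {
    let mut groups: HashMap<char, Vec<&str>> = HashMap::new();
    for &word in ["apple", "avocado", "banana"].iter() {
        let initial = word.chars().next().unwrap();
        // or_default() inserts an empty Vec the first time an initial is seen.
        groups.entry(initial).or_default().push(word);
    }
    assert_eq!(groups[&'a'], vec!["apple", "avocado"]);
    assert_eq!(groups[&'b'], vec!["banana"]);
}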
@@ -2858,9 +2150,10 @@ impl<'a, K, V> OccupiedEntry<'a, K, V> { /// /// assert_eq!(map["poneyland"], 24); /// ``` + #[inline] #[stable(feature = "rust1", since = "1.0.0")] pub fn get_mut(&mut self) -> &mut V { - self.elem.read_mut().1 + self.base.get_mut() } /// Converts the OccupiedEntry into a mutable reference to the value in the entry @@ -2886,9 +2179,10 @@ impl<'a, K, V> OccupiedEntry<'a, K, V> { /// /// assert_eq!(map["poneyland"], 22); /// ``` + #[inline] #[stable(feature = "rust1", since = "1.0.0")] pub fn into_mut(self) -> &'a mut V { - self.elem.into_mut_refs().1 + self.base.into_mut() } /// Sets the value of the entry, and returns the entry's old value. @@ -2908,11 +2202,10 @@ impl<'a, K, V> OccupiedEntry<'a, K, V> { /// /// assert_eq!(map["poneyland"], 15); /// ``` + #[inline] #[stable(feature = "rust1", since = "1.0.0")] - pub fn insert(&mut self, mut value: V) -> V { - let old_value = self.get_mut(); - mem::swap(&mut value, old_value); - value + pub fn insert(&mut self, value: V) -> V { + self.base.insert(value) } /// Takes the value out of the entry, and returns it. @@ -2932,9 +2225,10 @@ impl<'a, K, V> OccupiedEntry<'a, K, V> { /// /// assert_eq!(map.contains_key("poneyland"), false); /// ``` + #[inline] #[stable(feature = "rust1", since = "1.0.0")] pub fn remove(self) -> V { - pop_internal(self.elem).1 + self.base.remove() } /// Replaces the entry, returning the old key and value. The new key in the hash map will be @@ -2958,14 +2252,10 @@ impl<'a, K, V> OccupiedEntry<'a, K, V> { /// } /// /// ``` + #[inline] #[unstable(feature = "map_entry_replace", issue = "44286")] - pub fn replace_entry(mut self, value: V) -> (K, V) { - let (old_key, old_value) = self.elem.read_mut(); - - let old_key = mem::replace(old_key, self.key.unwrap()); - let old_value = mem::replace(old_value, value); - - (old_key, old_value) + pub fn replace_entry(self, value: V) -> (K, V) { + self.base.replace_entry(value) } /// Replaces the key in the hash map with the key used to create this entry. @@ -2993,10 +2283,10 @@ impl<'a, K, V> OccupiedEntry<'a, K, V> { /// } /// } /// ``` + #[inline] #[unstable(feature = "map_entry_replace", issue = "44286")] - pub fn replace_key(mut self) -> K { - let (old_key, _) = self.elem.read_mut(); - mem::replace(old_key, self.key.unwrap()) + pub fn replace_key(self) -> K { + self.base.replace_key() } } @@ -3012,9 +2302,10 @@ impl<'a, K: 'a, V: 'a> VacantEntry<'a, K, V> { /// let mut map: HashMap<&str, u32> = HashMap::new(); /// assert_eq!(map.entry("poneyland").key(), &"poneyland"); /// ``` + #[inline] #[stable(feature = "map_entry_keys", since = "1.10.0")] pub fn key(&self) -> &K { - &self.key + self.base.key() } /// Take ownership of the key. 
@@ -3031,9 +2322,10 @@ impl<'a, K: 'a, V: 'a> VacantEntry<'a, K, V> { /// v.into_key(); /// } /// ``` + #[inline] #[stable(feature = "map_entry_recover_keys2", since = "1.12.0")] pub fn into_key(self) -> K { - self.key + self.base.into_key() } /// Sets the value of the entry with the VacantEntry's key, @@ -3052,30 +2344,18 @@ impl<'a, K: 'a, V: 'a> VacantEntry<'a, K, V> { /// } /// assert_eq!(map["poneyland"], 37); /// ``` + #[inline] #[stable(feature = "rust1", since = "1.0.0")] pub fn insert(self, value: V) -> &'a mut V { - let b = match self.elem { - NeqElem(mut bucket, disp) => { - if disp >= DISPLACEMENT_THRESHOLD { - bucket.table_mut().set_tag(true); - } - robin_hood(bucket, disp, self.hash, self.key, value) - }, - NoElem(mut bucket, disp) => { - if disp >= DISPLACEMENT_THRESHOLD { - bucket.table_mut().set_tag(true); - } - bucket.put(self.hash, self.key, value) - }, - }; - b.into_mut_refs().1 + self.base.insert(value) } } #[stable(feature = "rust1", since = "1.0.0")] impl FromIterator<(K, V)> for HashMap - where K: Eq + Hash, - S: BuildHasher + Default +where + K: Eq + Hash, + S: BuildHasher + Default, { fn from_iter>(iter: T) -> HashMap { let mut map = HashMap::with_hasher(Default::default()); @@ -3086,35 +2366,26 @@ impl FromIterator<(K, V)> for HashMap #[stable(feature = "rust1", since = "1.0.0")] impl Extend<(K, V)> for HashMap - where K: Eq + Hash, - S: BuildHasher +where + K: Eq + Hash, + S: BuildHasher, { + #[inline] fn extend>(&mut self, iter: T) { - // Keys may be already present or show multiple times in the iterator. - // Reserve the entire hint lower bound if the map is empty. - // Otherwise reserve half the hint (rounded up), so the map - // will only resize twice in the worst case. - let iter = iter.into_iter(); - let reserve = if self.is_empty() { - iter.size_hint().0 - } else { - (iter.size_hint().0 + 1) / 2 - }; - self.reserve(reserve); - for (k, v) in iter { - self.insert(k, v); - } + self.base.extend(iter) } } #[stable(feature = "hash_extend_copy", since = "1.4.0")] impl<'a, K, V, S> Extend<(&'a K, &'a V)> for HashMap - where K: Eq + Hash + Copy, - V: Copy, - S: BuildHasher +where + K: Eq + Hash + Copy, + V: Copy, + S: BuildHasher, { + #[inline] fn extend>(&mut self, iter: T) { - self.extend(iter.into_iter().map(|(&key, &value)| (key, value))); + self.base.extend(iter) } } @@ -3255,6 +2526,32 @@ impl fmt::Debug for RandomState { } } +#[inline] +fn map_entry<'a, K: 'a, V: 'a>(raw: base::RustcEntry<'a, K, V>) -> Entry<'a, K, V> { + match raw { + base::RustcEntry::Occupied(base) => Entry::Occupied(OccupiedEntry { base }), + base::RustcEntry::Vacant(base) => Entry::Vacant(VacantEntry { base }), + } +} + +#[inline] +fn map_collection_alloc_err(err: hashbrown::CollectionAllocErr) -> CollectionAllocErr { + match err { + hashbrown::CollectionAllocErr::CapacityOverflow => CollectionAllocErr::CapacityOverflow, + hashbrown::CollectionAllocErr::AllocErr => CollectionAllocErr::AllocErr, + } +} + +#[inline] +fn map_raw_entry<'a, K: 'a, V: 'a, S: 'a>( + raw: base::RawEntryMut<'a, K, V, S>, +) -> RawEntryMut<'a, K, V, S> { + match raw { + base::RawEntryMut::Occupied(base) => RawEntryMut::Occupied(RawOccupiedEntryMut { base }), + base::RawEntryMut::Vacant(base) => RawEntryMut::Vacant(RawVacantEntryMut { base }), + } +} + #[allow(dead_code)] fn assert_covariance() { fn map_key<'new>(v: HashMap<&'static str, u8>) -> HashMap<&'new str, u8> { @@ -3287,21 +2584,21 @@ fn assert_covariance() { fn values_val<'a, 'new>(v: Values<'a, u8, &'static str>) -> Values<'a, u8, &'new str> { v } - fn 
drain<'new>(d: Drain<'static, &'static str, &'static str>) - -> Drain<'new, &'new str, &'new str> { + fn drain<'new>( + d: Drain<'static, &'static str, &'static str>, + ) -> Drain<'new, &'new str, &'new str> { d } } #[cfg(test)] mod test_map { - use super::HashMap; use super::Entry::{Occupied, Vacant}; + use super::HashMap; use super::RandomState; use crate::cell::RefCell; use rand::{thread_rng, Rng}; use realstd::collections::CollectionAllocErr::*; - use realstd::mem::size_of; use realstd::usize; #[test] @@ -3438,19 +2735,19 @@ mod test_map { DROP_VECTOR.with(|v| { assert_eq!(v.borrow()[i], 1); - assert_eq!(v.borrow()[i+100], 1); + assert_eq!(v.borrow()[i + 100], 1); }); } DROP_VECTOR.with(|v| { for i in 0..50 { assert_eq!(v.borrow()[i], 0); - assert_eq!(v.borrow()[i+100], 0); + assert_eq!(v.borrow()[i + 100], 0); } for i in 50..100 { assert_eq!(v.borrow()[i], 1); - assert_eq!(v.borrow()[i+100], 1); + assert_eq!(v.borrow()[i + 100], 1); } }); } @@ -3507,13 +2804,9 @@ mod test_map { for _ in half.by_ref() {} DROP_VECTOR.with(|v| { - let nk = (0..100) - .filter(|&i| v.borrow()[i] == 1) - .count(); + let nk = (0..100).filter(|&i| v.borrow()[i] == 1).count(); - let nv = (0..100) - .filter(|&i| v.borrow()[i + 100] == 1) - .count(); + let nv = (0..100).filter(|&i| v.borrow()[i + 100] == 1).count(); assert_eq!(nk, 50); assert_eq!(nv, 50); @@ -3701,7 +2994,7 @@ mod test_map { fn test_iterate() { let mut m = HashMap::with_capacity(4); for i in 0..32 { - assert!(m.insert(i, i*2).is_none()); + assert!(m.insert(i, i * 2).is_none()); } assert_eq!(m.len(), 32); @@ -3789,8 +3082,7 @@ mod test_map { let map_str = format!("{:?}", map); - assert!(map_str == "{1: 2, 3: 4}" || - map_str == "{3: 4, 1: 2}"); + assert!(map_str == "{1: 2, 3: 4}" || map_str == "{3: 4, 1: 2}"); assert_eq!(format!("{:?}", empty), "{}"); } @@ -3817,7 +3109,7 @@ mod test_map { let mut m = HashMap::new(); assert_eq!(m.len(), 0); - assert_eq!(m.raw_capacity(), 0); + assert_eq!(m.raw_capacity(), 1); assert!(m.is_empty()); m.insert(0, 0); @@ -3857,7 +3149,7 @@ mod test_map { m.shrink_to_fit(); assert_eq!(m.raw_capacity(), raw_cap); // again, a little more than half full - for _ in 0..raw_cap / 2 - 1 { + for _ in 0..raw_cap / 2 { i -= 1; m.remove(&i); } @@ -4008,7 +3300,6 @@ mod test_map { assert_eq!(map.get(&1).unwrap(), &100); assert_eq!(map.len(), 6); - // Existing key (update) match map.entry(2) { Vacant(_) => unreachable!(), @@ -4031,7 +3322,6 @@ mod test_map { assert_eq!(map.get(&3), None); assert_eq!(map.len(), 5); - // Inexistent key (insert) match map.entry(10) { Occupied(_) => unreachable!(), @@ -4046,11 +3336,10 @@ mod test_map { #[test] fn test_entry_take_doesnt_corrupt() { #![allow(deprecated)] //rand - // Test for #19292 + // Test for #19292 fn check(m: &HashMap) { for k in m.keys() { - assert!(m.contains_key(k), - "{} is in keys() but not in the map?", k); + assert!(m.contains_key(k), "{} is in keys() but not in the map?", k); } } @@ -4155,7 +3444,7 @@ mod test_map { #[test] fn test_retain() { - let mut map: HashMap = (0..100).map(|x|(x, x*10)).collect(); + let mut map: HashMap = (0..100).map(|x| (x, x * 10)).collect(); map.retain(|&k, _| k % 2 == 0); assert_eq!(map.len(), 50); @@ -4164,51 +3453,20 @@ mod test_map { assert_eq!(map[&6], 60); } - #[test] - fn test_adaptive() { - const TEST_LEN: usize = 5000; - // by cloning we get maps with the same hasher seed - let mut first = HashMap::new(); - let mut second = first.clone(); - first.extend((0..TEST_LEN).map(|i| (i, i))); - second.extend((TEST_LEN..TEST_LEN * 2).map(|i| (i, 
i))); - - for (&k, &v) in &second { - let prev_cap = first.capacity(); - let expect_grow = first.len() == prev_cap; - first.insert(k, v); - if !expect_grow && first.capacity() != prev_cap { - return; - } - } - panic!("Adaptive early resize failed"); - } - #[test] fn test_try_reserve() { - - let mut empty_bytes: HashMap = HashMap::new(); + let mut empty_bytes: HashMap = HashMap::new(); const MAX_USIZE: usize = usize::MAX; - // HashMap and RawTables use complicated size calculations - // hashes_size is sizeof(HashUint) * capacity; - // pairs_size is sizeof((K. V)) * capacity; - // alignment_hashes_size is 8 - // alignment_pairs size is 4 - let size_of_multiplier = (size_of::() + size_of::<(u8, u8)>()).next_power_of_two(); - // The following formula is used to calculate the new capacity - let max_no_ovf = ((MAX_USIZE / 11) * 10) / size_of_multiplier - 1; - if let Err(CapacityOverflow) = empty_bytes.try_reserve(MAX_USIZE) { - } else { panic!("usize::MAX should trigger an overflow!"); } + } else { + panic!("usize::MAX should trigger an overflow!"); + } - if size_of::() < 8 { - if let Err(CapacityOverflow) = empty_bytes.try_reserve(max_no_ovf) { - } else { panic!("isize::MAX + 1 should trigger a CapacityOverflow!") } + if let Err(AllocErr) = empty_bytes.try_reserve(MAX_USIZE / 8) { } else { - if let Err(AllocErr) = empty_bytes.try_reserve(max_no_ovf) { - } else { panic!("isize::MAX + 1 should trigger an OOM!") } + panic!("usize::MAX / 8 should trigger an OOM!") } } @@ -4238,9 +3496,14 @@ mod test_map { } let hash1 = compute_hash(&map, 1); assert_eq!(map.raw_entry().from_key(&1).unwrap(), (&1, &100)); - assert_eq!(map.raw_entry().from_hash(hash1, |k| *k == 1).unwrap(), (&1, &100)); - assert_eq!(map.raw_entry().from_key_hashed_nocheck(hash1, &1).unwrap(), (&1, &100)); - assert_eq!(map.raw_entry().search_bucket(hash1, |k| *k == 1).unwrap(), (&1, &100)); + assert_eq!( + map.raw_entry().from_hash(hash1, |k| *k == 1).unwrap(), + (&1, &100) + ); + assert_eq!( + map.raw_entry().from_key_hashed_nocheck(hash1, &1).unwrap(), + (&1, &100) + ); assert_eq!(map.len(), 6); // Existing key (update) @@ -4254,9 +3517,14 @@ mod test_map { } let hash2 = compute_hash(&map, 2); assert_eq!(map.raw_entry().from_key(&2).unwrap(), (&2, &200)); - assert_eq!(map.raw_entry().from_hash(hash2, |k| *k == 2).unwrap(), (&2, &200)); - assert_eq!(map.raw_entry().from_key_hashed_nocheck(hash2, &2).unwrap(), (&2, &200)); - assert_eq!(map.raw_entry().search_bucket(hash2, |k| *k == 2).unwrap(), (&2, &200)); + assert_eq!( + map.raw_entry().from_hash(hash2, |k| *k == 2).unwrap(), + (&2, &200) + ); + assert_eq!( + map.raw_entry().from_key_hashed_nocheck(hash2, &2).unwrap(), + (&2, &200) + ); assert_eq!(map.len(), 6); // Existing key (take) @@ -4270,10 +3538,8 @@ mod test_map { assert_eq!(map.raw_entry().from_key(&3), None); assert_eq!(map.raw_entry().from_hash(hash3, |k| *k == 3), None); assert_eq!(map.raw_entry().from_key_hashed_nocheck(hash3, &3), None); - assert_eq!(map.raw_entry().search_bucket(hash3, |k| *k == 3), None); assert_eq!(map.len(), 5); - // Nonexistent key (insert) match map.raw_entry_mut().from_key(&10) { Occupied(_) => unreachable!(), @@ -4293,7 +3559,6 @@ mod test_map { assert_eq!(map.raw_entry().from_key(&k), kv); assert_eq!(map.raw_entry().from_hash(hash, |q| *q == k), kv); assert_eq!(map.raw_entry().from_key_hashed_nocheck(hash, &k), kv); - assert_eq!(map.raw_entry().search_bucket(hash, |q| *q == k), kv); match map.raw_entry_mut().from_key(&k) { Occupied(mut o) => assert_eq!(Some(o.get_key_value()), kv), @@ -4307,10 
+3572,6 @@ mod test_map { Occupied(mut o) => assert_eq!(Some(o.get_key_value()), kv), Vacant(_) => assert_eq!(v, None), } - match map.raw_entry_mut().search_bucket(hash, |q| *q == k) { - Occupied(mut o) => assert_eq!(Some(o.get_key_value()), kv), - Vacant(_) => assert_eq!(v, None), - } } } diff --git a/src/libstd/collections/hash/mod.rs b/src/libstd/collections/hash/mod.rs index 56585477f1c17..a6d89a4d32abf 100644 --- a/src/libstd/collections/hash/mod.rs +++ b/src/libstd/collections/hash/mod.rs @@ -1,6 +1,5 @@ //! Unordered containers, implemented as hash-tables mod bench; -mod table; pub mod map; pub mod set; diff --git a/src/libstd/collections/hash/set.rs b/src/libstd/collections/hash/set.rs index b610e09ae749e..b56a27c80bc83 100644 --- a/src/libstd/collections/hash/set.rs +++ b/src/libstd/collections/hash/set.rs @@ -1227,7 +1227,6 @@ impl FusedIterator for IntoIter {} impl fmt::Debug for IntoIter { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { let entries_iter = self.iter - .inner .iter() .map(|(k, _)| k); f.debug_list().entries(entries_iter).finish() @@ -1261,7 +1260,6 @@ impl FusedIterator for Drain<'_, K> {} impl fmt::Debug for Drain<'_, K> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { let entries_iter = self.iter - .inner .iter() .map(|(k, _)| k); f.debug_list().entries(entries_iter).finish() diff --git a/src/libstd/collections/hash/table.rs b/src/libstd/collections/hash/table.rs deleted file mode 100644 index 2113b448910ab..0000000000000 --- a/src/libstd/collections/hash/table.rs +++ /dev/null @@ -1,1131 +0,0 @@ -use crate::alloc::{Global, Alloc, Layout, LayoutErr, handle_alloc_error}; -use crate::collections::CollectionAllocErr; -use crate::hash::{BuildHasher, Hash, Hasher}; -use crate::marker; -use crate::mem::{self, size_of, needs_drop}; -use crate::ops::{Deref, DerefMut}; -use crate::ptr::{self, Unique, NonNull}; -use crate::hint; - -use self::BucketState::*; - -/// Integer type used for stored hash values. -/// -/// No more than bit_width(usize) bits are needed to select a bucket. -/// -/// The most significant bit is ours to use for tagging `SafeHash`. -/// -/// (Even if we could have usize::MAX bytes allocated for buckets, -/// each bucket stores at least a `HashUint`, so there can be no more than -/// usize::MAX / size_of(usize) buckets.) -type HashUint = usize; - -const EMPTY_BUCKET: HashUint = 0; -const EMPTY: usize = 1; - -/// Special `Unique` that uses the lower bit of the pointer -/// to expose a boolean tag. -/// Note: when the pointer is initialized to EMPTY `.ptr()` will return -/// null and the tag functions shouldn't be used. -struct TaggedHashUintPtr(Unique); - -impl TaggedHashUintPtr { - #[inline] - unsafe fn new(ptr: *mut HashUint) -> Self { - debug_assert!(ptr as usize & 1 == 0 || ptr as usize == EMPTY as usize); - TaggedHashUintPtr(Unique::new_unchecked(ptr)) - } - - #[inline] - fn set_tag(&mut self, value: bool) { - let mut usize_ptr = self.0.as_ptr() as usize; - unsafe { - if value { - usize_ptr |= 1; - } else { - usize_ptr &= !1; - } - self.0 = Unique::new_unchecked(usize_ptr as *mut HashUint) - } - } - - #[inline] - fn tag(&self) -> bool { - (self.0.as_ptr() as usize) & 1 == 1 - } - - #[inline] - fn ptr(&self) -> *mut HashUint { - (self.0.as_ptr() as usize & !1) as *mut HashUint - } -} - -/// The raw hashtable, providing safe-ish access to the unzipped and highly -/// optimized arrays of hashes, and key-value pairs. 
-/// -/// This design is a lot faster than the naive -/// `Vec>`, because we don't pay for the overhead of an -/// option on every element, and we get a generally more cache-aware design. -/// -/// Essential invariants of this structure: -/// -/// - if `t.hashes[i] == EMPTY_BUCKET`, then `Bucket::at_index(&t, i).raw` -/// points to 'undefined' contents. Don't read from it. This invariant is -/// enforced outside this module with the `EmptyBucket`, `FullBucket`, -/// and `SafeHash` types. -/// -/// - An `EmptyBucket` is only constructed at an index with -/// a hash of EMPTY_BUCKET. -/// -/// - A `FullBucket` is only constructed at an index with a -/// non-EMPTY_BUCKET hash. -/// -/// - A `SafeHash` is only constructed for non-`EMPTY_BUCKET` hash. We get -/// around hashes of zero by changing them to 0x8000_0000_0000_0000, -/// which will likely map to the same bucket, while not being confused -/// with "empty". -/// -/// - Both "arrays represented by pointers" are the same length: -/// `capacity`. This is set at creation and never changes. The arrays -/// are unzipped and are more cache aware (scanning through 8 hashes -/// brings in at most 2 cache lines, since they're all right beside each -/// other). This layout may waste space in padding such as in a map from -/// u64 to u8, but is a more cache conscious layout as the key-value pairs -/// are only very shortly probed and the desired value will be in the same -/// or next cache line. -/// -/// You can kind of think of this module/data structure as a safe wrapper -/// around just the "table" part of the hashtable. It enforces some -/// invariants at the type level and employs some performance trickery, -/// but in general is just a tricked out `Vec>`. -/// -/// The hashtable also exposes a special boolean tag. The tag defaults to false -/// when the RawTable is created and is accessible with the `tag` and `set_tag` -/// functions. -pub struct RawTable { - capacity_mask: usize, - size: usize, - hashes: TaggedHashUintPtr, - - // Because K/V do not appear directly in any of the types in the struct, - // inform rustc that in fact instances of K and V are reachable from here. - marker: marker::PhantomData<(K, V)>, -} - -// An unsafe view of a RawTable bucket -// Valid indexes are within [0..table_capacity) -pub struct RawBucket { - hash_start: *mut HashUint, - // We use *const to ensure covariance with respect to K and V - pair_start: *const (K, V), - idx: usize, - _marker: marker::PhantomData<(K, V)>, -} - -impl Copy for RawBucket {} -impl Clone for RawBucket { - fn clone(&self) -> RawBucket { - *self - } -} - -pub struct Bucket { - raw: RawBucket, - table: M, -} - -impl Copy for Bucket {} -impl Clone for Bucket { - fn clone(&self) -> Bucket { - *self - } -} - -pub struct EmptyBucket { - raw: RawBucket, - table: M, -} - -pub struct FullBucket { - raw: RawBucket, - table: M, -} - -pub type FullBucketMut<'table, K, V> = FullBucket>; - -pub enum BucketState { - Empty(EmptyBucket), - Full(FullBucket), -} - -// A GapThenFull encapsulates the state of two consecutive buckets at once. -// The first bucket, called the gap, is known to be empty. -// The second bucket is full. -pub struct GapThenFull { - gap: EmptyBucket, - full: FullBucket, -} - -/// A hash that is not zero, since we use a hash of zero to represent empty -/// buckets. -#[derive(PartialEq, Copy, Clone)] -pub struct SafeHash { - hash: HashUint, -} - -impl SafeHash { - /// Peek at the hash value, which is guaranteed to be non-zero. 
- #[inline(always)] - pub fn inspect(&self) -> HashUint { - self.hash - } - - #[inline(always)] - pub fn new(hash: u64) -> Self { - // We need to avoid 0 in order to prevent collisions with - // EMPTY_HASH. We can maintain our precious uniform distribution - // of initial indexes by unconditionally setting the MSB, - // effectively reducing the hashes by one bit. - // - // Truncate hash to fit in `HashUint`. - let hash_bits = size_of::() * 8; - SafeHash { hash: (1 << (hash_bits - 1)) | (hash as HashUint) } - } -} - -/// We need to remove hashes of 0. That's reserved for empty buckets. -/// This function wraps up `hash_keyed` to be the only way outside this -/// module to generate a SafeHash. -pub fn make_hash(hash_state: &S, t: &T) -> SafeHash - where T: Hash, - S: BuildHasher -{ - let mut state = hash_state.build_hasher(); - t.hash(&mut state); - SafeHash::new(state.finish()) -} - -// `replace` casts a `*HashUint` to a `*SafeHash`. Since we statically -// ensure that a `FullBucket` points to an index with a non-zero hash, -// and a `SafeHash` is just a `HashUint` with a different name, this is -// safe. -// -// This test ensures that a `SafeHash` really IS the same size as a -// `HashUint`. If you need to change the size of `SafeHash` (and -// consequently made this test fail), `replace` needs to be -// modified to no longer assume this. -#[test] -fn can_alias_safehash_as_hash() { - assert_eq!(size_of::(), size_of::()) -} - -// RawBucket methods are unsafe as it's possible to -// make a RawBucket point to invalid memory using safe code. -impl RawBucket { - unsafe fn hash(&self) -> *mut HashUint { - self.hash_start.add(self.idx) - } - unsafe fn pair(&self) -> *mut (K, V) { - self.pair_start.add(self.idx) as *mut (K, V) - } - unsafe fn hash_pair(&self) -> (*mut HashUint, *mut (K, V)) { - (self.hash(), self.pair()) - } -} - -// Buckets hold references to the table. -impl FullBucket { - /// Borrow a reference to the table. - pub fn table(&self) -> &M { - &self.table - } - /// Borrow a mutable reference to the table. - pub fn table_mut(&mut self) -> &mut M { - &mut self.table - } - /// Move out the reference to the table. - pub fn into_table(self) -> M { - self.table - } - /// Gets the raw index. - pub fn index(&self) -> usize { - self.raw.idx - } - /// Gets the raw bucket. - pub fn raw(&self) -> RawBucket { - self.raw - } -} - -impl EmptyBucket { - /// Borrow a reference to the table. - pub fn table(&self) -> &M { - &self.table - } - /// Borrow a mutable reference to the table. - pub fn table_mut(&mut self) -> &mut M { - &mut self.table - } -} - -impl Bucket { - /// Gets the raw index. - pub fn index(&self) -> usize { - self.raw.idx - } - /// get the table. - pub fn into_table(self) -> M { - self.table - } -} - -impl Deref for FullBucket - where M: Deref> -{ - type Target = RawTable; - fn deref(&self) -> &RawTable { - &self.table - } -} - -/// `Put` is implemented for types which provide access to a table and cannot be invalidated -/// by filling a bucket. A similar implementation for `Take` is possible. 
-pub trait Put { - unsafe fn borrow_table_mut(&mut self) -> &mut RawTable; -} - - -impl Put for &mut RawTable { - unsafe fn borrow_table_mut(&mut self) -> &mut RawTable { - *self - } -} - -impl Put for Bucket - where M: Put -{ - unsafe fn borrow_table_mut(&mut self) -> &mut RawTable { - self.table.borrow_table_mut() - } -} - -impl Put for FullBucket - where M: Put -{ - unsafe fn borrow_table_mut(&mut self) -> &mut RawTable { - self.table.borrow_table_mut() - } -} - -impl>> Bucket { - #[inline] - pub fn new(table: M, hash: SafeHash) -> Bucket { - Bucket::at_index(table, hash.inspect() as usize) - } - - pub fn new_from(r: RawBucket, t: M) - -> Bucket - { - Bucket { - raw: r, - table: t, - } - } - - #[inline] - pub fn at_index(table: M, ib_index: usize) -> Bucket { - // if capacity is 0, then the RawBucket will be populated with bogus pointers. - // This is an uncommon case though, so avoid it in release builds. - debug_assert!(table.capacity() > 0, - "Table should have capacity at this point"); - let ib_index = ib_index & table.capacity_mask; - Bucket { - raw: table.raw_bucket_at(ib_index), - table, - } - } - - pub fn first(table: M) -> Bucket { - Bucket { - raw: table.raw_bucket_at(0), - table, - } - } - - // "So a few of the first shall be last: for many be called, - // but few chosen." - // - // We'll most likely encounter a few buckets at the beginning that - // have their initial buckets near the end of the table. They were - // placed at the beginning as the probe wrapped around the table - // during insertion. We must skip forward to a bucket that won't - // get reinserted too early and won't unfairly steal others spot. - // This eliminates the need for robin hood. - pub fn head_bucket(table: M) -> Bucket { - let mut bucket = Bucket::first(table); - - loop { - bucket = match bucket.peek() { - Full(full) => { - if full.displacement() == 0 { - // This bucket occupies its ideal spot. - // It indicates the start of another "cluster". - bucket = full.into_bucket(); - break; - } - // Leaving this bucket in the last cluster for later. - full.into_bucket() - } - Empty(b) => { - // Encountered a hole between clusters. - b.into_bucket() - } - }; - bucket.next(); - } - bucket - } - - /// Reads a bucket at a given index, returning an enum indicating whether - /// it's initialized or not. You need to match on this enum to get - /// the appropriate types to call most of the other functions in - /// this module. - pub fn peek(self) -> BucketState { - match unsafe { *self.raw.hash() } { - EMPTY_BUCKET => { - Empty(EmptyBucket { - raw: self.raw, - table: self.table, - }) - } - _ => { - Full(FullBucket { - raw: self.raw, - table: self.table, - }) - } - } - } - - /// Modifies the bucket in place to make it point to the next slot. - pub fn next(&mut self) { - self.raw.idx = self.raw.idx.wrapping_add(1) & self.table.capacity_mask; - } - - /// Modifies the bucket in place to make it point to the previous slot. 
- pub fn prev(&mut self) { - self.raw.idx = self.raw.idx.wrapping_sub(1) & self.table.capacity_mask; - } -} - -impl>> EmptyBucket { - #[inline] - pub fn next(self) -> Bucket { - let mut bucket = self.into_bucket(); - bucket.next(); - bucket - } - - #[inline] - pub fn into_bucket(self) -> Bucket { - Bucket { - raw: self.raw, - table: self.table, - } - } - - pub fn gap_peek(self) -> Result, Bucket> { - let gap = EmptyBucket { - raw: self.raw, - table: (), - }; - - match self.next().peek() { - Full(bucket) => { - Ok(GapThenFull { - gap, - full: bucket, - }) - } - Empty(e) => Err(e.into_bucket()), - } - } -} - -impl EmptyBucket - where M: Put -{ - /// Puts given key and value pair, along with the key's hash, - /// into this bucket in the hashtable. Note how `self` is 'moved' into - /// this function, because this slot will no longer be empty when - /// we return! A `FullBucket` is returned for later use, pointing to - /// the newly-filled slot in the hashtable. - /// - /// Use `make_hash` to construct a `SafeHash` to pass to this function. - pub fn put(mut self, hash: SafeHash, key: K, value: V) -> FullBucket { - unsafe { - *self.raw.hash() = hash.inspect(); - ptr::write(self.raw.pair(), (key, value)); - - self.table.borrow_table_mut().size += 1; - } - - FullBucket { - raw: self.raw, - table: self.table, - } - } -} - -impl>> FullBucket { - #[inline] - pub fn next(self) -> Bucket { - let mut bucket = self.into_bucket(); - bucket.next(); - bucket - } - - #[inline] - pub fn into_bucket(self) -> Bucket { - Bucket { - raw: self.raw, - table: self.table, - } - } - - /// Duplicates the current position. This can be useful for operations - /// on two or more buckets. - pub fn stash(self) -> FullBucket { - FullBucket { - raw: self.raw, - table: self, - } - } - - /// Gets the distance between this bucket and the 'ideal' location - /// as determined by the key's hash stored in it. - /// - /// In the cited blog posts above, this is called the "distance to - /// initial bucket", or DIB. Also known as "probe count". - pub fn displacement(&self) -> usize { - // Calculates the distance one has to travel when going from - // `hash mod capacity` onwards to `idx mod capacity`, wrapping around - // if the destination is not reached before the end of the table. - (self.raw.idx.wrapping_sub(self.hash().inspect() as usize)) & self.table.capacity_mask - } - - #[inline] - pub fn hash(&self) -> SafeHash { - unsafe { SafeHash { hash: *self.raw.hash() } } - } - - /// Gets references to the key and value at a given index. - pub fn read(&self) -> (&K, &V) { - unsafe { - let pair_ptr = self.raw.pair(); - (&(*pair_ptr).0, &(*pair_ptr).1) - } - } -} - -// We take a mutable reference to the table instead of accepting anything that -// implements `DerefMut` to prevent fn `take` from being called on `stash`ed -// buckets. -impl<'t, K, V> FullBucket> { - /// Removes this bucket's key and value from the hashtable. - /// - /// This works similarly to `put`, building an `EmptyBucket` out of the - /// taken bucket. - pub fn take(self) -> (EmptyBucket>, K, V) { - self.table.size -= 1; - - unsafe { - *self.raw.hash() = EMPTY_BUCKET; - let (k, v) = ptr::read(self.raw.pair()); - (EmptyBucket { - raw: self.raw, - table: self.table, - }, - k, - v) - } - } -} - -// This use of `Put` is misleading and restrictive, but safe and sufficient for our use cases -// where `M` is a full bucket or table reference type with mutable access to the table. 
-impl FullBucket - where M: Put -{ - pub fn replace(&mut self, h: SafeHash, k: K, v: V) -> (SafeHash, K, V) { - unsafe { - let old_hash = ptr::replace(self.raw.hash() as *mut SafeHash, h); - let (old_key, old_val) = ptr::replace(self.raw.pair(), (k, v)); - - (old_hash, old_key, old_val) - } - } -} - -impl FullBucket - where M: Deref> + DerefMut -{ - /// Gets mutable references to the key and value at a given index. - pub fn read_mut(&mut self) -> (&mut K, &mut V) { - unsafe { - let pair_ptr = self.raw.pair(); - (&mut (*pair_ptr).0, &mut (*pair_ptr).1) - } - } -} - -impl<'t, K, V, M> FullBucket - where M: Deref> + 't -{ - /// Exchange a bucket state for immutable references into the table. - /// Because the underlying reference to the table is also consumed, - /// no further changes to the structure of the table are possible; - /// in exchange for this, the returned references have a longer lifetime - /// than the references returned by `read()`. - pub fn into_refs(self) -> (&'t K, &'t V) { - unsafe { - let pair_ptr = self.raw.pair(); - (&(*pair_ptr).0, &(*pair_ptr).1) - } - } -} - -impl<'t, K, V, M> FullBucket - where M: Deref> + DerefMut + 't -{ - /// This works similarly to `into_refs`, exchanging a bucket state - /// for mutable references into the table. - pub fn into_mut_refs(self) -> (&'t mut K, &'t mut V) { - unsafe { - let pair_ptr = self.raw.pair(); - (&mut (*pair_ptr).0, &mut (*pair_ptr).1) - } - } -} - -impl GapThenFull - where M: Deref> -{ - #[inline] - pub fn full(&self) -> &FullBucket { - &self.full - } - - pub fn into_table(self) -> M { - self.full.into_table() - } - - pub fn shift(mut self) -> Result, Bucket> { - unsafe { - let (gap_hash, gap_pair) = self.gap.raw.hash_pair(); - let (full_hash, full_pair) = self.full.raw.hash_pair(); - *gap_hash = mem::replace(&mut *full_hash, EMPTY_BUCKET); - ptr::copy_nonoverlapping(full_pair, gap_pair, 1); - } - - let FullBucket { raw: prev_raw, .. } = self.full; - - match self.full.next().peek() { - Full(bucket) => { - self.gap.raw = prev_raw; - - self.full = bucket; - - Ok(self) - } - Empty(b) => Err(b.into_bucket()), - } - } -} - -// Returns a Layout which describes the allocation required for a hash table, -// and the offset of the array of (key, value) pairs in the allocation. -#[inline(always)] -fn calculate_layout(capacity: usize) -> Result<(Layout, usize), LayoutErr> { - let hashes = Layout::array::(capacity)?; - let pairs = Layout::array::<(K, V)>(capacity)?; - hashes.extend(pairs).map(|(layout, _)| { - // LLVM seems to have trouble properly const-propagating pairs.align(), - // possibly due to the use of NonZeroUsize. This little hack allows it - // to generate optimal code. - // - // See https://github.com/rust-lang/rust/issues/51346 for more details. - ( - layout, - hashes.size() + hashes.padding_needed_for(mem::align_of::<(K, V)>()), - ) - }) -} - -pub(crate) enum Fallibility { - Fallible, - Infallible, -} - -use self::Fallibility::*; - -impl RawTable { - /// Does not initialize the buckets. The caller should ensure they, - /// at the very least, set every hash to EMPTY_BUCKET. - /// Returns an error if it cannot allocate or capacity overflows. - unsafe fn new_uninitialized_internal( - capacity: usize, - fallibility: Fallibility, - ) -> Result, CollectionAllocErr> { - if capacity == 0 { - return Ok(RawTable { - size: 0, - capacity_mask: capacity.wrapping_sub(1), - hashes: TaggedHashUintPtr::new(EMPTY as *mut HashUint), - marker: marker::PhantomData, - }); - } - - // Allocating hashmaps is a little tricky. 
-        // arrays, but since we know their sizes and alignments up front,
-        // we just allocate a single array, and then have the subarrays
-        // point into it.
-        let (layout, _) = calculate_layout::<K, V>(capacity)?;
-        let buffer = Global.alloc(layout).map_err(|e| match fallibility {
-            Infallible => handle_alloc_error(layout),
-            Fallible => e,
-        })?;
-
-        Ok(RawTable {
-            capacity_mask: capacity.wrapping_sub(1),
-            size: 0,
-            hashes: TaggedHashUintPtr::new(buffer.cast().as_ptr()),
-            marker: marker::PhantomData,
-        })
-    }
-
-    /// Does not initialize the buckets. The caller should ensure they,
-    /// at the very least, set every hash to EMPTY_BUCKET.
-    unsafe fn new_uninitialized(capacity: usize) -> RawTable<K, V> {
-        match Self::new_uninitialized_internal(capacity, Infallible) {
-            Err(CollectionAllocErr::CapacityOverflow) => panic!("capacity overflow"),
-            Err(CollectionAllocErr::AllocErr) => unreachable!(),
-            Ok(table) => { table }
-        }
-    }
-
-    #[inline(always)]
-    fn raw_bucket_at(&self, index: usize) -> RawBucket<K, V> {
-        let (_, pairs_offset) = calculate_layout::<K, V>(self.capacity())
-            .unwrap_or_else(|_| unsafe { hint::unreachable_unchecked() });
-        let buffer = self.hashes.ptr() as *mut u8;
-        unsafe {
-            RawBucket {
-                hash_start: buffer as *mut HashUint,
-                pair_start: buffer.add(pairs_offset) as *const (K, V),
-                idx: index,
-                _marker: marker::PhantomData,
-            }
-        }
-    }
-
-    #[inline]
-    fn new_internal(
-        capacity: usize,
-        fallibility: Fallibility,
-    ) -> Result<RawTable<K, V>, CollectionAllocErr> {
-        unsafe {
-            let ret = RawTable::new_uninitialized_internal(capacity, fallibility)?;
-            if capacity > 0 {
-                ptr::write_bytes(ret.hashes.ptr(), 0, capacity);
-            }
-            Ok(ret)
-        }
-    }
-
-    /// Tries to create a new raw table from a given capacity. If it cannot allocate,
-    /// it returns with AllocErr.
-    #[inline]
-    pub fn try_new(capacity: usize) -> Result<RawTable<K, V>, CollectionAllocErr> {
-        Self::new_internal(capacity, Fallible)
-    }
-
-    /// Creates a new raw table from a given capacity. All buckets are
-    /// initially empty.
-    #[inline]
-    pub fn new(capacity: usize) -> RawTable<K, V> {
-        match Self::new_internal(capacity, Infallible) {
-            Err(CollectionAllocErr::CapacityOverflow) => panic!("capacity overflow"),
-            Err(CollectionAllocErr::AllocErr) => unreachable!(),
-            Ok(table) => { table }
-        }
-    }
-
-    /// The hashtable's capacity, similar to a vector's.
-    pub fn capacity(&self) -> usize {
-        self.capacity_mask.wrapping_add(1)
-    }
-
-    /// The number of elements ever `put` in the hashtable, minus the number
-    /// of elements ever `take`n.
-    pub fn size(&self) -> usize {
-        self.size
-    }
-
-    fn raw_buckets(&self) -> RawBuckets<'_, K, V> {
-        RawBuckets {
-            raw: self.raw_bucket_at(0),
-            elems_left: self.size,
-            marker: marker::PhantomData,
-        }
-    }
-
-    pub fn iter(&self) -> Iter<'_, K, V> {
-        Iter {
-            iter: self.raw_buckets(),
-        }
-    }
-
-    pub fn iter_mut(&mut self) -> IterMut<'_, K, V> {
-        IterMut {
-            iter: self.raw_buckets(),
-            _marker: marker::PhantomData,
-        }
-    }
-
-    pub fn into_iter(self) -> IntoIter<K, V> {
-        let RawBuckets { raw, elems_left, .. } = self.raw_buckets();
-        // Replace the marker regardless of lifetime bounds on parameters.
-        IntoIter {
-            iter: RawBuckets {
-                raw,
-                elems_left,
-                marker: marker::PhantomData,
-            },
-            table: self,
-        }
-    }
-
-    pub fn drain(&mut self) -> Drain<'_, K, V> {
-        let RawBuckets { raw, elems_left, .. } = self.raw_buckets();
-        // Replace the marker regardless of lifetime bounds on parameters.
-        Drain {
-            iter: RawBuckets {
-                raw,
-                elems_left,
-                marker: marker::PhantomData,
-            },
-            table: NonNull::from(self),
-            marker: marker::PhantomData,
-        }
-    }
-
-    /// Drops buckets in reverse order. It leaves the table in an inconsistent
-    /// state and should only be used for dropping the table's remaining
-    /// entries. It's used in the implementation of Drop.
-    unsafe fn rev_drop_buckets(&mut self) {
-        // initialize the raw bucket past the end of the table
-        let mut raw = self.raw_bucket_at(self.capacity());
-        let mut elems_left = self.size;
-
-        while elems_left != 0 {
-            raw.idx -= 1;
-
-            if *raw.hash() != EMPTY_BUCKET {
-                elems_left -= 1;
-                ptr::drop_in_place(raw.pair());
-            }
-        }
-    }
-
-    /// Sets the table tag.
-    pub fn set_tag(&mut self, value: bool) {
-        self.hashes.set_tag(value)
-    }
-
-    /// Gets the table tag.
-    pub fn tag(&self) -> bool {
-        self.hashes.tag()
-    }
-}
-
-/// A raw iterator. The basis for some other iterators in this module. Although
-/// this interface is safe, it's not used outside this module.
-struct RawBuckets<'a, K, V> {
-    raw: RawBucket<K, V>,
-    elems_left: usize,
-
-    // Strictly speaking, this should be &'a (K,V), but that would
-    // require that K:'a, and we often use RawBuckets<'static...> for
-    // move iterations, so that messes up a lot of other things. So
-    // just use `&'a (K,V)` as this is not a publicly exposed type
-    // anyway.
-    marker: marker::PhantomData<&'a ()>,
-}
-
-// FIXME(#26925) Remove in favor of `#[derive(Clone)]`
-impl<K, V> Clone for RawBuckets<'_, K, V> {
-    fn clone(&self) -> Self {
-        RawBuckets {
-            raw: self.raw,
-            elems_left: self.elems_left,
-            marker: marker::PhantomData,
-        }
-    }
-}
-
-
-impl<'a, K, V> Iterator for RawBuckets<'a, K, V> {
-    type Item = RawBucket<K, V>;
-
-    fn next(&mut self) -> Option<RawBucket<K, V>> {
-        if self.elems_left == 0 {
-            return None;
-        }
-
-        loop {
-            unsafe {
-                let item = self.raw;
-                self.raw.idx += 1;
-                if *item.hash() != EMPTY_BUCKET {
-                    self.elems_left -= 1;
-                    return Some(item);
-                }
-            }
-        }
-    }
-
-    fn size_hint(&self) -> (usize, Option<usize>) {
-        (self.elems_left, Some(self.elems_left))
-    }
-}
-
-impl<K, V> ExactSizeIterator for RawBuckets<'_, K, V> {
-    fn len(&self) -> usize {
-        self.elems_left
-    }
-}
-
-/// Iterator over shared references to entries in a table.
-pub struct Iter<'a, K: 'a, V: 'a> {
-    iter: RawBuckets<'a, K, V>,
-}
-
-unsafe impl<K: Sync, V: Sync> Sync for Iter<'_, K, V> {}
-unsafe impl<K: Sync, V: Sync> Send for Iter<'_, K, V> {}
-
-// FIXME(#26925) Remove in favor of `#[derive(Clone)]`
-impl<K, V> Clone for Iter<'_, K, V> {
-    fn clone(&self) -> Self {
-        Iter {
-            iter: self.iter.clone(),
-        }
-    }
-}
-
-/// Iterator over mutable references to entries in a table.
-pub struct IterMut<'a, K: 'a, V: 'a> {
-    iter: RawBuckets<'a, K, V>,
-    // To ensure invariance with respect to V
-    _marker: marker::PhantomData<&'a mut V>,
-}
-
-unsafe impl<K: Sync, V: Sync> Sync for IterMut<'_, K, V> {}
-// Both K: Sync and K: Send are correct for IterMut's Send impl,
-// but Send is the more useful bound
-unsafe impl<K: Send, V: Send> Send for IterMut<'_, K, V> {}
-
-impl<'a, K: 'a, V: 'a> IterMut<'a, K, V> {
-    pub fn iter(&self) -> Iter<'_, K, V> {
-        Iter {
-            iter: self.iter.clone(),
-        }
-    }
-}
-
-/// Iterator over the entries in a table, consuming the table.
-pub struct IntoIter<K, V> {
-    table: RawTable<K, V>,
-    iter: RawBuckets<'static, K, V>,
-}
-
-unsafe impl<K: Sync, V: Sync> Sync for IntoIter<K, V> {}
-unsafe impl<K: Send, V: Send> Send for IntoIter<K, V> {}
-
-impl<K, V> IntoIter<K, V> {
-    pub fn iter(&self) -> Iter<'_, K, V> {
-        Iter {
-            iter: self.iter.clone(),
-        }
-    }
-}
-
-/// Iterator over the entries in a table, clearing the table.
-pub struct Drain<'a, K: 'a, V: 'a> {
-    table: NonNull<RawTable<K, V>>,
-    iter: RawBuckets<'static, K, V>,
-    marker: marker::PhantomData<&'a RawTable<K, V>>,
-}
-
-unsafe impl<K: Sync, V: Sync> Sync for Drain<'_, K, V> {}
-unsafe impl<K: Send, V: Send> Send for Drain<'_, K, V> {}
-
-impl<'a, K, V> Drain<'a, K, V> {
-    pub fn iter(&self) -> Iter<'_, K, V> {
-        Iter {
-            iter: self.iter.clone(),
-        }
-    }
-}
-
-impl<'a, K, V> Iterator for Iter<'a, K, V> {
-    type Item = (&'a K, &'a V);
-
-    fn next(&mut self) -> Option<(&'a K, &'a V)> {
-        self.iter.next().map(|raw| unsafe {
-            let pair_ptr = raw.pair();
-            (&(*pair_ptr).0, &(*pair_ptr).1)
-        })
-    }
-
-    fn size_hint(&self) -> (usize, Option<usize>) {
-        self.iter.size_hint()
-    }
-}
-
-impl<K, V> ExactSizeIterator for Iter<'_, K, V> {
-    fn len(&self) -> usize {
-        self.iter.len()
-    }
-}
-
-impl<'a, K, V> Iterator for IterMut<'a, K, V> {
-    type Item = (&'a K, &'a mut V);
-
-    fn next(&mut self) -> Option<(&'a K, &'a mut V)> {
-        self.iter.next().map(|raw| unsafe {
-            let pair_ptr = raw.pair();
-            (&(*pair_ptr).0, &mut (*pair_ptr).1)
-        })
-    }
-
-    fn size_hint(&self) -> (usize, Option<usize>) {
-        self.iter.size_hint()
-    }
-}
-
-impl<K, V> ExactSizeIterator for IterMut<'_, K, V> {
-    fn len(&self) -> usize {
-        self.iter.len()
-    }
-}
-
-impl<K, V> Iterator for IntoIter<K, V> {
-    type Item = (SafeHash, K, V);
-
-    fn next(&mut self) -> Option<(SafeHash, K, V)> {
-        self.iter.next().map(|raw| {
-            self.table.size -= 1;
-            unsafe {
-                let (k, v) = ptr::read(raw.pair());
-                (SafeHash { hash: *raw.hash() }, k, v)
-            }
-        })
-    }
-
-    fn size_hint(&self) -> (usize, Option<usize>) {
-        self.iter.size_hint()
-    }
-}
-
-impl<K, V> ExactSizeIterator for IntoIter<K, V> {
-    fn len(&self) -> usize {
-        self.iter().len()
-    }
-}
-
-impl<'a, K, V> Iterator for Drain<'a, K, V> {
-    type Item = (SafeHash, K, V);
-
-    #[inline]
-    fn next(&mut self) -> Option<(SafeHash, K, V)> {
-        self.iter.next().map(|raw| {
-            unsafe {
-                self.table.as_mut().size -= 1;
-                let (k, v) = ptr::read(raw.pair());
-                (SafeHash { hash: ptr::replace(&mut *raw.hash(), EMPTY_BUCKET) }, k, v)
-            }
-        })
-    }
-
-    fn size_hint(&self) -> (usize, Option<usize>) {
-        self.iter.size_hint()
-    }
-}
-
-impl<K, V> ExactSizeIterator for Drain<'_, K, V> {
-    fn len(&self) -> usize {
-        self.iter.len()
-    }
-}
-
-impl<K, V> Drop for Drain<'_, K, V> {
-    fn drop(&mut self) {
-        self.for_each(drop);
-    }
-}
-
-impl<K: Clone, V: Clone> Clone for RawTable<K, V> {
-    fn clone(&self) -> RawTable<K, V> {
-        unsafe {
-            let cap = self.capacity();
-            let mut new_ht = RawTable::new_uninitialized(cap);
-
-            let mut new_buckets = new_ht.raw_bucket_at(0);
-            let mut buckets = self.raw_bucket_at(0);
-            while buckets.idx < cap {
-                *new_buckets.hash() = *buckets.hash();
-                if *new_buckets.hash() != EMPTY_BUCKET {
-                    let pair_ptr = buckets.pair();
-                    let kv = ((*pair_ptr).0.clone(), (*pair_ptr).1.clone());
-                    ptr::write(new_buckets.pair(), kv);
-                }
-                buckets.idx += 1;
-                new_buckets.idx += 1;
-            }
-
-            new_ht.size = self.size();
-            new_ht.set_tag(self.tag());
-
-            new_ht
-        }
-    }
-}
-
-unsafe impl<#[may_dangle] K, #[may_dangle] V> Drop for RawTable<K, V> {
-    fn drop(&mut self) {
-        if self.capacity() == 0 {
-            return;
-        }
-
-        // This is done in reverse because we've likely partially taken
-        // some elements out with `.into_iter()` from the front.
-        // Check if the size is 0, so we don't do a useless scan when
-        // dropping empty tables such as on resize.
-        // Also avoid double drop of elements that have been already moved out.
-        unsafe {
-            if needs_drop::<(K, V)>() {
-                // avoid linear runtime for types that don't need drop
-                self.rev_drop_buckets();
-            }
-        }
-
-        let (layout, _) = calculate_layout::<K, V>(self.capacity())
-            .unwrap_or_else(|_| unsafe { hint::unreachable_unchecked() });
-        unsafe {
-            Global.dealloc(NonNull::new_unchecked(self.hashes.ptr()).cast(), layout);
-            // Remember how everything was allocated out of one buffer
-            // during initialization? We only need one call to free here.
-        }
-    }
-}

From 9325451ec9a044d3baecc4224c65e23941cef150 Mon Sep 17 00:00:00 2001
From: Amanieu d'Antras
Date: Fri, 22 Feb 2019 13:25:10 +0000
Subject: [PATCH 06/11] Fix test

---
 src/test/ui/issues/issue-21763.stderr | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/src/test/ui/issues/issue-21763.stderr b/src/test/ui/issues/issue-21763.stderr
index 62cc88776882f..87c048fdf4c11 100644
--- a/src/test/ui/issues/issue-21763.stderr
+++ b/src/test/ui/issues/issue-21763.stderr
@@ -4,10 +4,10 @@ error[E0277]: `std::rc::Rc<()>` cannot be sent between threads safely
 LL | foo::<HashMap<Rc<()>, Rc<()>>>();
    | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ `std::rc::Rc<()>` cannot be sent between threads safely
    |
-   = help: within `std::collections::HashMap<std::rc::Rc<()>, std::rc::Rc<()>>`, the trait `std::marker::Send` is not implemented for `std::rc::Rc<()>`
+   = help: within `(std::rc::Rc<()>, std::rc::Rc<()>)`, the trait `std::marker::Send` is not implemented for `std::rc::Rc<()>`
    = note: required because it appears within the type `(std::rc::Rc<()>, std::rc::Rc<()>)`
-   = note: required because it appears within the type `std::marker::PhantomData<(std::rc::Rc<()>, std::rc::Rc<()>)>`
-   = note: required because it appears within the type `std::collections::hash::table::RawTable<std::rc::Rc<()>, std::rc::Rc<()>>`
+   = note: required because of the requirements on the impl of `std::marker::Send` for `hashbrown::raw::RawTable<(std::rc::Rc<()>, std::rc::Rc<()>)>`
+   = note: required because it appears within the type `hashbrown::map::HashMap<std::rc::Rc<()>, std::rc::Rc<()>, std::collections::hash_map::RandomState>`
    = note: required because it appears within the type `std::collections::HashMap<std::rc::Rc<()>, std::rc::Rc<()>>`
 note: required by `foo`
   --> $DIR/issue-21763.rs:6:1

From e15bf96cb227aa84343785c603919de7c3369026 Mon Sep 17 00:00:00 2001
From: Amanieu d'Antras
Date: Fri, 22 Feb 2019 19:11:48 +0000
Subject: [PATCH 07/11] Remove broken tests

---
 src/libstd/collections/hash/map.rs | 74 ------------------------------
 1 file changed, 74 deletions(-)

diff --git a/src/libstd/collections/hash/map.rs b/src/libstd/collections/hash/map.rs
index ecb08314f6d05..d1ba9c267b72e 100644
--- a/src/libstd/collections/hash/map.rs
+++ b/src/libstd/collections/hash/map.rs
@@ -3086,80 +3086,6 @@ mod test_map {
         assert_eq!(format!("{:?}", empty), "{}");
     }
 
-    #[test]
-    fn test_expand() {
-        let mut m = HashMap::new();
-
-        assert_eq!(m.len(), 0);
-        assert!(m.is_empty());
-
-        let mut i = 0;
-        let old_raw_cap = m.raw_capacity();
-        while old_raw_cap == m.raw_capacity() {
-            m.insert(i, i);
-            i += 1;
-        }
-
-        assert_eq!(m.len(), i);
-        assert!(!m.is_empty());
-    }
-
-    #[test]
-    fn test_behavior_resize_policy() {
-        let mut m = HashMap::new();
-
-        assert_eq!(m.len(), 0);
-        assert_eq!(m.raw_capacity(), 1);
-        assert!(m.is_empty());
-
-        m.insert(0, 0);
-        m.remove(&0);
-        assert!(m.is_empty());
-        let initial_raw_cap = m.raw_capacity();
-        m.reserve(initial_raw_cap);
-        let raw_cap = m.raw_capacity();
-
-        assert_eq!(raw_cap, initial_raw_cap * 2);
-
-        let mut i = 0;
-        for _ in 0..raw_cap * 3 / 4 {
-            m.insert(i, i);
-            i += 1;
-        }
-        // three quarters full
-
-        
assert_eq!(m.len(), i); - assert_eq!(m.raw_capacity(), raw_cap); - - for _ in 0..raw_cap / 4 { - m.insert(i, i); - i += 1; - } - // half full - - let new_raw_cap = m.raw_capacity(); - assert_eq!(new_raw_cap, raw_cap * 2); - - for _ in 0..raw_cap / 2 - 1 { - i -= 1; - m.remove(&i); - assert_eq!(m.raw_capacity(), new_raw_cap); - } - // A little more than one quarter full. - m.shrink_to_fit(); - assert_eq!(m.raw_capacity(), raw_cap); - // again, a little more than half full - for _ in 0..raw_cap / 2 { - i -= 1; - m.remove(&i); - } - m.shrink_to_fit(); - - assert_eq!(m.len(), i); - assert!(!m.is_empty()); - assert_eq!(m.raw_capacity(), initial_raw_cap); - } - #[test] fn test_reserve_shrink_to_fit() { let mut m = HashMap::new(); From 5253366e190e8423db7256f0013d9ed1b7215b1d Mon Sep 17 00:00:00 2001 From: Amanieu d'Antras Date: Sun, 14 Apr 2019 11:48:53 +0100 Subject: [PATCH 08/11] Update hashbrown to 0.2.1 --- Cargo.lock | 6 +++--- src/libstd/Cargo.toml | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 6c5951f08da2f..9f15d340af11e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1068,7 +1068,7 @@ dependencies = [ [[package]] name = "hashbrown" -version = "0.2.0" +version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "compiler_builtins 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", @@ -3277,7 +3277,7 @@ dependencies = [ "core 0.0.0", "dlmalloc 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)", "fortanix-sgx-abi 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)", - "hashbrown 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", + "hashbrown 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", "libc 0.2.51 (registry+https://github.com/rust-lang/crates.io-index)", "panic_abort 0.0.0", "panic_unwind 0.0.0", @@ -4121,7 +4121,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" "checksum globset 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)" = "4743617a7464bbda3c8aec8558ff2f9429047e025771037df561d383337ff865" "checksum handlebars 0.32.4 (registry+https://github.com/rust-lang/crates.io-index)" = "d89ec99d1594f285d4590fc32bac5f75cdab383f1123d504d27862c644a807dd" "checksum handlebars 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "d82e5750d8027a97b9640e3fefa66bbaf852a35228e1c90790efd13c4b09c166" -"checksum hashbrown 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "58623735fa622916205f9e0a52a031b25b0e251ddaef47f7cb288444c4410beb" +"checksum hashbrown 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "d22f2163f3350b00b15b96da81d4ec3a8616983c010e0b69f6e4d060a2db9cd4" "checksum heck 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "ea04fa3ead4e05e51a7c806fc07271fdbde4e246a6c6d1efd52e72230b771b82" "checksum hex 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)" = "805026a5d0141ffc30abb3be3173848ad46a1b1664fe632428479619a3644d77" "checksum home 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)" = "80dff82fb58cfbbc617fb9a9184b010be0529201553cda50ad04372bc2333aff" diff --git a/src/libstd/Cargo.toml b/src/libstd/Cargo.toml index 83aee507f32f2..b9cf868ce2038 100644 --- a/src/libstd/Cargo.toml +++ b/src/libstd/Cargo.toml @@ -22,7 +22,7 @@ libc = { version = "0.2.51", default-features = false, features = ['rustc-dep-of compiler_builtins = { version = "0.1.9" } profiler_builtins = { path = "../libprofiler_builtins", optional = true } unwind = 
{ path = "../libunwind" } -hashbrown = { version = "0.2.0", features = ['rustc-dep-of-std'] } +hashbrown = { version = "0.2.1", features = ['rustc-dep-of-std'] } rustc-demangle = { version = "0.1.10", features = ['rustc-dep-of-std'] } backtrace-sys = { version = "0.1.24", features = ["rustc-dep-of-std"], optional = true } From 41c2f81d5765684e5062cc67d45e037d4daf8aea Mon Sep 17 00:00:00 2001 From: Amanieu d'Antras Date: Tue, 16 Apr 2019 10:44:38 +0100 Subject: [PATCH 09/11] Remove stabilized alloc feature from rustc-std-workspace-alloc --- src/tools/rustc-std-workspace-alloc/lib.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/tools/rustc-std-workspace-alloc/lib.rs b/src/tools/rustc-std-workspace-alloc/lib.rs index 50294e6cbad5a..c38a8d2f204e4 100644 --- a/src/tools/rustc-std-workspace-alloc/lib.rs +++ b/src/tools/rustc-std-workspace-alloc/lib.rs @@ -1,4 +1,4 @@ -#![feature(no_core, alloc)] +#![feature(no_core)] #![no_core] // See rustc-std-workspace-core for why this crate is needed. From ae388773e1b8af991650f132b1d241cc403999af Mon Sep 17 00:00:00 2001 From: Amanieu d'Antras Date: Tue, 16 Apr 2019 13:47:20 +0100 Subject: [PATCH 10/11] Update hashbrown to 0.2.2 --- Cargo.lock | 6 +++--- src/libstd/Cargo.toml | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 9f15d340af11e..c594a0d47c765 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1068,7 +1068,7 @@ dependencies = [ [[package]] name = "hashbrown" -version = "0.2.1" +version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "compiler_builtins 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", @@ -3277,7 +3277,7 @@ dependencies = [ "core 0.0.0", "dlmalloc 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)", "fortanix-sgx-abi 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)", - "hashbrown 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", + "hashbrown 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)", "libc 0.2.51 (registry+https://github.com/rust-lang/crates.io-index)", "panic_abort 0.0.0", "panic_unwind 0.0.0", @@ -4121,7 +4121,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" "checksum globset 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)" = "4743617a7464bbda3c8aec8558ff2f9429047e025771037df561d383337ff865" "checksum handlebars 0.32.4 (registry+https://github.com/rust-lang/crates.io-index)" = "d89ec99d1594f285d4590fc32bac5f75cdab383f1123d504d27862c644a807dd" "checksum handlebars 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "d82e5750d8027a97b9640e3fefa66bbaf852a35228e1c90790efd13c4b09c166" -"checksum hashbrown 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "d22f2163f3350b00b15b96da81d4ec3a8616983c010e0b69f6e4d060a2db9cd4" +"checksum hashbrown 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "61e4900fa4e80b3d15c78a08ec8a08433246063fa7577e7b2c6426b3b21b1f79" "checksum heck 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "ea04fa3ead4e05e51a7c806fc07271fdbde4e246a6c6d1efd52e72230b771b82" "checksum hex 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)" = "805026a5d0141ffc30abb3be3173848ad46a1b1664fe632428479619a3644d77" "checksum home 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)" = "80dff82fb58cfbbc617fb9a9184b010be0529201553cda50ad04372bc2333aff" diff --git a/src/libstd/Cargo.toml b/src/libstd/Cargo.toml index 
b9cf868ce2038..6154793684458 100644 --- a/src/libstd/Cargo.toml +++ b/src/libstd/Cargo.toml @@ -22,7 +22,7 @@ libc = { version = "0.2.51", default-features = false, features = ['rustc-dep-of compiler_builtins = { version = "0.1.9" } profiler_builtins = { path = "../libprofiler_builtins", optional = true } unwind = { path = "../libunwind" } -hashbrown = { version = "0.2.1", features = ['rustc-dep-of-std'] } +hashbrown = { version = "0.2.2", features = ['rustc-dep-of-std'] } rustc-demangle = { version = "0.1.10", features = ['rustc-dep-of-std'] } backtrace-sys = { version = "0.1.24", features = ["rustc-dep-of-std"], optional = true } From e7f162fdd8a5f45ee0c2869ee6a8afe7dba69248 Mon Sep 17 00:00:00 2001 From: Amanieu d'Antras Date: Tue, 23 Apr 2019 06:31:52 +0800 Subject: [PATCH 11/11] Update hashbrown to 0.3.0 --- Cargo.lock | 6 +++--- src/libstd/Cargo.toml | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index c594a0d47c765..59f2bd21026ba 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1068,7 +1068,7 @@ dependencies = [ [[package]] name = "hashbrown" -version = "0.2.2" +version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "compiler_builtins 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", @@ -3277,7 +3277,7 @@ dependencies = [ "core 0.0.0", "dlmalloc 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)", "fortanix-sgx-abi 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)", - "hashbrown 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)", + "hashbrown 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", "libc 0.2.51 (registry+https://github.com/rust-lang/crates.io-index)", "panic_abort 0.0.0", "panic_unwind 0.0.0", @@ -4121,7 +4121,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" "checksum globset 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)" = "4743617a7464bbda3c8aec8558ff2f9429047e025771037df561d383337ff865" "checksum handlebars 0.32.4 (registry+https://github.com/rust-lang/crates.io-index)" = "d89ec99d1594f285d4590fc32bac5f75cdab383f1123d504d27862c644a807dd" "checksum handlebars 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "d82e5750d8027a97b9640e3fefa66bbaf852a35228e1c90790efd13c4b09c166" -"checksum hashbrown 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "61e4900fa4e80b3d15c78a08ec8a08433246063fa7577e7b2c6426b3b21b1f79" +"checksum hashbrown 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "570178d5e4952010d138b0f1d581271ff3a02406d990f887d1e87e3d6e43b0ac" "checksum heck 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "ea04fa3ead4e05e51a7c806fc07271fdbde4e246a6c6d1efd52e72230b771b82" "checksum hex 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)" = "805026a5d0141ffc30abb3be3173848ad46a1b1664fe632428479619a3644d77" "checksum home 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)" = "80dff82fb58cfbbc617fb9a9184b010be0529201553cda50ad04372bc2333aff" diff --git a/src/libstd/Cargo.toml b/src/libstd/Cargo.toml index 6154793684458..ac1aff845d8c9 100644 --- a/src/libstd/Cargo.toml +++ b/src/libstd/Cargo.toml @@ -22,7 +22,7 @@ libc = { version = "0.2.51", default-features = false, features = ['rustc-dep-of compiler_builtins = { version = "0.1.9" } profiler_builtins = { path = "../libprofiler_builtins", optional = true } unwind = { path = "../libunwind" } -hashbrown = { version = "0.2.2", features = ['rustc-dep-of-std'] } 
+hashbrown = { version = "0.3.0", features = ['rustc-dep-of-std'] } rustc-demangle = { version = "0.1.10", features = ['rustc-dep-of-std'] } backtrace-sys = { version = "0.1.24", features = ["rustc-dep-of-std"], optional = true }
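Editor's note on the single-allocation scheme used by the removed table.rs earlier in this series: its `calculate_layout` helper packs the hash array and the (key, value) array into one buffer and reports the byte offset at which the pairs begin, which is also why its Drop impl needs only a single dealloc call. The sketch below is an illustration of that idea on current stable Rust, not code from these patches: `HashUint = usize` is an assumption, and it simply uses the offset returned by `Layout::extend`, which the original recomputed by hand only as a codegen workaround.

    use std::alloc::{Layout, LayoutError};

    // Stand-in for the old table's hash word type (an assumption for this sketch).
    type HashUint = usize;

    // One combined layout for `capacity` hashes followed by `capacity` (K, V)
    // pairs, plus the byte offset at which the pair array begins.
    fn calculate_layout<K, V>(capacity: usize) -> Result<(Layout, usize), LayoutError> {
        let hashes = Layout::array::<HashUint>(capacity)?;
        let pairs = Layout::array::<(K, V)>(capacity)?;
        // `extend` appends `pairs` after `hashes`, inserting any alignment
        // padding, and returns the combined layout plus the pairs' offset.
        hashes.extend(pairs)
    }

    fn main() -> Result<(), LayoutError> {
        let (layout, pairs_offset) = calculate_layout::<u64, u64>(8)?;
        println!("total bytes: {}, pairs start at byte {}", layout.size(), pairs_offset);
        Ok(())
    }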