Auto merge of #70052 - Amanieu:hashbrown7, r=Mark-Simulacrum

Update hashbrown to 0.8.1

This update includes:
- https://github.com/rust-lang/hashbrown/pull/146, which improves the performance of `Clone` and implements `clone_from` (see the usage sketch below).
- https://github.com/rust-lang/hashbrown/pull/159, which reduces the size of `HashMap` by 8 bytes.
- https://github.com/rust-lang/hashbrown/pull/162, which avoids creating small 1-element tables.

Fixes #28481
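As a rough illustration of the first two bullet points (not taken from this PR's diff), `clone_from` reuses the destination map's existing allocation where possible, and the size reduction can be checked with `mem::size_of`; the exact number printed depends on which hashbrown version std was built against:

```rust
use std::collections::HashMap;
use std::mem;

fn main() {
    let src: HashMap<u32, u32> = (0..100).map(|i| (i, i * 2)).collect();

    // `clone_from` overwrites `dst` with `src`'s contents and may reuse
    // `dst`'s existing table instead of allocating a new one (hashbrown#146).
    let mut dst: HashMap<u32, u32> = HashMap::with_capacity(100);
    dst.clone_from(&src);
    assert_eq!(dst, src);

    // hashbrown#159 shrinks the map header by 8 bytes; observe it directly.
    println!("size_of::<HashMap<u32, u32>>() = {}", mem::size_of::<HashMap<u32, u32>>());
}
```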
Committed by bors on 2020-08-07 08:36:15 +00:00 (commit 8b26609481).
7 changed files with 47 additions and 66 deletions

Cargo.lock
@@ -137,12 +137,6 @@ dependencies = [
  "winapi 0.3.8",
 ]
 
-[[package]]
-name = "autocfg"
-version = "0.1.7"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "1d49d90015b3c36167a20fe2810c5cd875ad504b39cff3d4eae7977e6b7c1cb2"
-
 [[package]]
 name = "autocfg"
 version = "1.0.0"
@@ -766,7 +760,7 @@ version = "0.7.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "c3c7c73a2d1e9fc0886a08b93e98eb643461230d5f1925e4036204d5f2e261a8"
 dependencies = [
- "autocfg 1.0.0",
+ "autocfg",
  "cfg-if",
  "lazy_static",
 ]
@@ -1245,11 +1239,11 @@ dependencies = [
 [[package]]
 name = "hashbrown"
-version = "0.6.2"
+version = "0.8.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "3cd9867f119b19fecb08cd5c326ad4488d7a1da4bf75b4d95d71db742525aaab"
+checksum = "34f595585f103464d8d2f6e9864682d74c1601fed5e07d62b1c9058dba8246fb"
 dependencies = [
- "autocfg 0.1.7",
+ "autocfg",
  "compiler_builtins",
  "rustc-std-workspace-alloc",
  "rustc-std-workspace-core",
@@ -2079,7 +2073,7 @@ version = "0.9.54"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "1024c0a59774200a555087a6da3f253a9095a5f344e353b212ac4c8b8e450986"
 dependencies = [
- "autocfg 1.0.0",
+ "autocfg",
  "cc",
  "libc",
  "openssl-src",

std's Cargo.toml
@@ -20,7 +20,7 @@ libc = { version = "0.2.51", default-features = false, features = ['rustc-dep-of
 compiler_builtins = { version = "0.1.32" }
 profiler_builtins = { path = "../profiler_builtins", optional = true }
 unwind = { path = "../unwind" }
-hashbrown = { version = "0.6.2", default-features = false, features = ['rustc-dep-of-std'] }
+hashbrown = { version = "0.8.1", default-features = false, features = ['rustc-dep-of-std'] }
 
 # Dependencies of the `backtrace` crate
 addr2line = { version = "0.13.0", optional = true, default-features = false }

std's HashMap wrapper (collections/hash/map.rs)
@@ -580,7 +580,7 @@ where
     #[inline]
     #[unstable(feature = "try_reserve", reason = "new API", issue = "48043")]
     pub fn try_reserve(&mut self, additional: usize) -> Result<(), TryReserveError> {
-        self.base.try_reserve(additional).map_err(map_collection_alloc_err)
+        self.base.try_reserve(additional).map_err(map_try_reserve_error)
     }
 
     /// Shrinks the capacity of the map as much as possible. It will drop
@@ -2569,10 +2569,10 @@ fn map_entry<'a, K: 'a, V: 'a>(raw: base::RustcEntry<'a, K, V>) -> Entry<'a, K,
 }
 
 #[inline]
-fn map_collection_alloc_err(err: hashbrown::CollectionAllocErr) -> TryReserveError {
+fn map_try_reserve_error(err: hashbrown::TryReserveError) -> TryReserveError {
     match err {
-        hashbrown::CollectionAllocErr::CapacityOverflow => TryReserveError::CapacityOverflow,
-        hashbrown::CollectionAllocErr::AllocErr { layout } => {
+        hashbrown::TryReserveError::CapacityOverflow => TryReserveError::CapacityOverflow,
+        hashbrown::TryReserveError::AllocError { layout } => {
            TryReserveError::AllocError { layout, non_exhaustive: () }
        }
    }
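For context on the hunk above: std's `try_reserve` simply forwards to hashbrown and converts the error type (now called `TryReserveError` on both sides). A minimal usage sketch, assuming a nightly toolchain of that era with the `try_reserve` feature enabled:

```rust
// Nightly-only at the time of this commit (tracking issue #48043).
#![feature(try_reserve)]

use std::collections::{HashMap, TryReserveError};

/// Try to make room for `extra` more entries without aborting on OOM.
fn reserve_or_report(map: &mut HashMap<u64, u64>, extra: usize) -> Result<(), TryReserveError> {
    // Internally this calls hashbrown's `try_reserve` and maps
    // `hashbrown::TryReserveError` into std's `TryReserveError`
    // (the `map_try_reserve_error` helper renamed in the hunk above).
    map.try_reserve(extra)
}

fn main() {
    let mut map = HashMap::new();
    match reserve_or_report(&mut map, 1024) {
        Ok(()) => println!("reserved"),
        Err(e) => println!("allocation failed: {:?}", e),
    }
}
```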

GDB pretty-printer (StdHashMapProvider)
@@ -352,8 +352,13 @@ class StdHashMapProvider:
         ctrl = table["ctrl"]["pointer"]
 
         self.size = int(table["items"])
-        self.data_ptr = table["data"]["pointer"]
-        self.pair_type = self.data_ptr.dereference().type
+        self.pair_type = table.type.template_argument(0)
+
+        self.new_layout = not table.type.has_key("data")
+        if self.new_layout:
+            self.data_ptr = ctrl.cast(self.pair_type.pointer())
+        else:
+            self.data_ptr = table["data"]["pointer"]
 
         self.valid_indices = []
         for idx in range(capacity):
@@ -374,6 +379,8 @@ class StdHashMapProvider:
         for index in range(self.size):
             idx = self.valid_indices[index]
+            if self.new_layout:
+                idx = -(idx + 1)
             element = (pairs_start + idx).dereference()
             if self.show_values:
                 yield "key{}".format(index), element[ZERO_FIELD]

LLDB provider (StdHashMapSyntheticProvider)
@@ -514,6 +514,8 @@ class StdHashMapSyntheticProvider:
         # type: (int) -> SBValue
         pairs_start = self.data_ptr.GetValueAsUnsigned()
         idx = self.valid_indices[index]
+        if self.new_layout:
+            idx = -(idx + 1)
         address = pairs_start + idx * self.pair_type_size
         element = self.data_ptr.CreateValueFromAddress("[%s]" % index, address, self.pair_type)
         if self.show_values:
@@ -529,10 +531,15 @@ class StdHashMapSyntheticProvider:
         ctrl = table.GetChildMemberWithName("ctrl").GetChildAtIndex(0)
 
         self.size = table.GetChildMemberWithName("items").GetValueAsUnsigned()
-        self.data_ptr = table.GetChildMemberWithName("data").GetChildAtIndex(0)
-        self.pair_type = self.data_ptr.Dereference().GetType()
+        self.pair_type = table.type.template_args[0]
         self.pair_type_size = self.pair_type.GetByteSize()
 
+        self.new_layout = not table.GetChildMemberWithName("data").IsValid()
+        if self.new_layout:
+            self.data_ptr = ctrl.Cast(self.pair_type.GetPointerType())
+        else:
+            self.data_ptr = table.GetChildMemberWithName("data").GetChildAtIndex(0)
 
         u8_type = self.valobj.GetTarget().GetBasicType(eBasicTypeUnsignedChar)
         u8_type_size = self.valobj.GetTarget().GetBasicType(eBasicTypeUnsignedChar).GetByteSize()

Windows natvis debugger visualizer for HashMap/HashSet
@@ -30,6 +30,7 @@
     <Expand>
       <Item Name="[size]">base.table.items</Item>
       <Item Name="[capacity]">base.table.items + base.table.growth_left</Item>
+      <Item Name="[state]">base.hash_builder</Item>
 
       <CustomListItems>
         <Variable Name="i" InitialValue="0" />
@@ -40,7 +41,7 @@
           <If Condition="(base.table.ctrl.pointer[i] &amp; 0x80) == 0">
             <!-- Bucket is populated -->
             <Exec>n--</Exec>
-            <Item Name="{base.table.data.pointer[i].__0}">base.table.data.pointer[i].__1</Item>
+            <Item Name="{static_cast&lt;tuple&lt;$T1, $T2&gt;*&gt;(base.table.ctrl.pointer)[-(i + 1)].__0}">static_cast&lt;tuple&lt;$T1, $T2&gt;*&gt;(base.table.ctrl.pointer)[-(i + 1)].__1</Item>
           </If>
           <Exec>i++</Exec>
         </Loop>
@@ -53,6 +54,7 @@
     <Expand>
       <Item Name="[size]">map.base.table.items</Item>
       <Item Name="[capacity]">map.base.table.items + map.base.table.growth_left</Item>
+      <Item Name="[state]">map.base.hash_builder</Item>
 
       <CustomListItems>
         <Variable Name="i" InitialValue="0" />
@@ -63,36 +65,7 @@
           <If Condition="(map.base.table.ctrl.pointer[i] &amp; 0x80) == 0">
             <!-- Bucket is populated -->
             <Exec>n--</Exec>
-            <Item>map.base.table.data.pointer[i].__0</Item>
-          </If>
-          <Exec>i++</Exec>
-        </Loop>
-      </CustomListItems>
-    </Expand>
-  </Type>
-
-  <Type Name="hashbrown::raw::RawTable&lt;*&gt;">
-    <!-- RawTable has a nice and simple layout.
-         items                    Number of *populated* values in the RawTable (less than the size of ctrl.pointer / data.pointer)
-         growth_left              Remaining capacity before growth
-         ctrl.pointer[i] & 0x80   Indicates the bucket is empty / should be skipped / doesn't count towards items.
-         data.pointer[i]          The (K,V) tuple, if not empty.
-    -->
-    <DisplayString>{{ size={items} }}</DisplayString>
-    <Expand>
-      <Item Name="[size]">items</Item>
-      <Item Name="[capacity]">items + growth_left</Item>
-      <CustomListItems>
-        <Variable Name="i" InitialValue="0" />
-        <Variable Name="n" InitialValue="items" />
-        <Size>items</Size>
-        <Loop>
-          <Break Condition="n == 0" />
-          <If Condition="(ctrl.pointer[i] &amp; 0x80) == 0">
-            <!-- Bucket is populated -->
-            <Exec>n--</Exec>
-            <Item>data.pointer[i]</Item>
+            <Item>static_cast&lt;$T1*&gt;(map.base.table.ctrl.pointer)[-(i + 1)]</Item>
           </If>
           <Exec>i++</Exec>
         </Loop>

cdb debuginfo test (pretty-printing of HashSet/HashMap)
@@ -9,35 +9,35 @@
 // cdb-check:hash_set,d [...] : { size=15 } [Type: [...]::HashSet<u64, [...]>]
 // cdb-check: [size] : 15 [Type: [...]]
 // cdb-check: [capacity] : [...]
-// cdb-check: [[...]] [...] : 0 [Type: unsigned __int64]
+// cdb-check: [[...]] [...] : 0 [Type: u64]
 // cdb-command: dx hash_set,d
-// cdb-check: [[...]] [...] : 1 [Type: unsigned __int64]
+// cdb-check: [[...]] [...] : 1 [Type: u64]
 // cdb-command: dx hash_set,d
-// cdb-check: [[...]] [...] : 2 [Type: unsigned __int64]
+// cdb-check: [[...]] [...] : 2 [Type: u64]
 // cdb-command: dx hash_set,d
-// cdb-check: [[...]] [...] : 3 [Type: unsigned __int64]
+// cdb-check: [[...]] [...] : 3 [Type: u64]
 // cdb-command: dx hash_set,d
-// cdb-check: [[...]] [...] : 4 [Type: unsigned __int64]
+// cdb-check: [[...]] [...] : 4 [Type: u64]
 // cdb-command: dx hash_set,d
-// cdb-check: [[...]] [...] : 5 [Type: unsigned __int64]
+// cdb-check: [[...]] [...] : 5 [Type: u64]
 // cdb-command: dx hash_set,d
-// cdb-check: [[...]] [...] : 6 [Type: unsigned __int64]
+// cdb-check: [[...]] [...] : 6 [Type: u64]
 // cdb-command: dx hash_set,d
-// cdb-check: [[...]] [...] : 7 [Type: unsigned __int64]
+// cdb-check: [[...]] [...] : 7 [Type: u64]
 // cdb-command: dx hash_set,d
-// cdb-check: [[...]] [...] : 8 [Type: unsigned __int64]
+// cdb-check: [[...]] [...] : 8 [Type: u64]
 // cdb-command: dx hash_set,d
-// cdb-check: [[...]] [...] : 9 [Type: unsigned __int64]
+// cdb-check: [[...]] [...] : 9 [Type: u64]
 // cdb-command: dx hash_set,d
-// cdb-check: [[...]] [...] : 10 [Type: unsigned __int64]
+// cdb-check: [[...]] [...] : 10 [Type: u64]
 // cdb-command: dx hash_set,d
-// cdb-check: [[...]] [...] : 11 [Type: unsigned __int64]
+// cdb-check: [[...]] [...] : 11 [Type: u64]
 // cdb-command: dx hash_set,d
-// cdb-check: [[...]] [...] : 12 [Type: unsigned __int64]
+// cdb-check: [[...]] [...] : 12 [Type: u64]
 // cdb-command: dx hash_set,d
-// cdb-check: [[...]] [...] : 13 [Type: unsigned __int64]
+// cdb-check: [[...]] [...] : 13 [Type: u64]
 // cdb-command: dx hash_set,d
-// cdb-check: [[...]] [...] : 14 [Type: unsigned __int64]
+// cdb-check: [[...]] [...] : 14 [Type: u64]
 // cdb-command: dx hash_map,d
 // cdb-check:hash_map,d [...] : { size=15 } [Type: [...]::HashMap<u64, u64, [...]>]