Optimize lock_shards

parent b74cb78d63
commit f458b112f8

4 changed files with 42 additions and 34 deletions
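In short: `Sharded::lock_shards` and `Sharded::try_lock_shards` previously allocated a `Vec` of lock guards on every call; they now return lazy iterators, built from `iter::once` in the single-shard case and bridged across the `cfg(parallel_compiler)` variants with `itertools::Either`, and every caller is updated to consume the iterator directly.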
@@ -2,9 +2,12 @@ use crate::fx::{FxHashMap, FxHasher};
 #[cfg(parallel_compiler)]
 use crate::sync::{is_dyn_thread_safe, CacheAligned};
 use crate::sync::{Lock, LockGuard};
+#[cfg(parallel_compiler)]
+use itertools::Either;
 use std::borrow::Borrow;
 use std::collections::hash_map::RawEntryMut;
 use std::hash::{Hash, Hasher};
+use std::iter;
 use std::mem;
 
 // 32 shards is sufficient to reduce contention on an 8-core Ryzen 7 1700,
@@ -70,19 +73,27 @@ impl<T> Sharded<T> {
         }
     }
 
-    pub fn lock_shards(&self) -> Vec<LockGuard<'_, T>> {
+    #[inline]
+    pub fn lock_shards(&self) -> impl Iterator<Item = LockGuard<'_, T>> {
         match self {
-            Self::Single(single) => vec![single.lock()],
+            #[cfg(not(parallel_compiler))]
+            Self::Single(single) => iter::once(single.lock()),
             #[cfg(parallel_compiler)]
-            Self::Shards(shards) => shards.iter().map(|shard| shard.0.lock()).collect(),
+            Self::Single(single) => Either::Left(iter::once(single.lock())),
+            #[cfg(parallel_compiler)]
+            Self::Shards(shards) => Either::Right(shards.iter().map(|shard| shard.0.lock())),
         }
     }
 
-    pub fn try_lock_shards(&self) -> Option<Vec<LockGuard<'_, T>>> {
+    #[inline]
+    pub fn try_lock_shards(&self) -> impl Iterator<Item = Option<LockGuard<'_, T>>> {
         match self {
-            Self::Single(single) => Some(vec![single.try_lock()?]),
+            #[cfg(not(parallel_compiler))]
+            Self::Single(single) => iter::once(single.try_lock()),
             #[cfg(parallel_compiler)]
-            Self::Shards(shards) => shards.iter().map(|shard| shard.0.try_lock()).collect(),
+            Self::Single(single) => Either::Left(iter::once(single.try_lock())),
+            #[cfg(parallel_compiler)]
+            Self::Shards(shards) => Either::Right(shards.iter().map(|shard| shard.0.try_lock())),
         }
     }
 }
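Why `Either`: under `parallel_compiler` the two match arms of `lock_shards` produce different concrete iterator types, but the function must name a single `impl Iterator` return type. `itertools::Either` implements `Iterator` whenever both of its variants do, so wrapping the arms in `Either::Left`/`Either::Right` unifies them without boxing. A minimal standalone sketch of the pattern (illustrative, not part of this commit):

    use itertools::Either;
    use std::iter;

    // One opaque return type, two different concrete iterators.
    fn values(single: bool, data: &[u32]) -> impl Iterator<Item = u32> + '_ {
        if single {
            // Single case: a `std::iter::Once<u32>`.
            Either::Left(iter::once(data[0]))
        } else {
            // Sharded case: a `Copied<slice::Iter<'_, u32>>`.
            Either::Right(data.iter().copied())
        }
    }

    fn main() {
        assert_eq!(values(true, &[7, 8]).sum::<u32>(), 7);
        assert_eq!(values(false, &[7, 8]).sum::<u32>(), 15);
    }

Compared with returning `Box<dyn Iterator>`, the `Either` wrapper is a plain enum with no allocation or dynamic dispatch, which suits a hot path like shard locking.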
@@ -101,7 +112,7 @@ pub type ShardedHashMap<K, V> = Sharded<FxHashMap<K, V>>;
 
 impl<K: Eq, V> ShardedHashMap<K, V> {
     pub fn len(&self) -> usize {
-        self.lock_shards().iter().map(|shard| shard.len()).sum()
+        self.lock_shards().map(|shard| shard.len()).sum()
     }
 }
 
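Note the semantic shift that drives the call-site rewrites below: the old API acquired every shard's lock up front and returned all the guards at once, whereas the new iterator is lazy, so a `for shard in … lock_shards()` loop holds at most one shard lock at a time, releasing each guard at the end of its iteration before the next shard is locked.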
@@ -1296,25 +1296,26 @@ macro_rules! sty_debug_print {
                 };
                 $(let mut $variant = total;)*
 
-                let shards = tcx.interners.type_.lock_shards();
-                let types = shards.iter().flat_map(|shard| shard.keys());
-                for &InternedInSet(t) in types {
-                    let variant = match t.internee {
-                        ty::Bool | ty::Char | ty::Int(..) | ty::Uint(..) |
-                            ty::Float(..) | ty::Str | ty::Never => continue,
-                        ty::Error(_) => /* unimportant */ continue,
-                        $(ty::$variant(..) => &mut $variant,)*
-                    };
-                    let lt = t.flags.intersects(ty::TypeFlags::HAS_RE_INFER);
-                    let ty = t.flags.intersects(ty::TypeFlags::HAS_TY_INFER);
-                    let ct = t.flags.intersects(ty::TypeFlags::HAS_CT_INFER);
+                for shard in tcx.interners.type_.lock_shards() {
+                    let types = shard.keys();
+                    for &InternedInSet(t) in types {
+                        let variant = match t.internee {
+                            ty::Bool | ty::Char | ty::Int(..) | ty::Uint(..) |
+                                ty::Float(..) | ty::Str | ty::Never => continue,
+                            ty::Error(_) => /* unimportant */ continue,
+                            $(ty::$variant(..) => &mut $variant,)*
+                        };
+                        let lt = t.flags.intersects(ty::TypeFlags::HAS_RE_INFER);
+                        let ty = t.flags.intersects(ty::TypeFlags::HAS_TY_INFER);
+                        let ct = t.flags.intersects(ty::TypeFlags::HAS_CT_INFER);
 
-                    variant.total += 1;
-                    total.total += 1;
-                    if lt { total.lt_infer += 1; variant.lt_infer += 1 }
-                    if ty { total.ty_infer += 1; variant.ty_infer += 1 }
-                    if ct { total.ct_infer += 1; variant.ct_infer += 1 }
-                    if lt && ty && ct { total.all_infer += 1; variant.all_infer += 1 }
+                        variant.total += 1;
+                        total.total += 1;
+                        if lt { total.lt_infer += 1; variant.lt_infer += 1 }
+                        if ty { total.ty_infer += 1; variant.ty_infer += 1 }
+                        if ct { total.ct_infer += 1; variant.ct_infer += 1 }
+                        if lt && ty && ct { total.all_infer += 1; variant.all_infer += 1 }
+                    }
                 }
                 writeln!(fmt, "Ty interner               total           ty lt ct all")?;
                 $(writeln!(fmt, "    {:18}: {uses:6} {usespc:4.1}%, \
@@ -70,8 +70,7 @@ where
     }
 
     fn iter(&self, f: &mut dyn FnMut(&Self::Key, &Self::Value, DepNodeIndex)) {
-        let shards = self.cache.lock_shards();
-        for shard in shards.iter() {
+        for shard in self.cache.lock_shards() {
             for (k, v) in shard.iter() {
                 f(k, &v.0, v.1);
             }
@@ -160,8 +159,7 @@ where
     }
 
     fn iter(&self, f: &mut dyn FnMut(&Self::Key, &Self::Value, DepNodeIndex)) {
-        let shards = self.cache.lock_shards();
-        for shard in shards.iter() {
+        for shard in self.cache.lock_shards() {
             for (k, v) in shard.iter_enumerated() {
                 if let Some(v) = v {
                     f(&k, &v.0, v.1);
@@ -50,8 +50,7 @@ where
     D: DepKind,
 {
     pub fn all_inactive(&self) -> bool {
-        let shards = self.active.lock_shards();
-        shards.iter().all(|shard| shard.is_empty())
+        self.active.lock_shards().all(|shard| shard.is_empty())
     }
 
     pub fn try_collect_active_jobs<Qcx: Copy>(
@@ -64,9 +63,8 @@ where
 
         // We use try_lock_shards here since we are called from the
        // deadlock handler, and this shouldn't be locked.
-        let shards = self.active.try_lock_shards()?;
-        for shard in shards.iter() {
-            for (k, v) in shard.iter() {
+        for shard in self.active.try_lock_shards() {
+            for (k, v) in shard?.iter() {
                 if let QueryResult::Started(ref job) = *v {
                     active.push((*k, job.clone()));
                 }
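The `shard?` in the rewritten loop works because `try_lock_shards` now yields one `Option<LockGuard<'_, T>>` per shard and the surrounding function returns an `Option`: the first contended shard short-circuits the whole collection, and, since the iterator is lazy, the shards after it are never locked at all. A standalone sketch of the same consumption pattern, using `std::sync::Mutex` in place of the compiler's `Lock` (illustrative, not part of this commit):

    use std::sync::Mutex;

    // Each `try_lock` yields an `Option<MutexGuard<_>>`, mirroring the
    // `Option<LockGuard<'_, T>>` items produced by `try_lock_shards`.
    fn collect_all(shards: &[Mutex<Vec<u32>>]) -> Option<Vec<u32>> {
        let mut out = Vec::new();
        for shard in shards.iter().map(|s| s.try_lock().ok()) {
            // `?` bails out on the first contended shard; later shards
            // are never locked because the iterator is lazy.
            out.extend(shard?.iter().copied());
        }
        Some(out)
    }

    fn main() {
        let shards = [Mutex::new(vec![1, 2]), Mutex::new(vec![3])];
        assert_eq!(collect_all(&shards), Some(vec![1, 2, 3]));
    }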