Switched some uses to UnordMap

Andrew Xie 2023-05-07 19:52:19 -04:00
parent 2a96c6e517
commit 1be19f710c
7 changed files with 148 additions and 134 deletions

View file

@@ -24,7 +24,7 @@
 use crate::errors;
 use rustc_ast as ast;
-use rustc_data_structures::fx::FxIndexSet;
+use rustc_data_structures::unord::UnordSet;
 use rustc_hir::def_id::LOCAL_CRATE;
 use rustc_middle::mir::mono::CodegenUnitNameBuilder;
 use rustc_middle::ty::TyCtxt;
@@ -52,7 +52,7 @@ pub fn assert_module_sources(tcx: TyCtxt<'_>) {
 struct AssertModuleSource<'tcx> {
     tcx: TyCtxt<'tcx>,
-    available_cgus: FxIndexSet<Symbol>,
+    available_cgus: UnordSet<Symbol>,
 }
 
 impl<'tcx> AssertModuleSource<'tcx> {
@@ -118,9 +118,8 @@ impl<'tcx> AssertModuleSource<'tcx> {
         debug!("mapping '{}' to cgu name '{}'", self.field(attr, sym::module), cgu_name);
 
         if !self.available_cgus.contains(&cgu_name) {
-            let mut cgu_names: Vec<&str> =
-                self.available_cgus.iter().map(|cgu| cgu.as_str()).collect();
-            cgu_names.sort();
+            let cgu_names: Vec<String> =
+                self.available_cgus.items().map(|cgu| cgu.as_str().to_owned()).into_sorted(&());
             self.tcx.sess.emit_err(errors::NoModuleNamed {
                 span: attr.span,
                 user_path,
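
A sketch of the pattern this hunk encodes, using std types (HashSet stands in for UnordSet, and the CGU names are invented): the set's iteration order is nondeterministic, so the names are sorted before anything user-visible like a diagnostic is built. rustc's into_sorted(&()) performs the same step through its stable-hash-key machinery.

    use std::collections::HashSet;

    fn main() {
        let available_cgus: HashSet<String> =
            ["cgu_b", "cgu_a", "cgu_c"].iter().map(|s| s.to_string()).collect();

        // Deterministic order for the error message, regardless of hash order.
        let mut cgu_names: Vec<String> = available_cgus.into_iter().collect();
        cgu_names.sort();

        println!("no module named `X`; available CGUs: {}", cgu_names.join(", "));
    }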

View file

@@ -21,7 +21,8 @@
 use crate::errors;
 use rustc_ast::{self as ast, Attribute, NestedMetaItem};
-use rustc_data_structures::fx::{FxHashSet, FxIndexSet};
+use rustc_data_structures::fx::FxHashSet;
+use rustc_data_structures::unord::UnordSet;
 use rustc_hir::def_id::LocalDefId;
 use rustc_hir::intravisit;
 use rustc_hir::Node as HirNode;
@@ -125,7 +126,7 @@ const LABELS_ADT: &[&[&str]] = &[BASE_HIR, BASE_STRUCT];
 //
 // type_of for these.
 
-type Labels = FxIndexSet<String>;
+type Labels = UnordSet<String>;
 
 /// Represents the requested configuration by rustc_clean/dirty
 struct Assertion {
@@ -197,7 +198,7 @@ impl<'tcx> DirtyCleanVisitor<'tcx> {
         let (name, mut auto) = self.auto_labels(item_id, attr);
         let except = self.except(attr);
         let loaded_from_disk = self.loaded_from_disk(attr);
-        for e in except.iter() {
+        for e in except.to_sorted(&(), false) {
             if !auto.remove(e) {
                 self.tcx.sess.emit_fatal(errors::AssertionAuto { span: attr.span, name, e });
             }
@@ -376,18 +377,21 @@ impl<'tcx> DirtyCleanVisitor<'tcx> {
                 continue;
             };
             self.checked_attrs.insert(attr.id);
-            for label in assertion.clean {
+            assertion.clean.items().all(|label| {
                 let dep_node = DepNode::from_label_string(self.tcx, &label, def_path_hash).unwrap();
                 self.assert_clean(item_span, dep_node);
-            }
-            for label in assertion.dirty {
+                true
+            });
+            assertion.dirty.items().all(|label| {
                 let dep_node = DepNode::from_label_string(self.tcx, &label, def_path_hash).unwrap();
                 self.assert_dirty(item_span, dep_node);
-            }
-            for label in assertion.loaded_from_disk {
+                true
+            });
+            assertion.loaded_from_disk.items().all(|label| {
                 let dep_node = DepNode::from_label_string(self.tcx, &label, def_path_hash).unwrap();
                 self.assert_loaded_from_disk(item_span, dep_node);
-            }
+                true
+            });
         }
     }
 }
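
The rewritten loops rely on the unordered collection handing out only order-independent combinators, so per-element work is driven through all(..) with closures that always return true. A minimal sketch of that shape — UnordItems below is a local stand-in, not rustc's actual type:

    // Hiding order-dependent iteration: no IntoIterator, so no plain `for`
    // loop; `all` is exposed because its result shouldn't depend on order.
    struct UnordItems<I>(I);

    impl<I: Iterator> UnordItems<I> {
        fn all(mut self, f: impl FnMut(I::Item) -> bool) -> bool {
            self.0.all(f)
        }
    }

    fn main() {
        let labels = UnordItems(["clean_label", "dirty_label"].into_iter());
        labels.all(|label| {
            println!("asserting {label}"); // per-element side effect
            true // never short-circuit, so every element is visited
        });
    }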

View file

@@ -104,8 +104,9 @@
 //! implemented.
 
 use crate::errors;
-use rustc_data_structures::fx::{FxHashSet, FxIndexMap, FxIndexSet};
+use rustc_data_structures::fx::{FxHashSet, FxIndexSet};
 use rustc_data_structures::svh::Svh;
+use rustc_data_structures::unord::{UnordMap, UnordSet};
 use rustc_data_structures::{base_n, flock};
 use rustc_errors::ErrorGuaranteed;
 use rustc_fs_util::{link_or_copy, try_canonicalize, LinkOrCopy};
@@ -636,7 +637,7 @@ pub fn garbage_collect_session_directories(sess: &Session) -> io::Result<()> {
     // First do a pass over the crate directory, collecting lock files and
     // session directories
     let mut session_directories = FxIndexSet::default();
-    let mut lock_files = FxIndexSet::default();
+    let mut lock_files = UnordSet::default();
 
     for dir_entry in crate_directory.read_dir()? {
         let Ok(dir_entry) = dir_entry else {
@@ -659,9 +660,8 @@ pub fn garbage_collect_session_directories(sess: &Session) -> io::Result<()> {
     }
 
     // Now map from lock files to session directories
-    let lock_file_to_session_dir: FxIndexMap<String, Option<String>> = lock_files
-        .into_iter()
-        .map(|lock_file_name| {
+    let lock_file_to_session_dir: UnordMap<String, Option<String>> =
+        UnordMap::from(lock_files.into_items().map(|lock_file_name| {
         assert!(lock_file_name.ends_with(LOCK_FILE_EXT));
         let dir_prefix_end = lock_file_name.len() - LOCK_FILE_EXT.len();
         let session_dir = {
@@ -669,12 +669,11 @@ pub fn garbage_collect_session_directories(sess: &Session) -> io::Result<()> {
             session_directories.iter().find(|dir_name| dir_name.starts_with(dir_prefix))
         };
         (lock_file_name, session_dir.map(String::clone))
-    })
-    .collect();
+    }));
 
     // Delete all lock files, that don't have an associated directory. They must
     // be some kind of leftover
-    for (lock_file_name, directory_name) in &lock_file_to_session_dir {
+    lock_file_to_session_dir.items().all(|(lock_file_name, directory_name)| {
         if directory_name.is_none() {
             let Ok(timestamp) = extract_timestamp_from_session_dir(lock_file_name) else {
                 debug!(
@@ -682,7 +681,7 @@ pub fn garbage_collect_session_directories(sess: &Session) -> io::Result<()> {
                     crate_directory.join(&lock_file_name).display()
                 );
                 // Ignore it
-                continue;
+                return true;
             };
 
             let lock_file_path = crate_directory.join(&**lock_file_name);
@@ -702,17 +701,18 @@ pub fn garbage_collect_session_directories(sess: &Session) -> io::Result<()> {
                 );
             }
         }
-    }
+        true
+    });
 
     // Filter out `None` directories
-    let lock_file_to_session_dir: FxIndexMap<String, String> = lock_file_to_session_dir
-        .into_iter()
-        .filter_map(|(lock_file_name, directory_name)| directory_name.map(|n| (lock_file_name, n)))
-        .collect();
+    let lock_file_to_session_dir: UnordMap<String, String> =
+        UnordMap::from(lock_file_to_session_dir.into_items().filter_map(
+            |(lock_file_name, directory_name)| directory_name.map(|n| (lock_file_name, n)),
+        ));
 
     // Delete all session directories that don't have a lock file.
     for directory_name in session_directories {
-        if !lock_file_to_session_dir.values().any(|dir| *dir == directory_name) {
+        if !lock_file_to_session_dir.items().any(|(_, dir)| *dir == directory_name) {
             let path = crate_directory.join(directory_name);
             if let Err(err) = safe_remove_dir_all(&path) {
                 sess.emit_warning(errors::InvalidGcFailed { path: &path, err });
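
The two rewritten steps above — build the lock-file-to-session-directory map from an iterator, then filter_map away the None entries — look like this over plain HashMaps (the file names are invented for illustration):

    use std::collections::HashMap;

    fn main() {
        let lock_files = vec!["s-abc123.lock".to_string(), "s-orphan.lock".to_string()];
        let session_directories = vec!["s-abc123-working".to_string()];

        // Pair every lock file with the session directory sharing its prefix, if any.
        let lock_file_to_session_dir: HashMap<String, Option<String>> = lock_files
            .into_iter()
            .map(|lock_file_name| {
                let dir_prefix = lock_file_name.trim_end_matches(".lock").to_string();
                let session_dir =
                    session_directories.iter().find(|d| d.starts_with(&dir_prefix)).cloned();
                (lock_file_name, session_dir)
            })
            .collect();

        // Filter out `None` directories, as the filter_map in the hunk does.
        let lock_file_to_session_dir: HashMap<String, String> = lock_file_to_session_dir
            .into_iter()
            .filter_map(|(lock, dir)| dir.map(|d| (lock, d)))
            .collect();

        assert_eq!(lock_file_to_session_dir.len(), 1);
    }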
@@ -721,103 +721,103 @@ pub fn garbage_collect_session_directories(sess: &Session) -> io::Result<()> {
     }
 
     // Now garbage collect the valid session directories.
-    let mut deletion_candidates = vec![];
-
-    for (lock_file_name, directory_name) in &lock_file_to_session_dir {
-        debug!("garbage_collect_session_directories() - inspecting: {}", directory_name);
-
-        let Ok(timestamp) = extract_timestamp_from_session_dir(directory_name) else {
-            debug!(
-                "found session-dir with malformed timestamp: {}",
-                crate_directory.join(directory_name).display()
-            );
-            // Ignore it
-            continue;
-        };
-
-        if is_finalized(directory_name) {
-            let lock_file_path = crate_directory.join(lock_file_name);
-            match flock::Lock::new(
-                &lock_file_path,
-                false, // don't wait
-                false, // don't create the lock-file
-                true,
-            ) {
-                // get an exclusive lock
-                Ok(lock) => {
-                    debug!(
-                        "garbage_collect_session_directories() - \
-                    successfully acquired lock"
-                    );
-                    debug!(
-                        "garbage_collect_session_directories() - adding \
-                    deletion candidate: {}",
-                        directory_name
-                    );
-
-                    // Note that we are holding on to the lock
-                    deletion_candidates.push((
-                        timestamp,
-                        crate_directory.join(directory_name),
-                        Some(lock),
-                    ));
-                }
-                Err(_) => {
-                    debug!(
-                        "garbage_collect_session_directories() - \
-                    not collecting, still in use"
-                    );
-                }
-            }
-        } else if is_old_enough_to_be_collected(timestamp) {
-            // When cleaning out "-working" session directories, i.e.
-            // session directories that might still be in use by another
-            // compiler instance, we only look a directories that are
-            // at least ten seconds old. This is supposed to reduce the
-            // chance of deleting a directory in the time window where
-            // the process has allocated the directory but has not yet
-            // acquired the file-lock on it.
-
-            // Try to acquire the directory lock. If we can't, it
-            // means that the owning process is still alive and we
-            // leave this directory alone.
-            let lock_file_path = crate_directory.join(lock_file_name);
-            match flock::Lock::new(
-                &lock_file_path,
-                false, // don't wait
-                false, // don't create the lock-file
-                true,
-            ) {
-                // get an exclusive lock
-                Ok(lock) => {
-                    debug!(
-                        "garbage_collect_session_directories() - \
-                    successfully acquired lock"
-                    );
-
-                    delete_old(sess, &crate_directory.join(directory_name));
-
-                    // Let's make it explicit that the file lock is released at this point,
-                    // or rather, that we held on to it until here
-                    drop(lock);
-                }
-                Err(_) => {
-                    debug!(
-                        "garbage_collect_session_directories() - \
-                    not collecting, still in use"
-                    );
-                }
-            }
-        } else {
-            debug!(
-                "garbage_collect_session_directories() - not finalized, not \
-                old enough"
-            );
-        }
-    }
+    let deletion_candidates =
+        lock_file_to_session_dir.items().filter_map(|(lock_file_name, directory_name)| {
+            debug!("garbage_collect_session_directories() - inspecting: {}", directory_name);
+
+            let Ok(timestamp) = extract_timestamp_from_session_dir(directory_name) else {
+                debug!(
+                    "found session-dir with malformed timestamp: {}",
+                    crate_directory.join(directory_name).display()
+                );
+                // Ignore it
+                return None;
+            };
+
+            if is_finalized(directory_name) {
+                let lock_file_path = crate_directory.join(lock_file_name);
+                match flock::Lock::new(
+                    &lock_file_path,
+                    false, // don't wait
+                    false, // don't create the lock-file
+                    true,
+                ) {
+                    // get an exclusive lock
+                    Ok(lock) => {
+                        debug!(
+                            "garbage_collect_session_directories() - \
+                        successfully acquired lock"
+                        );
+                        debug!(
+                            "garbage_collect_session_directories() - adding \
+                        deletion candidate: {}",
+                            directory_name
+                        );
+
+                        // Note that we are holding on to the lock
+                        return Some((
+                            (timestamp, crate_directory.join(directory_name)),
+                            Some(lock),
+                        ));
+                    }
+                    Err(_) => {
+                        debug!(
+                            "garbage_collect_session_directories() - \
+                        not collecting, still in use"
+                        );
+                    }
+                }
+            } else if is_old_enough_to_be_collected(timestamp) {
+                // When cleaning out "-working" session directories, i.e.
+                // session directories that might still be in use by another
+                // compiler instance, we only look a directories that are
+                // at least ten seconds old. This is supposed to reduce the
+                // chance of deleting a directory in the time window where
+                // the process has allocated the directory but has not yet
+                // acquired the file-lock on it.
+
+                // Try to acquire the directory lock. If we can't, it
+                // means that the owning process is still alive and we
+                // leave this directory alone.
+                let lock_file_path = crate_directory.join(lock_file_name);
+                match flock::Lock::new(
+                    &lock_file_path,
+                    false, // don't wait
+                    false, // don't create the lock-file
+                    true,
+                ) {
+                    // get an exclusive lock
+                    Ok(lock) => {
+                        debug!(
+                            "garbage_collect_session_directories() - \
+                        successfully acquired lock"
+                        );
+
+                        delete_old(sess, &crate_directory.join(directory_name));
+
+                        // Let's make it explicit that the file lock is released at this point,
+                        // or rather, that we held on to it until here
+                        drop(lock);
+                    }
+                    Err(_) => {
+                        debug!(
+                            "garbage_collect_session_directories() - \
+                        not collecting, still in use"
+                        );
+                    }
+                }
+            } else {
+                debug!(
+                    "garbage_collect_session_directories() - not finalized, not \
+                old enough"
+                );
+            }
+            None
+        });
+
+    let deletion_candidates = UnordMap::from(deletion_candidates);
 
     // Delete all but the most recent of the candidates
-    for (path, lock) in all_except_most_recent(deletion_candidates) {
+    all_except_most_recent(deletion_candidates).into_items().all(|(path, lock)| {
         debug!("garbage_collect_session_directories() - deleting `{}`", path.display());
if let Err(err) = safe_remove_dir_all(&path) {
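The filter_map above yields each candidate as ((timestamp, path), Some(lock)), so the acquired file lock lives in the map value and stays alive until the directory is actually deleted. A sketch of that ownership pattern — FakeLock is a hypothetical stand-in for flock::Lock:

    use std::collections::HashMap;
    use std::path::PathBuf;
    use std::time::{Duration, SystemTime, UNIX_EPOCH};

    // Dropping the guard models releasing the file lock.
    struct FakeLock;
    impl Drop for FakeLock {
        fn drop(&mut self) {
            println!("lock released");
        }
    }

    fn main() {
        let mut candidates: HashMap<(SystemTime, PathBuf), Option<FakeLock>> = HashMap::new();
        candidates.insert(
            (UNIX_EPOCH + Duration::new(1, 0), PathBuf::from("s-old-session")),
            Some(FakeLock),
        );

        for ((_timestamp, path), lock) in candidates {
            println!("deleting {}", path.display());
            // Make it explicit that the lock is held until after the delete.
            drop(lock);
        }
    }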
@@ -829,7 +829,8 @@ pub fn garbage_collect_session_directories(sess: &Session) -> io::Result<()> {
         // Let's make it explicit that the file lock is released at this point,
         // or rather, that we held on to it until here
         drop(lock);
-    }
+        true
+    });
 
     Ok(())
 }
@@ -845,18 +846,19 @@ fn delete_old(sess: &Session, path: &Path) {
 }
 
 fn all_except_most_recent(
-    deletion_candidates: Vec<(SystemTime, PathBuf, Option<flock::Lock>)>,
-) -> FxIndexMap<PathBuf, Option<flock::Lock>> {
-    let most_recent = deletion_candidates.iter().map(|&(timestamp, ..)| timestamp).max();
+    deletion_candidates: UnordMap<(SystemTime, PathBuf), Option<flock::Lock>>,
+) -> UnordMap<PathBuf, Option<flock::Lock>> {
+    let most_recent = deletion_candidates.items().map(|(&(timestamp, _), _)| timestamp).max();
 
     if let Some(most_recent) = most_recent {
-        deletion_candidates
-            .into_iter()
-            .filter(|&(timestamp, ..)| timestamp != most_recent)
-            .map(|(_, path, lock)| (path, lock))
-            .collect()
+        UnordMap::from(
+            deletion_candidates
+                .into_items()
+                .filter(|&((timestamp, _), _)| timestamp != most_recent)
+                .map(|((_, path), lock)| (path, lock)),
+        )
     } else {
-        FxIndexMap::default()
+        UnordMap::default()
    }
 }
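
The new signature keys candidates by (SystemTime, PathBuf) and re-keys the survivors by path alone. The same algorithm over a plain HashMap, for reference:

    use std::collections::HashMap;
    use std::path::PathBuf;
    use std::time::{Duration, SystemTime, UNIX_EPOCH};

    // Keep every candidate except the one with the newest timestamp.
    fn all_except_most_recent(
        candidates: HashMap<(SystemTime, PathBuf), ()>,
    ) -> HashMap<PathBuf, ()> {
        let most_recent = candidates.keys().map(|&(timestamp, _)| timestamp).max();
        match most_recent {
            Some(most_recent) => candidates
                .into_iter()
                .filter(|&((timestamp, _), _)| timestamp != most_recent)
                .map(|((_, path), value)| (path, value))
                .collect(),
            None => HashMap::new(),
        }
    }

    fn main() {
        let mut candidates = HashMap::new();
        candidates.insert((UNIX_EPOCH + Duration::new(1, 0), PathBuf::from("old")), ());
        candidates.insert((UNIX_EPOCH + Duration::new(9, 0), PathBuf::from("new")), ());
        let survivors = all_except_most_recent(candidates);
        assert!(survivors.contains_key(&PathBuf::from("old")));
        assert!(!survivors.contains_key(&PathBuf::from("new")));
    }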

View file

@@ -2,26 +2,26 @@ use super::*;
 
 #[test]
 fn test_all_except_most_recent() {
-    assert_eq!(
-        all_except_most_recent(vec![
-            (UNIX_EPOCH + Duration::new(4, 0), PathBuf::from("4"), None),
-            (UNIX_EPOCH + Duration::new(1, 0), PathBuf::from("1"), None),
-            (UNIX_EPOCH + Duration::new(5, 0), PathBuf::from("5"), None),
-            (UNIX_EPOCH + Duration::new(3, 0), PathBuf::from("3"), None),
-            (UNIX_EPOCH + Duration::new(2, 0), PathBuf::from("2"), None),
-        ])
-        .keys()
-        .cloned()
-        .collect::<FxHashSet<PathBuf>>(),
-        [PathBuf::from("1"), PathBuf::from("2"), PathBuf::from("3"), PathBuf::from("4"),]
-            .into_iter()
-            .collect::<FxHashSet<PathBuf>>()
-    );
-
-    assert_eq!(
-        all_except_most_recent(vec![]).keys().cloned().collect::<FxHashSet<PathBuf>>(),
-        FxHashSet::default()
-    );
+    let computed: UnordMap<_, Option<flock::Lock>> = UnordMap::from_iter([
+        ((UNIX_EPOCH + Duration::new(4, 0), PathBuf::from("4")), None),
+        ((UNIX_EPOCH + Duration::new(1, 0), PathBuf::from("1")), None),
+        ((UNIX_EPOCH + Duration::new(5, 0), PathBuf::from("5")), None),
+        ((UNIX_EPOCH + Duration::new(3, 0), PathBuf::from("3")), None),
+        ((UNIX_EPOCH + Duration::new(2, 0), PathBuf::from("2")), None),
+    ]);
+    let mut paths = UnordSet::default();
+    UnordSet::extend_unord(&mut paths, computed.into_items().map(|((_, path), _)| path));
+    assert_eq!(
+        UnordSet::from(paths),
+        UnordSet::from_iter([
+            PathBuf::from("1"),
+            PathBuf::from("2"),
+            PathBuf::from("3"),
+            PathBuf::from("4")
+        ])
+    );
+
+    assert!(all_except_most_recent(UnordMap::default()).is_empty());
 }
 
 #[test]
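
The rewritten test asserts over unordered sets, so it no longer depends on any particular iteration or insertion order. The core of that assertion style with std types:

    use std::collections::HashSet;
    use std::path::PathBuf;

    // HashSet equality, like UnordSet equality, ignores order entirely.
    fn main() {
        let got: HashSet<PathBuf> =
            ["4", "1", "3", "2"].into_iter().map(PathBuf::from).collect();
        let expected: HashSet<PathBuf> =
            ["1", "2", "3", "4"].into_iter().map(PathBuf::from).collect();
        assert_eq!(got, expected); // passes regardless of insertion order
    }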
#[test]

View file

@@ -1,8 +1,8 @@
 //! Code to save/load the dep-graph from files.
 
 use crate::errors;
-use rustc_data_structures::fx::FxIndexMap;
 use rustc_data_structures::memmap::Mmap;
+use rustc_data_structures::unord::UnordMap;
 use rustc_middle::dep_graph::{SerializedDepGraph, WorkProduct, WorkProductId};
 use rustc_middle::query::on_disk_cache::OnDiskCache;
 use rustc_serialize::opaque::MemDecoder;
@@ -16,7 +16,7 @@ use super::file_format;
 use super::fs::*;
 use super::work_product;
 
-type WorkProductMap = FxIndexMap<WorkProductId, WorkProduct>;
+type WorkProductMap = UnordMap<WorkProductId, WorkProduct>;
 
 #[derive(Debug)]
 /// Represents the result of an attempt to load incremental compilation data.
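
Because call sites name the WorkProductMap alias rather than a concrete map type, only the alias itself and the few order-sensitive uses have to change. A sketch of that seam, with types simplified from rustc's:

    use std::collections::HashMap;

    type WorkProductId = u64;
    type WorkProduct = String;
    type WorkProductMap = HashMap<WorkProductId, WorkProduct>;

    // Order-independent operations compile unchanged whether the alias
    // names an insertion-ordered or an unordered map.
    fn record(map: &mut WorkProductMap, id: WorkProductId, product: WorkProduct) {
        map.insert(id, product);
    }

    fn main() {
        let mut map = WorkProductMap::default();
        record(&mut map, 1, "a.o".to_string());
        record(&mut map, 2, "b.o".to_string());
        assert_eq!(map.get(&1).map(String::as_str), Some("a.o"));
    }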

View file

@@ -5,6 +5,7 @@ use crate::passes;
 use rustc_ast as ast;
 use rustc_codegen_ssa::traits::CodegenBackend;
 use rustc_codegen_ssa::CodegenResults;
+use rustc_data_structures::fx::FxIndexMap;
 use rustc_data_structures::steal::Steal;
 use rustc_data_structures::svh::Svh;
 use rustc_data_structures::sync::{AppendOnlyIndexVec, Lrc, OnceCell, RwLock, WorkerLocal};
@@ -195,7 +196,8 @@ impl<'tcx> Queries<'tcx> {
             .and_then(|future| {
                 let (prev_graph, prev_work_products) =
                     sess.time("blocked_on_dep_graph_loading", || future.open().open(sess));
+                let prev_work_products =
+                    FxIndexMap::from_iter(prev_work_products.into_sorted(&(), false));
                 rustc_incremental::build_dep_graph(sess, prev_graph, prev_work_products)
             })
             .unwrap_or_else(DepGraph::new_disabled);
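
This hunk is the boundary where unordered data re-enters order-sensitive code: the loaded work products get one deterministic sort before build_dep_graph sees them. The analogous conversion with std types (into_sorted's second argument, a sort-key caching flag, has no analogue here):

    use std::collections::HashMap;

    fn main() {
        let prev_work_products: HashMap<u64, &str> = HashMap::from([(2, "b.o"), (1, "a.o")]);

        // Sort the unordered entries by key once; anything built from
        // `sorted` now iterates deterministically.
        let mut sorted: Vec<(u64, &str)> = prev_work_products.into_iter().collect();
        sorted.sort_by_key(|&(id, _)| id);

        for (id, obj) in &sorted {
            println!("{id} => {obj}");
        }
    }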

View file

@@ -46,7 +46,7 @@ use super::{DepContext, DepKind, FingerprintStyle};
 use crate::ich::StableHashingContext;
 use rustc_data_structures::fingerprint::{Fingerprint, PackedFingerprint};
-use rustc_data_structures::stable_hasher::{HashStable, StableHasher};
+use rustc_data_structures::stable_hasher::{HashStable, StableHasher, ToStableHashKey};
 use rustc_hir::definitions::DefPathHash;
 use std::fmt;
 use std::hash::Hash;
@@ -247,3 +247,10 @@ impl<HCX> HashStable<HCX> for WorkProductId {
         self.hash.hash_stable(hcx, hasher)
     }
 }
+
+impl<HCX> ToStableHashKey<HCX> for WorkProductId {
+    type KeyType = Fingerprint;
+    #[inline]
+    fn to_stable_hash_key(&self, _: &HCX) -> Self::KeyType {
+        self.hash
+    }
+}
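
The new impl is what lets WorkProductId-keyed collections be sorted on demand: each id maps to its fingerprint, which is stable across compilation sessions. A simplified model of that contract — ToStableKey is a local stand-in for ToStableHashKey, and u128 stands in for Fingerprint:

    // A value maps to a key that is totally ordered and stable across runs,
    // so an unordered collection can be sorted deterministically when needed.
    trait ToStableKey {
        type KeyType: Ord;
        fn to_stable_key(&self) -> Self::KeyType;
    }

    struct WorkProductId {
        hash: u128,
    }

    impl ToStableKey for WorkProductId {
        type KeyType = u128;
        #[inline]
        fn to_stable_key(&self) -> u128 {
            self.hash
        }
    }

    // The generic sort that impls like the one above unlock.
    fn into_sorted<T: ToStableKey>(mut items: Vec<T>) -> Vec<T> {
        items.sort_by_key(|item| item.to_stable_key());
        items
    }

    fn main() {
        let ids = vec![WorkProductId { hash: 3 }, WorkProductId { hash: 1 }];
        let sorted = into_sorted(ids);
        assert_eq!(sorted[0].hash, 1);
    }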