Stream the dep-graph to a file.
This commit is contained in:
parent
16156fb278
commit
6bfaf3a9cb
18 changed files with 710 additions and 918 deletions
|
@ -40,8 +40,9 @@ use rustc_graphviz as dot;
|
|||
use rustc_hir as hir;
|
||||
use rustc_hir::def_id::DefId;
|
||||
use rustc_hir::intravisit::{self, NestedVisitorMap, Visitor};
|
||||
use rustc_middle::dep_graph::debug::{DepNodeFilter, EdgeFilter};
|
||||
use rustc_middle::dep_graph::{DepGraphQuery, DepKind, DepNode, DepNodeExt};
|
||||
use rustc_middle::dep_graph::{
|
||||
DepGraphQuery, DepKind, DepNode, DepNodeExt, DepNodeFilter, EdgeFilter,
|
||||
};
|
||||
use rustc_middle::hir::map::Map;
|
||||
use rustc_middle::ty::TyCtxt;
|
||||
use rustc_span::symbol::{sym, Symbol};
|
||||
|
@ -54,7 +55,7 @@ use std::io::{BufWriter, Write};
|
|||
pub fn assert_dep_graph(tcx: TyCtxt<'_>) {
|
||||
tcx.dep_graph.with_ignore(|| {
|
||||
if tcx.sess.opts.debugging_opts.dump_dep_graph {
|
||||
dump_graph(tcx);
|
||||
tcx.dep_graph.with_query(dump_graph);
|
||||
}
|
||||
|
||||
if !tcx.sess.opts.debugging_opts.query_dep_graph {
|
||||
|
@ -200,29 +201,29 @@ fn check_paths<'tcx>(tcx: TyCtxt<'tcx>, if_this_changed: &Sources, then_this_wou
|
|||
}
|
||||
return;
|
||||
}
|
||||
let query = tcx.dep_graph.query();
|
||||
for &(_, source_def_id, ref source_dep_node) in if_this_changed {
|
||||
let dependents = query.transitive_predecessors(source_dep_node);
|
||||
for &(target_span, ref target_pass, _, ref target_dep_node) in then_this_would_need {
|
||||
if !dependents.contains(&target_dep_node) {
|
||||
tcx.sess.span_err(
|
||||
target_span,
|
||||
&format!(
|
||||
"no path from `{}` to `{}`",
|
||||
tcx.def_path_str(source_def_id),
|
||||
target_pass
|
||||
),
|
||||
);
|
||||
} else {
|
||||
tcx.sess.span_err(target_span, "OK");
|
||||
tcx.dep_graph.with_query(|query| {
|
||||
for &(_, source_def_id, ref source_dep_node) in if_this_changed {
|
||||
let dependents = query.transitive_predecessors(source_dep_node);
|
||||
for &(target_span, ref target_pass, _, ref target_dep_node) in then_this_would_need {
|
||||
if !dependents.contains(&target_dep_node) {
|
||||
tcx.sess.span_err(
|
||||
target_span,
|
||||
&format!(
|
||||
"no path from `{}` to `{}`",
|
||||
tcx.def_path_str(source_def_id),
|
||||
target_pass
|
||||
),
|
||||
);
|
||||
} else {
|
||||
tcx.sess.span_err(target_span, "OK");
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
fn dump_graph(tcx: TyCtxt<'_>) {
|
||||
fn dump_graph(query: &DepGraphQuery) {
|
||||
let path: String = env::var("RUST_DEP_GRAPH").unwrap_or_else(|_| "dep_graph".to_string());
|
||||
let query = tcx.dep_graph.query();
|
||||
|
||||
let nodes = match env::var("RUST_DEP_GRAPH_FILTER") {
|
||||
Ok(string) => {
|
||||
|
|
|
@ -14,7 +14,7 @@ mod assert_dep_graph;
|
|||
pub mod assert_module_sources;
|
||||
mod persist;
|
||||
|
||||
pub use assert_dep_graph::assert_dep_graph;
|
||||
use assert_dep_graph::assert_dep_graph;
|
||||
pub use persist::copy_cgu_workproduct_to_incr_comp_cache_dir;
|
||||
pub use persist::delete_workproduct_files;
|
||||
pub use persist::finalize_session_directory;
|
||||
|
@ -26,4 +26,4 @@ pub use persist::prepare_session_directory;
|
|||
pub use persist::save_dep_graph;
|
||||
pub use persist::save_work_product_index;
|
||||
pub use persist::LoadResult;
|
||||
pub use persist::{load_dep_graph, DepGraphFuture};
|
||||
pub use persist::{build_dep_graph, load_dep_graph, DepGraphFuture};
|
||||
|
|
|
@ -14,7 +14,6 @@
|
|||
//! the required condition is not met.
|
||||
|
||||
use rustc_ast::{self as ast, Attribute, NestedMetaItem};
|
||||
use rustc_data_structures::fingerprint::Fingerprint;
|
||||
use rustc_data_structures::fx::FxHashSet;
|
||||
use rustc_hir as hir;
|
||||
use rustc_hir::def_id::{DefId, LocalDefId};
|
||||
|
@ -381,10 +380,7 @@ impl DirtyCleanVisitor<'tcx> {
|
|||
fn assert_dirty(&self, item_span: Span, dep_node: DepNode) {
|
||||
debug!("assert_dirty({:?})", dep_node);
|
||||
|
||||
let current_fingerprint = self.get_fingerprint(&dep_node);
|
||||
let prev_fingerprint = self.tcx.dep_graph.prev_fingerprint_of(&dep_node);
|
||||
|
||||
if current_fingerprint == prev_fingerprint {
|
||||
if self.tcx.dep_graph.is_green(&dep_node) {
|
||||
let dep_node_str = self.dep_node_str(&dep_node);
|
||||
self.tcx
|
||||
.sess
|
||||
|
@ -392,28 +388,12 @@ impl DirtyCleanVisitor<'tcx> {
|
|||
}
|
||||
}
|
||||
|
||||
fn get_fingerprint(&self, dep_node: &DepNode) -> Option<Fingerprint> {
|
||||
if self.tcx.dep_graph.dep_node_exists(dep_node) {
|
||||
let dep_node_index = self.tcx.dep_graph.dep_node_index_of(dep_node);
|
||||
Some(self.tcx.dep_graph.fingerprint_of(dep_node_index))
|
||||
} else {
|
||||
None
|
||||
}
|
||||
}
|
||||
|
||||
fn assert_clean(&self, item_span: Span, dep_node: DepNode) {
|
||||
debug!("assert_clean({:?})", dep_node);
|
||||
|
||||
let current_fingerprint = self.get_fingerprint(&dep_node);
|
||||
let prev_fingerprint = self.tcx.dep_graph.prev_fingerprint_of(&dep_node);
|
||||
|
||||
// if the node wasn't previously evaluated and now is (or vice versa),
|
||||
// then the node isn't actually clean or dirty.
|
||||
if (current_fingerprint == None) ^ (prev_fingerprint == None) {
|
||||
return;
|
||||
}
|
||||
|
||||
if current_fingerprint != prev_fingerprint {
|
||||
if self.tcx.dep_graph.is_red(&dep_node) {
|
||||
let dep_node_str = self.dep_node_str(&dep_node);
|
||||
self.tcx
|
||||
.sess
|
||||
|
|
|
@ -122,6 +122,7 @@ mod tests;
|
|||
|
||||
const LOCK_FILE_EXT: &str = ".lock";
|
||||
const DEP_GRAPH_FILENAME: &str = "dep-graph.bin";
|
||||
const STAGING_DEP_GRAPH_FILENAME: &str = "dep-graph.part.bin";
|
||||
const WORK_PRODUCTS_FILENAME: &str = "work-products.bin";
|
||||
const QUERY_CACHE_FILENAME: &str = "query-cache.bin";
|
||||
|
||||
|
@ -134,6 +135,9 @@ const INT_ENCODE_BASE: usize = base_n::CASE_INSENSITIVE;
|
|||
pub fn dep_graph_path(sess: &Session) -> PathBuf {
|
||||
in_incr_comp_dir_sess(sess, DEP_GRAPH_FILENAME)
|
||||
}
|
||||
pub fn staging_dep_graph_path(sess: &Session) -> PathBuf {
|
||||
in_incr_comp_dir_sess(sess, STAGING_DEP_GRAPH_FILENAME)
|
||||
}
|
||||
pub fn dep_graph_path_from(incr_comp_session_dir: &Path) -> PathBuf {
|
||||
in_incr_comp_dir(incr_comp_session_dir, DEP_GRAPH_FILENAME)
|
||||
}
|
||||
|
|
|
@ -5,7 +5,7 @@ use rustc_hir::definitions::Definitions;
|
|||
use rustc_middle::dep_graph::{PreviousDepGraph, SerializedDepGraph, WorkProduct, WorkProductId};
|
||||
use rustc_middle::ty::query::OnDiskCache;
|
||||
use rustc_serialize::opaque::Decoder;
|
||||
use rustc_serialize::Decodable as RustcDecodable;
|
||||
use rustc_serialize::Decodable;
|
||||
use rustc_session::Session;
|
||||
use std::path::Path;
|
||||
|
||||
|
@ -120,7 +120,7 @@ pub fn load_dep_graph(sess: &Session) -> DepGraphFuture {
|
|||
// Decode the list of work_products
|
||||
let mut work_product_decoder = Decoder::new(&work_products_data[..], start_pos);
|
||||
let work_products: Vec<SerializedWorkProduct> =
|
||||
RustcDecodable::decode(&mut work_product_decoder).unwrap_or_else(|e| {
|
||||
Decodable::decode(&mut work_product_decoder).unwrap_or_else(|e| {
|
||||
let msg = format!(
|
||||
"Error decoding `work-products` from incremental \
|
||||
compilation session directory: {}",
|
||||
|
|
|
@ -18,6 +18,7 @@ pub use fs::prepare_session_directory;
|
|||
pub use load::load_query_result_cache;
|
||||
pub use load::LoadResult;
|
||||
pub use load::{load_dep_graph, DepGraphFuture};
|
||||
pub use save::build_dep_graph;
|
||||
pub use save::save_dep_graph;
|
||||
pub use save::save_work_product_index;
|
||||
pub use work_product::copy_cgu_workproduct_to_incr_comp_cache_dir;
|
||||
|
|
|
@ -1,6 +1,6 @@
|
|||
use rustc_data_structures::fx::FxHashMap;
|
||||
use rustc_data_structures::sync::join;
|
||||
use rustc_middle::dep_graph::{DepGraph, WorkProduct, WorkProductId};
|
||||
use rustc_middle::dep_graph::{DepGraph, PreviousDepGraph, WorkProduct, WorkProductId};
|
||||
use rustc_middle::ty::TyCtxt;
|
||||
use rustc_serialize::opaque::{FileEncodeResult, FileEncoder};
|
||||
use rustc_serialize::Encodable as RustcEncodable;
|
||||
|
@ -15,6 +15,9 @@ use super::file_format;
|
|||
use super::fs::*;
|
||||
use super::work_product;
|
||||
|
||||
/// Save and dump the DepGraph.
|
||||
///
|
||||
/// No query must be invoked after this function.
|
||||
pub fn save_dep_graph(tcx: TyCtxt<'_>) {
|
||||
debug!("save_dep_graph()");
|
||||
tcx.dep_graph.with_ignore(|| {
|
||||
|
@ -29,6 +32,16 @@ pub fn save_dep_graph(tcx: TyCtxt<'_>) {
|
|||
|
||||
let query_cache_path = query_cache_path(sess);
|
||||
let dep_graph_path = dep_graph_path(sess);
|
||||
let staging_dep_graph_path = staging_dep_graph_path(sess);
|
||||
|
||||
join(
|
||||
|| sess.time("assert_dep_graph", || crate::assert_dep_graph(tcx)),
|
||||
|| sess.time("check_dirty_clean", || dirty_clean::check_dirty_clean_annotations(tcx)),
|
||||
);
|
||||
|
||||
if sess.opts.debugging_opts.incremental_info {
|
||||
tcx.dep_graph.print_incremental_info()
|
||||
}
|
||||
|
||||
join(
|
||||
move || {
|
||||
|
@ -36,16 +49,26 @@ pub fn save_dep_graph(tcx: TyCtxt<'_>) {
|
|||
save_in(sess, query_cache_path, "query cache", |e| encode_query_cache(tcx, e));
|
||||
});
|
||||
},
|
||||
|| {
|
||||
move || {
|
||||
sess.time("incr_comp_persist_dep_graph", || {
|
||||
save_in(sess, dep_graph_path, "dependency graph", |e| {
|
||||
sess.time("incr_comp_encode_dep_graph", || encode_dep_graph(tcx, e))
|
||||
});
|
||||
if let Err(err) = tcx.dep_graph.encode() {
|
||||
sess.err(&format!(
|
||||
"failed to write dependency graph to `{}`: {}",
|
||||
staging_dep_graph_path.display(),
|
||||
err
|
||||
));
|
||||
}
|
||||
if let Err(err) = fs::rename(&staging_dep_graph_path, &dep_graph_path) {
|
||||
sess.err(&format!(
|
||||
"failed to move dependency graph from `{}` to `{}`: {}",
|
||||
staging_dep_graph_path.display(),
|
||||
dep_graph_path.display(),
|
||||
err
|
||||
));
|
||||
}
|
||||
});
|
||||
},
|
||||
);
|
||||
|
||||
dirty_clean::check_dirty_clean_annotations(tcx);
|
||||
})
|
||||
}
|
||||
|
||||
|
@ -92,7 +115,7 @@ pub fn save_work_product_index(
|
|||
});
|
||||
}
|
||||
|
||||
fn save_in<F>(sess: &Session, path_buf: PathBuf, name: &str, encode: F)
|
||||
pub(crate) fn save_in<F>(sess: &Session, path_buf: PathBuf, name: &str, encode: F)
|
||||
where
|
||||
F: FnOnce(&mut FileEncoder) -> FileEncodeResult,
|
||||
{
|
||||
|
@ -144,21 +167,6 @@ where
|
|||
debug!("save: data written to disk successfully");
|
||||
}
|
||||
|
||||
fn encode_dep_graph(tcx: TyCtxt<'_>, encoder: &mut FileEncoder) -> FileEncodeResult {
|
||||
// First encode the commandline arguments hash
|
||||
tcx.sess.opts.dep_tracking_hash().encode(encoder)?;
|
||||
|
||||
if tcx.sess.opts.debugging_opts.incremental_info {
|
||||
tcx.dep_graph.print_incremental_info();
|
||||
}
|
||||
|
||||
// There is a tiny window between printing the incremental info above and encoding the dep
|
||||
// graph below in which the dep graph could change, thus making the printed incremental info
|
||||
// slightly out of date. If this matters to you, please feel free to submit a patch. :)
|
||||
|
||||
tcx.sess.time("incr_comp_encode_serialized_dep_graph", || tcx.dep_graph.encode(encoder))
|
||||
}
|
||||
|
||||
fn encode_work_product_index(
|
||||
work_products: &FxHashMap<WorkProductId, WorkProduct>,
|
||||
encoder: &mut FileEncoder,
|
||||
|
@ -177,3 +185,56 @@ fn encode_work_product_index(
|
|||
fn encode_query_cache(tcx: TyCtxt<'_>, encoder: &mut FileEncoder) -> FileEncodeResult {
|
||||
tcx.sess.time("incr_comp_serialize_result_cache", || tcx.serialize_query_result_cache(encoder))
|
||||
}
|
||||
|
||||
pub fn build_dep_graph(
|
||||
sess: &Session,
|
||||
prev_graph: PreviousDepGraph,
|
||||
prev_work_products: FxHashMap<WorkProductId, WorkProduct>,
|
||||
) -> Option<DepGraph> {
|
||||
if sess.opts.incremental.is_none() {
|
||||
// No incremental compilation.
|
||||
return None;
|
||||
}
|
||||
|
||||
// Stream the dep-graph to an alternate file, to avoid overwriting anything in case of errors.
|
||||
let path_buf = staging_dep_graph_path(sess);
|
||||
|
||||
let mut encoder = match FileEncoder::new(&path_buf) {
|
||||
Ok(encoder) => encoder,
|
||||
Err(err) => {
|
||||
sess.err(&format!(
|
||||
"failed to create dependency graph at `{}`: {}",
|
||||
path_buf.display(),
|
||||
err
|
||||
));
|
||||
return None;
|
||||
}
|
||||
};
|
||||
|
||||
if let Err(err) = file_format::write_file_header(&mut encoder, sess.is_nightly_build()) {
|
||||
sess.err(&format!(
|
||||
"failed to write dependency graph header to `{}`: {}",
|
||||
path_buf.display(),
|
||||
err
|
||||
));
|
||||
return None;
|
||||
}
|
||||
|
||||
// First encode the commandline arguments hash
|
||||
if let Err(err) = sess.opts.dep_tracking_hash().encode(&mut encoder) {
|
||||
sess.err(&format!(
|
||||
"failed to write dependency graph hash `{}`: {}",
|
||||
path_buf.display(),
|
||||
err
|
||||
));
|
||||
return None;
|
||||
}
|
||||
|
||||
Some(DepGraph::new(
|
||||
prev_graph,
|
||||
prev_work_products,
|
||||
encoder,
|
||||
sess.opts.debugging_opts.query_dep_graph,
|
||||
sess.opts.debugging_opts.incremental_info,
|
||||
))
|
||||
}
|
||||
|
|
|
@ -1021,9 +1021,6 @@ pub fn start_codegen<'tcx>(
|
|||
rustc_symbol_mangling::test::report_symbol_names(tcx);
|
||||
}
|
||||
|
||||
tcx.sess.time("assert_dep_graph", || rustc_incremental::assert_dep_graph(tcx));
|
||||
tcx.sess.time("serialize_dep_graph", || rustc_incremental::save_dep_graph(tcx));
|
||||
|
||||
info!("Post-codegen\n{:?}", tcx.debug_stats());
|
||||
|
||||
if tcx.sess.opts.output_types.contains_key(&OutputType::Mir) {
|
||||
|
|
|
@ -207,7 +207,13 @@ impl<'tcx> Queries<'tcx> {
|
|||
})
|
||||
.open(self.session())
|
||||
});
|
||||
DepGraph::new(prev_graph, prev_work_products)
|
||||
|
||||
rustc_incremental::build_dep_graph(
|
||||
self.session(),
|
||||
prev_graph,
|
||||
prev_work_products,
|
||||
)
|
||||
.unwrap_or_else(DepGraph::new_disabled)
|
||||
}
|
||||
})
|
||||
})
|
||||
|
@ -435,6 +441,9 @@ impl Compiler {
|
|||
if self.session().opts.debugging_opts.query_stats {
|
||||
gcx.enter(rustc_query_impl::print_stats);
|
||||
}
|
||||
|
||||
self.session()
|
||||
.time("serialize_dep_graph", || gcx.enter(rustc_incremental::save_dep_graph));
|
||||
}
|
||||
|
||||
_timer = Some(self.session().timer("free_global_ctxt"));
|
||||
|
|
|
@ -8,8 +8,8 @@ use rustc_session::Session;
|
|||
mod dep_node;
|
||||
|
||||
pub use rustc_query_system::dep_graph::{
|
||||
debug, hash_result, DepContext, DepNodeColor, DepNodeIndex, SerializedDepNodeIndex,
|
||||
WorkProduct, WorkProductId,
|
||||
debug::DepNodeFilter, hash_result, DepContext, DepNodeColor, DepNodeIndex,
|
||||
SerializedDepNodeIndex, WorkProduct, WorkProductId,
|
||||
};
|
||||
|
||||
crate use dep_node::make_compile_codegen_unit;
|
||||
|
@ -20,6 +20,7 @@ pub type TaskDeps = rustc_query_system::dep_graph::TaskDeps<DepKind>;
|
|||
pub type DepGraphQuery = rustc_query_system::dep_graph::DepGraphQuery<DepKind>;
|
||||
pub type PreviousDepGraph = rustc_query_system::dep_graph::PreviousDepGraph<DepKind>;
|
||||
pub type SerializedDepGraph = rustc_query_system::dep_graph::SerializedDepGraph<DepKind>;
|
||||
pub type EdgeFilter = rustc_query_system::dep_graph::debug::EdgeFilter<DepKind>;
|
||||
|
||||
impl rustc_query_system::dep_graph::DepKind for DepKind {
|
||||
const NULL: Self = DepKind::Null;
|
||||
|
|
|
@ -477,10 +477,7 @@ macro_rules! define_queries {
|
|||
return
|
||||
}
|
||||
|
||||
debug_assert!(tcx.dep_graph
|
||||
.node_color(dep_node)
|
||||
.map(|c| c.is_green())
|
||||
.unwrap_or(false));
|
||||
debug_assert!(tcx.dep_graph.is_green(dep_node));
|
||||
|
||||
let key = recover(*tcx, dep_node).unwrap_or_else(|| panic!("Failed to recover key for {:?} with hash {}", dep_node, dep_node.hash));
|
||||
if queries::$name::cache_on_disk(tcx, &key, None) {
|
||||
|
|
|
@ -1,6 +1,8 @@
|
|||
//! Code for debugging the dep-graph.
|
||||
|
||||
use super::{DepKind, DepNode};
|
||||
use super::{DepKind, DepNode, DepNodeIndex};
|
||||
use rustc_data_structures::fx::FxHashMap;
|
||||
use rustc_data_structures::sync::Lock;
|
||||
use std::error::Error;
|
||||
|
||||
/// A dep-node filter goes from a user-defined string to a query over
|
||||
|
@ -34,13 +36,14 @@ impl DepNodeFilter {
|
|||
|
||||
/// A filter like `F -> G` where `F` and `G` are valid dep-node
|
||||
/// filters. This can be used to test the source/target independently.
|
||||
pub struct EdgeFilter {
|
||||
pub struct EdgeFilter<K: DepKind> {
|
||||
pub source: DepNodeFilter,
|
||||
pub target: DepNodeFilter,
|
||||
pub index_to_node: Lock<FxHashMap<DepNodeIndex, DepNode<K>>>,
|
||||
}
|
||||
|
||||
impl EdgeFilter {
|
||||
pub fn new(test: &str) -> Result<EdgeFilter, Box<dyn Error>> {
|
||||
impl<K: DepKind> EdgeFilter<K> {
|
||||
pub fn new(test: &str) -> Result<EdgeFilter<K>, Box<dyn Error>> {
|
||||
let parts: Vec<_> = test.split("->").collect();
|
||||
if parts.len() != 2 {
|
||||
Err(format!("expected a filter like `a&b -> c&d`, not `{}`", test).into())
|
||||
|
@ -48,12 +51,13 @@ impl EdgeFilter {
|
|||
Ok(EdgeFilter {
|
||||
source: DepNodeFilter::new(parts[0]),
|
||||
target: DepNodeFilter::new(parts[1]),
|
||||
index_to_node: Lock::new(FxHashMap::default()),
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(debug_assertions)]
|
||||
pub fn test<K: DepKind>(&self, source: &DepNode<K>, target: &DepNode<K>) -> bool {
|
||||
pub fn test(&self, source: &DepNode<K>, target: &DepNode<K>) -> bool {
|
||||
self.source.test(source) && self.target.test(target)
|
||||
}
|
||||
}
|
||||
|
|
File diff suppressed because it is too large
Load diff
|
@ -13,6 +13,7 @@ pub use serialized::{SerializedDepGraph, SerializedDepNodeIndex};
|
|||
|
||||
use rustc_data_structures::profiling::SelfProfilerRef;
|
||||
use rustc_data_structures::sync::Lock;
|
||||
use rustc_serialize::{opaque::FileEncoder, Encodable};
|
||||
use rustc_session::Session;
|
||||
|
||||
use std::fmt;
|
||||
|
@ -59,7 +60,7 @@ impl<T: DepContext> HasDepContext for T {
|
|||
}
|
||||
|
||||
/// Describe the different families of dependency nodes.
|
||||
pub trait DepKind: Copy + fmt::Debug + Eq + Hash {
|
||||
pub trait DepKind: Copy + fmt::Debug + Eq + Hash + Send + Encodable<FileEncoder> + 'static {
|
||||
const NULL: Self;
|
||||
|
||||
/// Return whether this kind always require evaluation.
|
||||
|
|
|
@ -1,7 +1,7 @@
|
|||
use rustc_data_structures::fx::FxHashMap;
|
||||
use rustc_data_structures::graph::implementation::{Direction, Graph, NodeIndex, INCOMING};
|
||||
|
||||
use super::{DepKind, DepNode};
|
||||
use super::{DepKind, DepNode, DepNodeIndex};
|
||||
|
||||
pub struct DepGraphQuery<K> {
|
||||
pub graph: Graph<DepNode<K>, ()>,
|
||||
|
@ -9,28 +9,27 @@ pub struct DepGraphQuery<K> {
|
|||
}
|
||||
|
||||
impl<K: DepKind> DepGraphQuery<K> {
|
||||
pub fn new(
|
||||
nodes: &[DepNode<K>],
|
||||
edge_list_indices: &[(usize, usize)],
|
||||
edge_list_data: &[usize],
|
||||
) -> DepGraphQuery<K> {
|
||||
let mut graph = Graph::with_capacity(nodes.len(), edge_list_data.len());
|
||||
let mut indices = FxHashMap::default();
|
||||
for node in nodes {
|
||||
indices.insert(*node, graph.add_node(*node));
|
||||
}
|
||||
pub fn new(prev_node_count: usize) -> DepGraphQuery<K> {
|
||||
let node_count = prev_node_count + prev_node_count / 4;
|
||||
let edge_count = 6 * node_count;
|
||||
|
||||
for (source, &(start, end)) in edge_list_indices.iter().enumerate() {
|
||||
for &target in &edge_list_data[start..end] {
|
||||
let source = indices[&nodes[source]];
|
||||
let target = indices[&nodes[target]];
|
||||
graph.add_edge(source, target, ());
|
||||
}
|
||||
}
|
||||
let graph = Graph::with_capacity(node_count, edge_count);
|
||||
let indices = FxHashMap::default();
|
||||
|
||||
DepGraphQuery { graph, indices }
|
||||
}
|
||||
|
||||
pub fn push(&mut self, index: DepNodeIndex, node: DepNode<K>, edges: &[DepNodeIndex]) {
|
||||
let source = self.graph.add_node(node);
|
||||
debug_assert_eq!(index.index(), source.0);
|
||||
self.indices.insert(node, source);
|
||||
|
||||
for &target in edges.iter() {
|
||||
let target = NodeIndex(target.index());
|
||||
self.graph.add_edge(source, target, ());
|
||||
}
|
||||
}
|
||||
|
||||
pub fn nodes(&self) -> Vec<&DepNode<K>> {
|
||||
self.graph.all_nodes().iter().map(|n| &n.data).collect()
|
||||
}
|
||||
|
|
|
@ -1,9 +1,18 @@
|
|||
//! The data that we will serialize and deserialize.
|
||||
|
||||
use super::{DepKind, DepNode};
|
||||
use super::query::DepGraphQuery;
|
||||
use super::{DepKind, DepNode, DepNodeIndex};
|
||||
use rustc_data_structures::fingerprint::Fingerprint;
|
||||
use rustc_index::vec::IndexVec;
|
||||
use rustc_serialize::{Decodable, Decoder};
|
||||
use rustc_data_structures::fx::FxHashMap;
|
||||
use rustc_data_structures::sync::{AtomicU32, Lock, Lrc, Ordering};
|
||||
use rustc_index::vec::{Idx, IndexVec};
|
||||
use rustc_serialize::opaque::{self, FileEncodeResult, FileEncoder, IntEncodedWithFixedSize};
|
||||
use rustc_serialize::{Decodable, Encodable};
|
||||
use smallvec::SmallVec;
|
||||
use std::convert::TryInto;
|
||||
|
||||
#[cfg(parallel_compiler)]
|
||||
use {rustc_data_structures::sync::WorkerLocal, std::sync::mpsc, std::thread};
|
||||
|
||||
// The maximum value of `SerializedDepNodeIndex` leaves the upper two bits
|
||||
// unused so that we can store multiple index types in `CompressedHybridIndex`,
|
||||
|
@ -50,78 +59,347 @@ impl<K: DepKind> SerializedDepGraph<K> {
|
|||
}
|
||||
}
|
||||
|
||||
impl<D: Decoder, K: DepKind + Decodable<D>> Decodable<D> for SerializedDepGraph<K> {
|
||||
fn decode(d: &mut D) -> Result<SerializedDepGraph<K>, D::Error> {
|
||||
// We used to serialize the dep graph by creating and serializing a `SerializedDepGraph`
|
||||
// using data copied from the `DepGraph`. But copying created a large memory spike, so we
|
||||
// now serialize directly from the `DepGraph` as if it's a `SerializedDepGraph`. Because we
|
||||
// deserialize that data into a `SerializedDepGraph` in the next compilation session, we
|
||||
// need `DepGraph`'s `Encodable` and `SerializedDepGraph`'s `Decodable` implementations to
|
||||
// be in sync. If you update this decoding, be sure to update the encoding, and vice-versa.
|
||||
//
|
||||
// We mimic the sequence of `Encode` and `Encodable` method calls used by the `DepGraph`'s
|
||||
// `Encodable` implementation with the corresponding sequence of `Decode` and `Decodable`
|
||||
// method calls. E.g. `Decode::read_struct` pairs with `Encode::emit_struct`, `DepNode`'s
|
||||
// `decode` pairs with `DepNode`'s `encode`, and so on. Any decoding methods not associated
|
||||
// with corresponding encoding methods called in `DepGraph`'s `Encodable` implementation
|
||||
// are off limits, because we'd be relying on their implementation details.
|
||||
//
|
||||
// For example, because we know it happens to do the right thing, its tempting to just use
|
||||
// `IndexVec`'s `Decodable` implementation to decode into some of the collections below,
|
||||
// even though `DepGraph` doesn't use its `Encodable` implementation. But the `IndexVec`
|
||||
// implementation could change, and we'd have a bug.
|
||||
//
|
||||
// Variables below are explicitly typed so that anyone who changes the `SerializedDepGraph`
|
||||
// representation without updating this function will encounter a compilation error, and
|
||||
// know to update this and possibly the `DepGraph` `Encodable` implementation accordingly
|
||||
// (the latter should serialize data in a format compatible with our representation).
|
||||
impl<'a, K: DepKind + Decodable<opaque::Decoder<'a>>> Decodable<opaque::Decoder<'a>>
|
||||
for SerializedDepGraph<K>
|
||||
{
|
||||
#[instrument(skip(d))]
|
||||
fn decode(d: &mut opaque::Decoder<'a>) -> Result<SerializedDepGraph<K>, String> {
|
||||
let position = d.position();
|
||||
|
||||
d.read_struct("SerializedDepGraph", 4, |d| {
|
||||
let nodes: IndexVec<SerializedDepNodeIndex, DepNode<K>> =
|
||||
d.read_struct_field("nodes", 0, |d| {
|
||||
d.read_seq(|d, len| {
|
||||
let mut v = IndexVec::with_capacity(len);
|
||||
for i in 0..len {
|
||||
v.push(d.read_seq_elt(i, |d| Decodable::decode(d))?);
|
||||
}
|
||||
Ok(v)
|
||||
})
|
||||
})?;
|
||||
// The last 16 bytes are the node count and edge count.
|
||||
debug!("position: {:?}", d.position());
|
||||
d.set_position(d.data.len() - 2 * IntEncodedWithFixedSize::ENCODED_SIZE);
|
||||
debug!("position: {:?}", d.position());
|
||||
|
||||
let fingerprints: IndexVec<SerializedDepNodeIndex, Fingerprint> =
|
||||
d.read_struct_field("fingerprints", 1, |d| {
|
||||
d.read_seq(|d, len| {
|
||||
let mut v = IndexVec::with_capacity(len);
|
||||
for i in 0..len {
|
||||
v.push(d.read_seq_elt(i, |d| Decodable::decode(d))?);
|
||||
}
|
||||
Ok(v)
|
||||
})
|
||||
})?;
|
||||
let node_count = IntEncodedWithFixedSize::decode(d)?.0 as usize;
|
||||
let edge_count = IntEncodedWithFixedSize::decode(d)?.0 as usize;
|
||||
debug!(?node_count, ?edge_count);
|
||||
|
||||
let edge_list_indices: IndexVec<SerializedDepNodeIndex, (u32, u32)> = d
|
||||
.read_struct_field("edge_list_indices", 2, |d| {
|
||||
d.read_seq(|d, len| {
|
||||
let mut v = IndexVec::with_capacity(len);
|
||||
for i in 0..len {
|
||||
v.push(d.read_seq_elt(i, |d| Decodable::decode(d))?);
|
||||
}
|
||||
Ok(v)
|
||||
})
|
||||
})?;
|
||||
debug!("position: {:?}", d.position());
|
||||
d.set_position(position);
|
||||
debug!("position: {:?}", d.position());
|
||||
|
||||
let edge_list_data: Vec<SerializedDepNodeIndex> =
|
||||
d.read_struct_field("edge_list_data", 3, |d| {
|
||||
d.read_seq(|d, len| {
|
||||
let mut v = Vec::with_capacity(len);
|
||||
for i in 0..len {
|
||||
v.push(d.read_seq_elt(i, |d| Decodable::decode(d))?);
|
||||
}
|
||||
Ok(v)
|
||||
})
|
||||
})?;
|
||||
let mut nodes = IndexVec::with_capacity(node_count);
|
||||
let mut fingerprints = IndexVec::with_capacity(node_count);
|
||||
let mut edge_list_indices = IndexVec::with_capacity(node_count);
|
||||
let mut edge_list_data = Vec::with_capacity(edge_count);
|
||||
|
||||
Ok(SerializedDepGraph { nodes, fingerprints, edge_list_indices, edge_list_data })
|
||||
})
|
||||
for _index in 0..node_count {
|
||||
let node = NodeInfo::<K, SerializedDepNodeIndex>::decode(d)?;
|
||||
debug!(?_index, ?node);
|
||||
|
||||
let _i: SerializedDepNodeIndex = nodes.push(node.node);
|
||||
debug_assert_eq!(_i.index(), _index);
|
||||
let _i: SerializedDepNodeIndex = fingerprints.push(node.fingerprint);
|
||||
debug_assert_eq!(_i.index(), _index);
|
||||
|
||||
let start = edge_list_data.len().try_into().unwrap();
|
||||
edge_list_data.extend(node.edges.into_iter());
|
||||
let end = edge_list_data.len().try_into().unwrap();
|
||||
|
||||
let _i: SerializedDepNodeIndex = edge_list_indices.push((start, end));
|
||||
debug_assert_eq!(_i.index(), _index);
|
||||
}
|
||||
|
||||
Ok(SerializedDepGraph { nodes, fingerprints, edge_list_indices, edge_list_data })
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Encodable, Decodable)]
|
||||
pub struct NodeInfo<K: DepKind, I: Idx> {
|
||||
node: DepNode<K>,
|
||||
fingerprint: Fingerprint,
|
||||
edges: SmallVec<[I; 8]>,
|
||||
}
|
||||
|
||||
struct Stat<K: DepKind> {
|
||||
kind: K,
|
||||
node_counter: u64,
|
||||
edge_counter: u64,
|
||||
}
|
||||
|
||||
struct Stats<K: DepKind> {
|
||||
stats: FxHashMap<K, Stat<K>>,
|
||||
total_node_count: usize,
|
||||
total_edge_count: usize,
|
||||
}
|
||||
|
||||
#[instrument(skip(encoder, _record_graph, record_stats))]
|
||||
fn encode_node<K: DepKind>(
|
||||
encoder: &mut FileEncoder,
|
||||
_index: DepNodeIndex,
|
||||
node: &NodeInfo<K, DepNodeIndex>,
|
||||
_record_graph: &Option<Lrc<Lock<DepGraphQuery<K>>>>,
|
||||
record_stats: &Option<Lrc<Lock<Stats<K>>>>,
|
||||
) -> FileEncodeResult {
|
||||
#[cfg(debug_assertions)]
|
||||
if let Some(record_graph) = &_record_graph {
|
||||
record_graph.lock().push(_index, node.node, &node.edges);
|
||||
}
|
||||
|
||||
if let Some(record_stats) = &record_stats {
|
||||
let mut stats = record_stats.lock();
|
||||
let kind = node.node.kind;
|
||||
let edge_count = node.edges.len();
|
||||
|
||||
let stat =
|
||||
stats.stats.entry(kind).or_insert(Stat { kind, node_counter: 0, edge_counter: 0 });
|
||||
stat.node_counter += 1;
|
||||
stat.edge_counter += edge_count as u64;
|
||||
stats.total_node_count += 1;
|
||||
stats.total_edge_count += edge_count;
|
||||
}
|
||||
|
||||
debug!(?_index, ?node);
|
||||
node.encode(encoder)
|
||||
}
|
||||
|
||||
fn encode_counts(
|
||||
mut encoder: FileEncoder,
|
||||
node_count: usize,
|
||||
edge_count: usize,
|
||||
) -> FileEncodeResult {
|
||||
let node_count = node_count.try_into().unwrap();
|
||||
let edge_count = edge_count.try_into().unwrap();
|
||||
|
||||
debug!(?node_count, ?edge_count);
|
||||
debug!("position: {:?}", encoder.position());
|
||||
IntEncodedWithFixedSize(node_count).encode(&mut encoder)?;
|
||||
IntEncodedWithFixedSize(edge_count).encode(&mut encoder)?;
|
||||
debug!("position: {:?}", encoder.position());
|
||||
// Drop the encoder so that nothing is written after the counts.
|
||||
encoder.flush()
|
||||
}
|
||||
|
||||
#[cfg(not(parallel_compiler))]
|
||||
pub struct GraphEncoder<K: DepKind> {
|
||||
status: Lock<(FileEncoder, usize, FileEncodeResult)>,
|
||||
counter: AtomicU32,
|
||||
record_graph: Option<Lrc<Lock<DepGraphQuery<K>>>>,
|
||||
record_stats: Option<Lrc<Lock<Stats<K>>>>,
|
||||
}
|
||||
|
||||
#[cfg(parallel_compiler)]
|
||||
pub struct GraphEncoder<K: DepKind> {
|
||||
send: WorkerLocal<mpsc::Sender<(DepNodeIndex, NodeInfo<K, DepNodeIndex>)>>,
|
||||
thread: thread::JoinHandle<FileEncodeResult>,
|
||||
counter: AtomicU32,
|
||||
record_graph: Option<Lrc<Lock<DepGraphQuery<K>>>>,
|
||||
record_stats: Option<Lrc<Lock<Stats<K>>>>,
|
||||
}
|
||||
|
||||
impl<K: DepKind + Encodable<FileEncoder>> GraphEncoder<K> {
|
||||
pub fn new(
|
||||
encoder: FileEncoder,
|
||||
prev_node_count: usize,
|
||||
record_graph: bool,
|
||||
record_stats: bool,
|
||||
) -> Self {
|
||||
let record_graph = if cfg!(debug_assertions) && record_graph {
|
||||
Some(Lrc::new(Lock::new(DepGraphQuery::new(prev_node_count))))
|
||||
} else {
|
||||
None
|
||||
};
|
||||
let record_stats = if record_stats {
|
||||
Some(Lrc::new(Lock::new(Stats {
|
||||
stats: FxHashMap::default(),
|
||||
total_node_count: 0,
|
||||
total_edge_count: 0,
|
||||
})))
|
||||
} else {
|
||||
None
|
||||
};
|
||||
let counter = AtomicU32::new(0);
|
||||
|
||||
#[cfg(not(parallel_compiler))]
|
||||
{
|
||||
let status = Lock::new((encoder, 0, Ok(())));
|
||||
GraphEncoder { status, counter, record_graph, record_stats }
|
||||
}
|
||||
#[cfg(parallel_compiler)]
|
||||
{
|
||||
let (send, recv) = mpsc::channel();
|
||||
let thread = {
|
||||
let record_graph = record_graph.clone();
|
||||
let record_stats = record_stats.clone();
|
||||
thread::spawn(move || {
|
||||
encode_graph(encoder, recv, |encoder, index, node| {
|
||||
encode_node(encoder, index, node, &record_graph, &record_stats)
|
||||
})
|
||||
})
|
||||
};
|
||||
let send = WorkerLocal::new(move |_| send.clone());
|
||||
|
||||
GraphEncoder { send, thread, counter, record_graph, record_stats }
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) fn with_query(&self, f: impl Fn(&DepGraphQuery<K>)) {
|
||||
if let Some(record_graph) = &self.record_graph {
|
||||
f(&record_graph.lock())
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) fn print_incremental_info(
|
||||
&self,
|
||||
total_read_count: u64,
|
||||
total_duplicate_read_count: u64,
|
||||
) {
|
||||
if let Some(record_stats) = &self.record_stats {
|
||||
let record_stats = record_stats.lock();
|
||||
|
||||
let mut stats: Vec<_> = record_stats.stats.values().collect();
|
||||
stats.sort_by_key(|s| -(s.node_counter as i64));
|
||||
|
||||
const SEPARATOR: &str = "[incremental] --------------------------------\
|
||||
----------------------------------------------\
|
||||
------------";
|
||||
|
||||
eprintln!("[incremental]");
|
||||
eprintln!("[incremental] DepGraph Statistics");
|
||||
eprintln!("{}", SEPARATOR);
|
||||
eprintln!("[incremental]");
|
||||
eprintln!("[incremental] Total Node Count: {}", record_stats.total_node_count);
|
||||
eprintln!("[incremental] Total Edge Count: {}", record_stats.total_edge_count);
|
||||
|
||||
if cfg!(debug_assertions) {
|
||||
eprintln!("[incremental] Total Edge Reads: {}", total_read_count);
|
||||
eprintln!(
|
||||
"[incremental] Total Duplicate Edge Reads: {}",
|
||||
total_duplicate_read_count
|
||||
);
|
||||
}
|
||||
|
||||
eprintln!("[incremental]");
|
||||
eprintln!(
|
||||
"[incremental] {:<36}| {:<17}| {:<12}| {:<17}|",
|
||||
"Node Kind", "Node Frequency", "Node Count", "Avg. Edge Count"
|
||||
);
|
||||
eprintln!("{}", SEPARATOR);
|
||||
|
||||
for stat in stats {
|
||||
let node_kind_ratio =
|
||||
(100.0 * (stat.node_counter as f64)) / (record_stats.total_node_count as f64);
|
||||
let node_kind_avg_edges = (stat.edge_counter as f64) / (stat.node_counter as f64);
|
||||
|
||||
eprintln!(
|
||||
"[incremental] {:<36}|{:>16.1}% |{:>12} |{:>17.1} |",
|
||||
format!("{:?}", stat.kind),
|
||||
node_kind_ratio,
|
||||
stat.node_counter,
|
||||
node_kind_avg_edges,
|
||||
);
|
||||
}
|
||||
|
||||
eprintln!("{}", SEPARATOR);
|
||||
eprintln!("[incremental]");
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(not(parallel_compiler))]
|
||||
impl<K: DepKind + Encodable<FileEncoder>> GraphEncoder<K> {
|
||||
pub(crate) fn send(
|
||||
&self,
|
||||
node: DepNode<K>,
|
||||
fingerprint: Fingerprint,
|
||||
edges: SmallVec<[DepNodeIndex; 8]>,
|
||||
) -> DepNodeIndex {
|
||||
let index = self.counter.fetch_add(1, Ordering::SeqCst);
|
||||
let index = DepNodeIndex::from_u32(index);
|
||||
let &mut (ref mut encoder, ref mut edge_count, ref mut result) = &mut *self.status.lock();
|
||||
*edge_count += edges.len();
|
||||
*result = std::mem::replace(result, Ok(())).and_then(|()| {
|
||||
let node = NodeInfo { node, fingerprint, edges };
|
||||
encode_node(encoder, index, &node, &self.record_graph, &self.record_stats)
|
||||
});
|
||||
index
|
||||
}
|
||||
|
||||
pub fn finish(self) -> FileEncodeResult {
|
||||
let (encoder, edge_count, result) = self.status.into_inner();
|
||||
let () = result?;
|
||||
let node_count = self.counter.into_inner() as usize;
|
||||
|
||||
encode_counts(encoder, node_count, edge_count)
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(parallel_compiler)]
impl<K: DepKind + Encodable<FileEncoder>> GraphEncoder<K> {
    /// Assigns the next `DepNodeIndex` and forwards the node to the
    /// dedicated encoder thread over the channel.
    pub(crate) fn send(
        &self,
        node: DepNode<K>,
        fingerprint: Fingerprint,
        edges: SmallVec<[DepNodeIndex; 8]>,
    ) -> DepNodeIndex {
        let node = NodeInfo { node, fingerprint, edges };
        let index = self.counter.fetch_add(1, Ordering::SeqCst);
        let index = DepNodeIndex::from_u32(index);
        // Sends from different threads may race and arrive out of order;
        // the receiving side (`ordered_recv`) re-sorts them by index.
        self.send.send((index, node)).unwrap();
        index
    }

    /// Finalizes the stream: disconnects the channel so the encoder thread
    /// drains its queue and exits, then joins it and returns its result.
    pub fn finish(self) -> FileEncodeResult {
        // Dropping all senders disconnects the channel, terminating the
        // receive loop on the encoder thread.
        std::mem::drop(self.send);
        self.thread.join().unwrap()
    }
}
#[cfg(parallel_compiler)]
#[instrument(skip(encoder, recv, process))]
/// Encoder-thread main loop: receives nodes from `recv`, hands each one to
/// `process` for encoding in index order, and finally appends the node and
/// edge counts to the file.
fn encode_graph<K: DepKind + Encodable<FileEncoder>>(
    mut encoder: FileEncoder,
    recv: mpsc::Receiver<(DepNodeIndex, NodeInfo<K, DepNodeIndex>)>,
    process: impl Fn(&mut FileEncoder, DepNodeIndex, &NodeInfo<K, DepNodeIndex>) -> FileEncodeResult,
) -> FileEncodeResult {
    let mut edge_count: usize = 0;
    // `ordered_recv` delivers the nodes in index order and returns how many
    // there were; we tally the edges as a side effect.
    let node_count: usize = ordered_recv(recv, |index, node| {
        edge_count += node.edges.len();
        process(&mut encoder, index, node)
    })?;

    encode_counts(encoder, node_count, edge_count)
}
/// Since there are multiple producers assigning the `DepNodeIndex` using an atomic,
/// the messages may not arrive in order. This function sorts them as they come,
/// invoking `f` on each message strictly in index order, and returns the total
/// number of messages processed.
#[cfg(parallel_compiler)]
fn ordered_recv<K: DepKind + Encodable<opaque::FileEncoder>>(
    recv: mpsc::Receiver<(DepNodeIndex, NodeInfo<K, DepNodeIndex>)>,
    mut f: impl FnMut(DepNodeIndex, &NodeInfo<K, DepNodeIndex>) -> FileEncodeResult,
) -> Result<usize, std::io::Error> {
    // Messages that arrived early, parked until their index comes up.
    let mut pending = Vec::<(DepNodeIndex, _)>::new();
    let mut expected = DepNodeIndex::new(0);

    // INVARIANT: No message can arrive with an index less than `expected`.
    'outer: loop {
        // First flush any parked messages that have become due. Sorting
        // lets the drain pick up a whole run of consecutive indices.
        pending.sort_by_key(|n| n.0);
        for (index, node) in pending.drain_filter(|(index, _)| {
            if *index == expected {
                expected.increment_by(1);
                true
            } else {
                false
            }
        }) {
            f(index, &node)?;
        }

        // Then block on the channel until the next in-order message shows
        // up, parking anything that arrives early.
        while let Ok((index, node)) = recv.recv() {
            if index > expected {
                pending.push((index, node));
            } else if index == expected {
                f(index, &node)?;
                expected.increment_by(1);
                // The message just processed may have unblocked parked
                // successors — go re-scan `pending`.
                continue 'outer;
            } else {
                // `index < expected` violates the invariant above.
                panic!("Unexpected index {:?} while waiting for {:?}", index, expected);
            }
        }

        // Channel disconnected: all senders dropped, nothing left to drain.
        break;
    }

    Ok(expected.as_u32() as usize)
}
|
|
|
@ -2,6 +2,7 @@
|
|||
#![feature(const_fn)]
|
||||
#![feature(const_panic)]
|
||||
#![feature(core_intrinsics)]
|
||||
#![feature(drain_filter)]
|
||||
#![feature(hash_raw_entry)]
|
||||
#![feature(iter_zip)]
|
||||
#![feature(min_specialization)]
|
||||
|
|
|
@ -537,7 +537,7 @@ where
|
|||
// If `-Zincremental-verify-ich` is specified, re-hash results from
|
||||
// the cache and make sure that they have the expected fingerprint.
|
||||
if unlikely!(tcx.dep_context().sess().opts.debugging_opts.incremental_verify_ich) {
|
||||
incremental_verify_ich(*tcx.dep_context(), &result, dep_node, dep_node_index, query);
|
||||
incremental_verify_ich(*tcx.dep_context(), &result, dep_node, query);
|
||||
}
|
||||
|
||||
result
|
||||
|
@ -560,7 +560,7 @@ where
|
|||
//
|
||||
// See issue #82920 for an example of a miscompilation that would get turned into
|
||||
// an ICE by this check
|
||||
incremental_verify_ich(*tcx.dep_context(), &result, dep_node, dep_node_index, query);
|
||||
incremental_verify_ich(*tcx.dep_context(), &result, dep_node, query);
|
||||
|
||||
result
|
||||
}
|
||||
|
@ -570,14 +570,12 @@ fn incremental_verify_ich<CTX, K, V: Debug>(
|
|||
tcx: CTX::DepContext,
|
||||
result: &V,
|
||||
dep_node: &DepNode<CTX::DepKind>,
|
||||
dep_node_index: DepNodeIndex,
|
||||
query: &QueryVtable<CTX, K, V>,
|
||||
) where
|
||||
CTX: QueryContext,
|
||||
{
|
||||
assert!(
|
||||
Some(tcx.dep_graph().fingerprint_of(dep_node_index))
|
||||
== tcx.dep_graph().prev_fingerprint_of(dep_node),
|
||||
tcx.dep_graph().is_green(dep_node),
|
||||
"fingerprint for green query instance not loaded from cache: {:?}",
|
||||
dep_node,
|
||||
);
|
||||
|
@ -588,9 +586,15 @@ fn incremental_verify_ich<CTX, K, V: Debug>(
|
|||
let new_hash = query.hash_result(&mut hcx, result).unwrap_or(Fingerprint::ZERO);
|
||||
debug!("END verify_ich({:?})", dep_node);
|
||||
|
||||
let old_hash = tcx.dep_graph().fingerprint_of(dep_node_index);
|
||||
let old_hash = tcx.dep_graph().prev_fingerprint_of(dep_node);
|
||||
|
||||
assert!(new_hash == old_hash, "found unstable fingerprints for {:?}: {:?}", dep_node, result);
|
||||
assert_eq!(
|
||||
Some(new_hash),
|
||||
old_hash,
|
||||
"found unstable fingerprints for {:?}: {:?}",
|
||||
dep_node,
|
||||
result
|
||||
);
|
||||
}
|
||||
|
||||
fn force_query_with_job<C, CTX>(
|
||||
|
|
Loading…
Add table
Reference in a new issue