Auto merge of #115920 - Zoxc:depkind-u16, r=cjgillot

Move `DepKind` to `rustc_query_system` and define it as `u16`

This moves the `DepKind` type to `rustc_query_system` where it's defined with an inner `u16` field. This decouples it from `rustc_middle` and is a step towards letting other crates define dep kinds. It also allows some type parameters to be removed. The `DepKind` trait is replaced with a `Deps` trait, which is used when operations on, or information about, dep kinds are needed that are unavailable in `rustc_query_system`.

r? `@cjgillot`
This commit is contained in:
bors 2023-09-22 00:46:13 +00:00
commit b757318718
24 changed files with 509 additions and 519 deletions

View file

@ -42,7 +42,7 @@ use rustc_hir as hir;
use rustc_hir::def_id::{DefId, LocalDefId, CRATE_DEF_ID}; use rustc_hir::def_id::{DefId, LocalDefId, CRATE_DEF_ID};
use rustc_hir::intravisit::{self, Visitor}; use rustc_hir::intravisit::{self, Visitor};
use rustc_middle::dep_graph::{ use rustc_middle::dep_graph::{
DepGraphQuery, DepKind, DepNode, DepNodeExt, DepNodeFilter, EdgeFilter, dep_kinds, DepGraphQuery, DepKind, DepNode, DepNodeExt, DepNodeFilter, EdgeFilter,
}; };
use rustc_middle::hir::nested_filter; use rustc_middle::hir::nested_filter;
use rustc_middle::ty::TyCtxt; use rustc_middle::ty::TyCtxt;
@ -129,7 +129,7 @@ impl<'tcx> IfThisChanged<'tcx> {
let dep_node_interned = self.argument(attr); let dep_node_interned = self.argument(attr);
let dep_node = match dep_node_interned { let dep_node = match dep_node_interned {
None => { None => {
DepNode::from_def_path_hash(self.tcx, def_path_hash, DepKind::hir_owner) DepNode::from_def_path_hash(self.tcx, def_path_hash, dep_kinds::hir_owner)
} }
Some(n) => { Some(n) => {
match DepNode::from_label_string(self.tcx, n.as_str(), def_path_hash) { match DepNode::from_label_string(self.tcx, n.as_str(), def_path_hash) {

View file

@ -3,7 +3,7 @@
use crate::errors; use crate::errors;
use rustc_data_structures::memmap::Mmap; use rustc_data_structures::memmap::Mmap;
use rustc_data_structures::unord::UnordMap; use rustc_data_structures::unord::UnordMap;
use rustc_middle::dep_graph::{SerializedDepGraph, WorkProductMap}; use rustc_middle::dep_graph::{DepsType, SerializedDepGraph, WorkProductMap};
use rustc_middle::query::on_disk_cache::OnDiskCache; use rustc_middle::query::on_disk_cache::OnDiskCache;
use rustc_serialize::opaque::MemDecoder; use rustc_serialize::opaque::MemDecoder;
use rustc_serialize::Decodable; use rustc_serialize::Decodable;
@ -208,7 +208,7 @@ pub fn load_dep_graph(sess: &Session) -> DepGraphFuture {
return LoadResult::DataOutOfDate; return LoadResult::DataOutOfDate;
} }
let dep_graph = SerializedDepGraph::decode(&mut decoder); let dep_graph = SerializedDepGraph::decode::<DepsType>(&mut decoder);
LoadResult::Ok { data: (dep_graph, prev_work_products) } LoadResult::Ok { data: (dep_graph, prev_work_products) }
} }

View file

@ -10,8 +10,10 @@
//! origin crate when the `TyCtxt` is not present in TLS. //! origin crate when the `TyCtxt` is not present in TLS.
use rustc_errors::{Diagnostic, TRACK_DIAGNOSTICS}; use rustc_errors::{Diagnostic, TRACK_DIAGNOSTICS};
use rustc_middle::dep_graph::TaskDepsRef; use rustc_middle::dep_graph::{DepNodeExt, TaskDepsRef};
use rustc_middle::ty::tls; use rustc_middle::ty::tls;
use rustc_query_system::dep_graph::dep_node::default_dep_kind_debug;
use rustc_query_system::dep_graph::{DepContext, DepKind, DepNode};
use std::fmt; use std::fmt;
fn track_span_parent(def_id: rustc_span::def_id::LocalDefId) { fn track_span_parent(def_id: rustc_span::def_id::LocalDefId) {
@ -59,10 +61,49 @@ fn def_id_debug(def_id: rustc_hir::def_id::DefId, f: &mut fmt::Formatter<'_>) ->
write!(f, ")") write!(f, ")")
} }
/// This is a callback from `rustc_query_system` as it cannot access the implicit state
/// in `rustc_middle` otherwise.
pub fn dep_kind_debug(kind: DepKind, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
tls::with_opt(|opt_tcx| {
if let Some(tcx) = opt_tcx {
write!(f, "{}", tcx.dep_kind_info(kind).name)
} else {
default_dep_kind_debug(kind, f)
}
})
}
/// This is a callback from `rustc_query_system` as it cannot access the implicit state
/// in `rustc_middle` otherwise.
pub fn dep_node_debug(node: DepNode, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
write!(f, "{:?}(", node.kind)?;
tls::with_opt(|opt_tcx| {
if let Some(tcx) = opt_tcx {
if let Some(def_id) = node.extract_def_id(tcx) {
write!(f, "{}", tcx.def_path_debug_str(def_id))?;
} else if let Some(ref s) = tcx.dep_graph.dep_node_debug_str(node) {
write!(f, "{s}")?;
} else {
write!(f, "{}", node.hash)?;
}
} else {
write!(f, "{}", node.hash)?;
}
Ok(())
})?;
write!(f, ")")
}
/// Sets up the callbacks in prior crates which we want to refer to the /// Sets up the callbacks in prior crates which we want to refer to the
/// TyCtxt in. /// TyCtxt in.
pub fn setup_callbacks() { pub fn setup_callbacks() {
rustc_span::SPAN_TRACK.swap(&(track_span_parent as fn(_))); rustc_span::SPAN_TRACK.swap(&(track_span_parent as fn(_)));
rustc_hir::def_id::DEF_ID_DEBUG.swap(&(def_id_debug as fn(_, &mut fmt::Formatter<'_>) -> _)); rustc_hir::def_id::DEF_ID_DEBUG.swap(&(def_id_debug as fn(_, &mut fmt::Formatter<'_>) -> _));
rustc_query_system::dep_graph::dep_node::DEP_KIND_DEBUG
.swap(&(dep_kind_debug as fn(_, &mut fmt::Formatter<'_>) -> _));
rustc_query_system::dep_graph::dep_node::DEP_NODE_DEBUG
.swap(&(dep_node_debug as fn(_, &mut fmt::Formatter<'_>) -> _));
TRACK_DIAGNOSTICS.swap(&(track_diagnostic as _)); TRACK_DIAGNOSTICS.swap(&(track_diagnostic as _));
} }

View file

@ -126,8 +126,8 @@ macro_rules! provide_one {
// External query providers call `crate_hash` in order to register a dependency // External query providers call `crate_hash` in order to register a dependency
// on the crate metadata. The exception is `crate_hash` itself, which obviously // on the crate metadata. The exception is `crate_hash` itself, which obviously
// doesn't need to do this (and can't, as it would cause a query cycle). // doesn't need to do this (and can't, as it would cause a query cycle).
use rustc_middle::dep_graph::DepKind; use rustc_middle::dep_graph::dep_kinds;
if DepKind::$name != DepKind::crate_hash && $tcx.dep_graph.is_fully_enabled() { if dep_kinds::$name != dep_kinds::crate_hash && $tcx.dep_graph.is_fully_enabled() {
$tcx.ensure().crate_hash($def_id.krate); $tcx.ensure().crate_hash($def_id.krate);
} }

View file

@ -65,9 +65,9 @@ use rustc_hir::definitions::DefPathHash;
use rustc_hir::{HirId, ItemLocalId, OwnerId}; use rustc_hir::{HirId, ItemLocalId, OwnerId};
use rustc_query_system::dep_graph::FingerprintStyle; use rustc_query_system::dep_graph::FingerprintStyle;
use rustc_span::symbol::Symbol; use rustc_span::symbol::Symbol;
use std::hash::Hash;
pub use rustc_query_system::dep_graph::{DepContext, DepNodeParams}; pub use rustc_query_system::dep_graph::dep_node::DepKind;
pub use rustc_query_system::dep_graph::{DepContext, DepNode, DepNodeParams};
macro_rules! define_dep_nodes { macro_rules! define_dep_nodes {
( (
@ -84,55 +84,39 @@ macro_rules! define_dep_nodes {
// encoding. The derived Encodable/Decodable uses leb128 encoding which is // encoding. The derived Encodable/Decodable uses leb128 encoding which is
// dense when only considering this enum. But DepKind is encoded in a larger // dense when only considering this enum. But DepKind is encoded in a larger
// struct, and there we can take advantage of the unused bits in the u16. // struct, and there we can take advantage of the unused bits in the u16.
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)]
#[allow(non_camel_case_types)] #[allow(non_camel_case_types)]
#[repr(u16)] #[repr(u16)] // Must be kept in sync with the inner type of `DepKind`.
pub enum DepKind { enum DepKindDefs {
$( $( #[$attr] )* $variant),* $( $( #[$attr] )* $variant),*
} }
impl DepKind { #[allow(non_upper_case_globals)]
// This const implements two things: A bounds check so that we can decode pub mod dep_kinds {
// a DepKind from a u16 with just one check, and a const check that the use super::*;
// discriminants of the variants have been assigned consecutively from 0
// so that just the one comparison suffices to check that the u16 can be $(
// transmuted to a DepKind. // The `as u16` cast must be kept in sync with the inner type of `DepKind`.
pub const VARIANTS: u16 = { pub const $variant: DepKind = DepKind::new(DepKindDefs::$variant as u16);
let deps: &[DepKind] = &[$(DepKind::$variant,)*]; )*
let mut i = 0;
while i < deps.len() {
if i as u16 != deps[i] as u16 {
panic!();
}
i += 1;
}
deps.len() as u16
};
} }
impl<S: rustc_serialize::Encoder> rustc_serialize::Encodable<S> for DepKind { // This checks that the discriminants of the variants have been assigned consecutively
#[inline] // from 0 so that they can be used as a dense index.
fn encode(&self, s: &mut S) { pub const DEP_KIND_VARIANTS: u16 = {
s.emit_u16(*self as u16); let deps = &[$(dep_kinds::$variant,)*];
} let mut i = 0;
} while i < deps.len() {
if i != deps[i].as_usize() {
impl<D: rustc_serialize::Decoder> rustc_serialize::Decodable<D> for DepKind { panic!();
#[inline]
fn decode(d: &mut D) -> DepKind {
let discrim = d.read_u16();
assert!(discrim < DepKind::VARIANTS);
// SAFETY: DepKind::VARIANTS checks that the discriminant values permit
// this one check to soundly guard the transmute.
unsafe {
std::mem::transmute::<u16, DepKind>(discrim)
} }
i += 1;
} }
} deps.len() as u16
};
pub(super) fn dep_kind_from_label_string(label: &str) -> Result<DepKind, ()> { pub(super) fn dep_kind_from_label_string(label: &str) -> Result<DepKind, ()> {
match label { match label {
$(stringify!($variant) => Ok(DepKind::$variant),)* $(stringify!($variant) => Ok(dep_kinds::$variant),)*
_ => Err(()), _ => Err(()),
} }
} }
@ -158,12 +142,10 @@ rustc_query_append!(define_dep_nodes![
[] fn CompileMonoItem() -> (), [] fn CompileMonoItem() -> (),
]); ]);
static_assert_size!(DepKind, 2);
// WARNING: `construct` is generic and does not know that `CompileCodegenUnit` takes `Symbol`s as keys. // WARNING: `construct` is generic and does not know that `CompileCodegenUnit` takes `Symbol`s as keys.
// Be very careful changing this type signature! // Be very careful changing this type signature!
pub(crate) fn make_compile_codegen_unit(tcx: TyCtxt<'_>, name: Symbol) -> DepNode { pub(crate) fn make_compile_codegen_unit(tcx: TyCtxt<'_>, name: Symbol) -> DepNode {
DepNode::construct(tcx, DepKind::CompileCodegenUnit, &name) DepNode::construct(tcx, dep_kinds::CompileCodegenUnit, &name)
} }
// WARNING: `construct` is generic and does not know that `CompileMonoItem` takes `MonoItem`s as keys. // WARNING: `construct` is generic and does not know that `CompileMonoItem` takes `MonoItem`s as keys.
@ -172,20 +154,9 @@ pub(crate) fn make_compile_mono_item<'tcx>(
tcx: TyCtxt<'tcx>, tcx: TyCtxt<'tcx>,
mono_item: &MonoItem<'tcx>, mono_item: &MonoItem<'tcx>,
) -> DepNode { ) -> DepNode {
DepNode::construct(tcx, DepKind::CompileMonoItem, mono_item) DepNode::construct(tcx, dep_kinds::CompileMonoItem, mono_item)
} }
pub type DepNode = rustc_query_system::dep_graph::DepNode<DepKind>;
// We keep a lot of `DepNode`s in memory during compilation. It's not
// required that their size stay the same, but we don't want to change
// it inadvertently. This assert just ensures we're aware of any change.
#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
static_assert_size!(DepNode, 18);
#[cfg(not(any(target_arch = "x86", target_arch = "x86_64")))]
static_assert_size!(DepNode, 24);
pub trait DepNodeExt: Sized { pub trait DepNodeExt: Sized {
/// Extracts the DefId corresponding to this DepNode. This will work /// Extracts the DefId corresponding to this DepNode. This will work
/// if two conditions are met: /// if two conditions are met:

View file

@ -6,49 +6,24 @@ use rustc_session::Session;
#[macro_use] #[macro_use]
mod dep_node; mod dep_node;
pub use rustc_query_system::dep_graph::debug::EdgeFilter;
pub use rustc_query_system::dep_graph::{ pub use rustc_query_system::dep_graph::{
debug::DepNodeFilter, hash_result, DepContext, DepNodeColor, DepNodeIndex, debug::DepNodeFilter, hash_result, DepContext, DepGraphQuery, DepNodeColor, DepNodeIndex, Deps,
SerializedDepNodeIndex, WorkProduct, WorkProductId, WorkProductMap, SerializedDepGraph, SerializedDepNodeIndex, TaskDeps, TaskDepsRef, WorkProduct, WorkProductId,
WorkProductMap,
}; };
pub use dep_node::{label_strs, DepKind, DepNode, DepNodeExt}; pub use dep_node::{dep_kinds, label_strs, DepKind, DepNode, DepNodeExt};
pub(crate) use dep_node::{make_compile_codegen_unit, make_compile_mono_item}; pub(crate) use dep_node::{make_compile_codegen_unit, make_compile_mono_item};
pub type DepGraph = rustc_query_system::dep_graph::DepGraph<DepKind>; pub type DepGraph = rustc_query_system::dep_graph::DepGraph<DepsType>;
pub type TaskDeps = rustc_query_system::dep_graph::TaskDeps<DepKind>;
pub type TaskDepsRef<'a> = rustc_query_system::dep_graph::TaskDepsRef<'a, DepKind>;
pub type DepGraphQuery = rustc_query_system::dep_graph::DepGraphQuery<DepKind>;
pub type SerializedDepGraph = rustc_query_system::dep_graph::SerializedDepGraph<DepKind>;
pub type EdgeFilter = rustc_query_system::dep_graph::debug::EdgeFilter<DepKind>;
pub type DepKindStruct<'tcx> = rustc_query_system::dep_graph::DepKindStruct<TyCtxt<'tcx>>; pub type DepKindStruct<'tcx> = rustc_query_system::dep_graph::DepKindStruct<TyCtxt<'tcx>>;
impl rustc_query_system::dep_graph::DepKind for DepKind { #[derive(Clone)]
const NULL: Self = DepKind::Null; pub struct DepsType;
const RED: Self = DepKind::Red;
const MAX: u16 = DepKind::VARIANTS - 1;
fn debug_node(node: &DepNode, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
write!(f, "{:?}(", node.kind)?;
ty::tls::with_opt(|opt_tcx| {
if let Some(tcx) = opt_tcx {
if let Some(def_id) = node.extract_def_id(tcx) {
write!(f, "{}", tcx.def_path_debug_str(def_id))?;
} else if let Some(ref s) = tcx.dep_graph.dep_node_debug_str(*node) {
write!(f, "{s}")?;
} else {
write!(f, "{}", node.hash)?;
}
} else {
write!(f, "{}", node.hash)?;
}
Ok(())
})?;
write!(f, ")")
}
impl Deps for DepsType {
fn with_deps<OP, R>(task_deps: TaskDepsRef<'_>, op: OP) -> R fn with_deps<OP, R>(task_deps: TaskDepsRef<'_>, op: OP) -> R
where where
OP: FnOnce() -> R, OP: FnOnce() -> R,
@ -70,24 +45,13 @@ impl rustc_query_system::dep_graph::DepKind for DepKind {
}) })
} }
#[track_caller] const DEP_KIND_NULL: DepKind = dep_kinds::Null;
#[inline] const DEP_KIND_RED: DepKind = dep_kinds::Red;
fn from_u16(u: u16) -> Self { const DEP_KIND_MAX: u16 = dep_node::DEP_KIND_VARIANTS - 1;
if u > Self::MAX {
panic!("Invalid DepKind {u}");
}
// SAFETY: See comment on DepKind::VARIANTS
unsafe { std::mem::transmute(u) }
}
#[inline]
fn to_u16(self) -> u16 {
self as u16
}
} }
impl<'tcx> DepContext for TyCtxt<'tcx> { impl<'tcx> DepContext for TyCtxt<'tcx> {
type DepKind = DepKind; type Deps = DepsType;
#[inline] #[inline]
fn with_stable_hashing_context<R>(self, f: impl FnOnce(StableHashingContext<'_>) -> R) -> R { fn with_stable_hashing_context<R>(self, f: impl FnOnce(StableHashingContext<'_>) -> R) -> R {
@ -111,6 +75,6 @@ impl<'tcx> DepContext for TyCtxt<'tcx> {
#[inline] #[inline]
fn dep_kind_info(&self, dk: DepKind) -> &DepKindStruct<'tcx> { fn dep_kind_info(&self, dk: DepKind) -> &DepKindStruct<'tcx> {
&self.query_kinds[dk as usize] &self.query_kinds[dk.as_usize()]
} }
} }

View file

@ -7,7 +7,6 @@
#![allow(unused_parens)] #![allow(unused_parens)]
use crate::dep_graph; use crate::dep_graph;
use crate::dep_graph::DepKind;
use crate::infer::canonical::{self, Canonical}; use crate::infer::canonical::{self, Canonical};
use crate::lint::LintExpectation; use crate::lint::LintExpectation;
use crate::metadata::ModChild; use crate::metadata::ModChild;

View file

@ -37,7 +37,7 @@ pub struct DynamicQuery<'tcx, C: QueryCache> {
pub eval_always: bool, pub eval_always: bool,
pub dep_kind: DepKind, pub dep_kind: DepKind,
pub handle_cycle_error: HandleCycleError, pub handle_cycle_error: HandleCycleError,
pub query_state: FieldOffset<QueryStates<'tcx>, QueryState<C::Key, DepKind>>, pub query_state: FieldOffset<QueryStates<'tcx>, QueryState<C::Key>>,
pub query_cache: FieldOffset<QueryCaches<'tcx>, C>, pub query_cache: FieldOffset<QueryCaches<'tcx>, C>,
pub cache_on_disk: fn(tcx: TyCtxt<'tcx>, key: &C::Key) -> bool, pub cache_on_disk: fn(tcx: TyCtxt<'tcx>, key: &C::Key) -> bool,
pub execute_query: fn(tcx: TyCtxt<'tcx>, k: C::Key) -> C::Value, pub execute_query: fn(tcx: TyCtxt<'tcx>, k: C::Key) -> C::Value,
@ -53,7 +53,7 @@ pub struct DynamicQuery<'tcx, C: QueryCache> {
fn(tcx: TyCtxt<'tcx>, key: &C::Key, index: SerializedDepNodeIndex) -> bool, fn(tcx: TyCtxt<'tcx>, key: &C::Key, index: SerializedDepNodeIndex) -> bool,
pub hash_result: HashResult<C::Value>, pub hash_result: HashResult<C::Value>,
pub value_from_cycle_error: pub value_from_cycle_error:
fn(tcx: TyCtxt<'tcx>, cycle: &[QueryInfo<DepKind>], guar: ErrorGuaranteed) -> C::Value, fn(tcx: TyCtxt<'tcx>, cycle: &[QueryInfo], guar: ErrorGuaranteed) -> C::Value,
pub format_value: fn(&C::Value) -> String, pub format_value: fn(&C::Value) -> String,
} }
@ -402,7 +402,7 @@ macro_rules! define_callbacks {
#[derive(Default)] #[derive(Default)]
pub struct QueryStates<'tcx> { pub struct QueryStates<'tcx> {
$( $(
pub $name: QueryState<$($K)*, DepKind>, pub $name: QueryState<$($K)*>,
)* )*
} }
@ -516,7 +516,7 @@ macro_rules! define_feedable {
} }
} }
None => { None => {
let dep_node = dep_graph::DepNode::construct(tcx, dep_graph::DepKind::$name, &key); let dep_node = dep_graph::DepNode::construct(tcx, dep_graph::dep_kinds::$name, &key);
let dep_node_index = tcx.dep_graph.with_feed_task( let dep_node_index = tcx.dep_graph.with_feed_task(
dep_node, dep_node,
tcx, tcx,

View file

@ -1,4 +1,4 @@
use crate::dep_graph::DepKind; use crate::dep_graph::dep_kinds;
use crate::query::plumbing::CyclePlaceholder; use crate::query::plumbing::CyclePlaceholder;
use rustc_data_structures::fx::FxHashSet; use rustc_data_structures::fx::FxHashSet;
use rustc_errors::{pluralize, struct_span_err, Applicability, MultiSpan}; use rustc_errors::{pluralize, struct_span_err, Applicability, MultiSpan};
@ -13,34 +13,22 @@ use rustc_span::{ErrorGuaranteed, Span};
use std::fmt::Write; use std::fmt::Write;
impl<'tcx> Value<TyCtxt<'tcx>, DepKind> for Ty<'_> { impl<'tcx> Value<TyCtxt<'tcx>> for Ty<'_> {
fn from_cycle_error( fn from_cycle_error(tcx: TyCtxt<'tcx>, _: &[QueryInfo], guar: ErrorGuaranteed) -> Self {
tcx: TyCtxt<'tcx>,
_: &[QueryInfo<DepKind>],
guar: ErrorGuaranteed,
) -> Self {
// SAFETY: This is never called when `Self` is not `Ty<'tcx>`. // SAFETY: This is never called when `Self` is not `Ty<'tcx>`.
// FIXME: Represent the above fact in the trait system somehow. // FIXME: Represent the above fact in the trait system somehow.
unsafe { std::mem::transmute::<Ty<'tcx>, Ty<'_>>(Ty::new_error(tcx, guar)) } unsafe { std::mem::transmute::<Ty<'tcx>, Ty<'_>>(Ty::new_error(tcx, guar)) }
} }
} }
impl<'tcx> Value<TyCtxt<'tcx>, DepKind> for Result<ty::EarlyBinder<Ty<'_>>, CyclePlaceholder> { impl<'tcx> Value<TyCtxt<'tcx>> for Result<ty::EarlyBinder<Ty<'_>>, CyclePlaceholder> {
fn from_cycle_error( fn from_cycle_error(_tcx: TyCtxt<'tcx>, _: &[QueryInfo], guar: ErrorGuaranteed) -> Self {
_tcx: TyCtxt<'tcx>,
_: &[QueryInfo<DepKind>],
guar: ErrorGuaranteed,
) -> Self {
Err(CyclePlaceholder(guar)) Err(CyclePlaceholder(guar))
} }
} }
impl<'tcx> Value<TyCtxt<'tcx>, DepKind> for ty::SymbolName<'_> { impl<'tcx> Value<TyCtxt<'tcx>> for ty::SymbolName<'_> {
fn from_cycle_error( fn from_cycle_error(tcx: TyCtxt<'tcx>, _: &[QueryInfo], _guar: ErrorGuaranteed) -> Self {
tcx: TyCtxt<'tcx>,
_: &[QueryInfo<DepKind>],
_guar: ErrorGuaranteed,
) -> Self {
// SAFETY: This is never called when `Self` is not `SymbolName<'tcx>`. // SAFETY: This is never called when `Self` is not `SymbolName<'tcx>`.
// FIXME: Represent the above fact in the trait system somehow. // FIXME: Represent the above fact in the trait system somehow.
unsafe { unsafe {
@ -51,16 +39,12 @@ impl<'tcx> Value<TyCtxt<'tcx>, DepKind> for ty::SymbolName<'_> {
} }
} }
impl<'tcx> Value<TyCtxt<'tcx>, DepKind> for ty::Binder<'_, ty::FnSig<'_>> { impl<'tcx> Value<TyCtxt<'tcx>> for ty::Binder<'_, ty::FnSig<'_>> {
fn from_cycle_error( fn from_cycle_error(tcx: TyCtxt<'tcx>, stack: &[QueryInfo], guar: ErrorGuaranteed) -> Self {
tcx: TyCtxt<'tcx>,
stack: &[QueryInfo<DepKind>],
guar: ErrorGuaranteed,
) -> Self {
let err = Ty::new_error(tcx, guar); let err = Ty::new_error(tcx, guar);
let arity = if let Some(frame) = stack.get(0) let arity = if let Some(frame) = stack.get(0)
&& frame.query.dep_kind == DepKind::fn_sig && frame.query.dep_kind == dep_kinds::fn_sig
&& let Some(def_id) = frame.query.def_id && let Some(def_id) = frame.query.def_id
&& let Some(node) = tcx.hir().get_if_local(def_id) && let Some(node) = tcx.hir().get_if_local(def_id)
&& let Some(sig) = node.fn_sig() && let Some(sig) = node.fn_sig()
@ -85,16 +69,12 @@ impl<'tcx> Value<TyCtxt<'tcx>, DepKind> for ty::Binder<'_, ty::FnSig<'_>> {
} }
} }
impl<'tcx> Value<TyCtxt<'tcx>, DepKind> for Representability { impl<'tcx> Value<TyCtxt<'tcx>> for Representability {
fn from_cycle_error( fn from_cycle_error(tcx: TyCtxt<'tcx>, cycle: &[QueryInfo], _guar: ErrorGuaranteed) -> Self {
tcx: TyCtxt<'tcx>,
cycle: &[QueryInfo<DepKind>],
_guar: ErrorGuaranteed,
) -> Self {
let mut item_and_field_ids = Vec::new(); let mut item_and_field_ids = Vec::new();
let mut representable_ids = FxHashSet::default(); let mut representable_ids = FxHashSet::default();
for info in cycle { for info in cycle {
if info.query.dep_kind == DepKind::representability if info.query.dep_kind == dep_kinds::representability
&& let Some(field_id) = info.query.def_id && let Some(field_id) = info.query.def_id
&& let Some(field_id) = field_id.as_local() && let Some(field_id) = field_id.as_local()
&& let Some(DefKind::Field) = info.query.def_kind && let Some(DefKind::Field) = info.query.def_kind
@ -108,7 +88,7 @@ impl<'tcx> Value<TyCtxt<'tcx>, DepKind> for Representability {
} }
} }
for info in cycle { for info in cycle {
if info.query.dep_kind == DepKind::representability_adt_ty if info.query.dep_kind == dep_kinds::representability_adt_ty
&& let Some(def_id) = info.query.ty_adt_id && let Some(def_id) = info.query.ty_adt_id
&& let Some(def_id) = def_id.as_local() && let Some(def_id) = def_id.as_local()
&& !item_and_field_ids.iter().any(|&(id, _)| id == def_id) && !item_and_field_ids.iter().any(|&(id, _)| id == def_id)
@ -121,32 +101,20 @@ impl<'tcx> Value<TyCtxt<'tcx>, DepKind> for Representability {
} }
} }
impl<'tcx> Value<TyCtxt<'tcx>, DepKind> for ty::EarlyBinder<Ty<'_>> { impl<'tcx> Value<TyCtxt<'tcx>> for ty::EarlyBinder<Ty<'_>> {
fn from_cycle_error( fn from_cycle_error(tcx: TyCtxt<'tcx>, cycle: &[QueryInfo], guar: ErrorGuaranteed) -> Self {
tcx: TyCtxt<'tcx>,
cycle: &[QueryInfo<DepKind>],
guar: ErrorGuaranteed,
) -> Self {
ty::EarlyBinder::bind(Ty::from_cycle_error(tcx, cycle, guar)) ty::EarlyBinder::bind(Ty::from_cycle_error(tcx, cycle, guar))
} }
} }
impl<'tcx> Value<TyCtxt<'tcx>, DepKind> for ty::EarlyBinder<ty::Binder<'_, ty::FnSig<'_>>> { impl<'tcx> Value<TyCtxt<'tcx>> for ty::EarlyBinder<ty::Binder<'_, ty::FnSig<'_>>> {
fn from_cycle_error( fn from_cycle_error(tcx: TyCtxt<'tcx>, cycle: &[QueryInfo], guar: ErrorGuaranteed) -> Self {
tcx: TyCtxt<'tcx>,
cycle: &[QueryInfo<DepKind>],
guar: ErrorGuaranteed,
) -> Self {
ty::EarlyBinder::bind(ty::Binder::from_cycle_error(tcx, cycle, guar)) ty::EarlyBinder::bind(ty::Binder::from_cycle_error(tcx, cycle, guar))
} }
} }
impl<'tcx, T> Value<TyCtxt<'tcx>, DepKind> for Result<T, &'_ ty::layout::LayoutError<'_>> { impl<'tcx, T> Value<TyCtxt<'tcx>> for Result<T, &'_ ty::layout::LayoutError<'_>> {
fn from_cycle_error( fn from_cycle_error(_tcx: TyCtxt<'tcx>, _cycle: &[QueryInfo], _guar: ErrorGuaranteed) -> Self {
_tcx: TyCtxt<'tcx>,
_cycle: &[QueryInfo<DepKind>],
_guar: ErrorGuaranteed,
) -> Self {
// tcx.arena.alloc cannot be used because we are not allowed to use &'tcx LayoutError under // tcx.arena.alloc cannot be used because we are not allowed to use &'tcx LayoutError under
// min_specialization. Since this is an error path anyways, leaking doesn't matter (and really, // min_specialization. Since this is an error path anyways, leaking doesn't matter (and really,
// tcx.arena.alloc is pretty much equal to leaking). // tcx.arena.alloc is pretty much equal to leaking).

View file

@ -92,7 +92,7 @@ where
} }
#[inline(always)] #[inline(always)]
fn query_state<'a>(self, qcx: QueryCtxt<'tcx>) -> &'a QueryState<Self::Key, DepKind> fn query_state<'a>(self, qcx: QueryCtxt<'tcx>) -> &'a QueryState<Self::Key>
where where
QueryCtxt<'tcx>: 'a, QueryCtxt<'tcx>: 'a,
{ {
@ -145,7 +145,7 @@ where
fn value_from_cycle_error( fn value_from_cycle_error(
self, self,
tcx: TyCtxt<'tcx>, tcx: TyCtxt<'tcx>,
cycle: &[QueryInfo<DepKind>], cycle: &[QueryInfo],
guar: ErrorGuaranteed, guar: ErrorGuaranteed,
) -> Self::Value { ) -> Self::Value {
(self.dynamic.value_from_cycle_error)(tcx, cycle, guar) (self.dynamic.value_from_cycle_error)(tcx, cycle, guar)
@ -198,6 +198,8 @@ trait QueryConfigRestored<'tcx> {
type RestoredValue; type RestoredValue;
type Config: QueryConfig<QueryCtxt<'tcx>>; type Config: QueryConfig<QueryCtxt<'tcx>>;
const NAME: &'static &'static str;
fn config(tcx: TyCtxt<'tcx>) -> Self::Config; fn config(tcx: TyCtxt<'tcx>) -> Self::Config;
fn restore(value: <Self::Config as QueryConfig<QueryCtxt<'tcx>>>::Value) fn restore(value: <Self::Config as QueryConfig<QueryCtxt<'tcx>>>::Value)
-> Self::RestoredValue; -> Self::RestoredValue;

View file

@ -8,7 +8,9 @@ use crate::QueryConfigRestored;
use rustc_data_structures::stable_hasher::{Hash64, HashStable, StableHasher}; use rustc_data_structures::stable_hasher::{Hash64, HashStable, StableHasher};
use rustc_data_structures::sync::Lock; use rustc_data_structures::sync::Lock;
use rustc_errors::Diagnostic; use rustc_errors::Diagnostic;
use rustc_index::Idx; use rustc_index::Idx;
use rustc_middle::dep_graph::dep_kinds;
use rustc_middle::dep_graph::{ use rustc_middle::dep_graph::{
self, DepKind, DepKindStruct, DepNode, DepNodeIndex, SerializedDepNodeIndex, self, DepKind, DepKindStruct, DepNode, DepNodeIndex, SerializedDepNodeIndex,
}; };
@ -53,7 +55,7 @@ impl<'tcx> std::ops::Deref for QueryCtxt<'tcx> {
} }
impl<'tcx> HasDepContext for QueryCtxt<'tcx> { impl<'tcx> HasDepContext for QueryCtxt<'tcx> {
type DepKind = rustc_middle::dep_graph::DepKind; type Deps = rustc_middle::dep_graph::DepsType;
type DepContext = TyCtxt<'tcx>; type DepContext = TyCtxt<'tcx>;
#[inline] #[inline]
@ -78,7 +80,7 @@ impl QueryContext for QueryCtxt<'_> {
tls::with_related_context(self.tcx, |icx| icx.query) tls::with_related_context(self.tcx, |icx| icx.query)
} }
fn try_collect_active_jobs(self) -> Option<QueryMap<DepKind>> { fn try_collect_active_jobs(self) -> Option<QueryMap> {
let mut jobs = QueryMap::default(); let mut jobs = QueryMap::default();
for collect in super::TRY_COLLECT_ACTIVE_JOBS.iter() { for collect in super::TRY_COLLECT_ACTIVE_JOBS.iter() {
@ -154,7 +156,7 @@ impl QueryContext for QueryCtxt<'_> {
let mut span = None; let mut span = None;
let mut layout_of_depth = None; let mut layout_of_depth = None;
if let Some(map) = self.try_collect_active_jobs() { if let Some(map) = self.try_collect_active_jobs() {
if let Some((info, depth)) = job.try_find_layout_root(map) { if let Some((info, depth)) = job.try_find_layout_root(map, dep_kinds::layout_of) {
span = Some(info.job.span); span = Some(info.job.span);
layout_of_depth = Some(LayoutOfDepth { desc: info.query.description, depth }); layout_of_depth = Some(LayoutOfDepth { desc: info.query.description, depth });
} }
@ -300,7 +302,7 @@ pub(crate) fn create_query_frame<
key: K, key: K,
kind: DepKind, kind: DepKind,
name: &'static str, name: &'static str,
) -> QueryStackFrame<DepKind> { ) -> QueryStackFrame {
// Avoid calling queries while formatting the description // Avoid calling queries while formatting the description
let description = ty::print::with_no_queries!( let description = ty::print::with_no_queries!(
// Disable visible paths printing for performance reasons. // Disable visible paths printing for performance reasons.
@ -312,7 +314,7 @@ pub(crate) fn create_query_frame<
); );
let description = let description =
if tcx.sess.verbose() { format!("{description} [{name:?}]") } else { description }; if tcx.sess.verbose() { format!("{description} [{name:?}]") } else { description };
let span = if kind == dep_graph::DepKind::def_span || with_no_queries() { let span = if kind == dep_graph::dep_kinds::def_span || with_no_queries() {
// The `def_span` query is used to calculate `default_span`, // The `def_span` query is used to calculate `default_span`,
// so exit to avoid infinite recursion. // so exit to avoid infinite recursion.
None None
@ -320,7 +322,7 @@ pub(crate) fn create_query_frame<
Some(key.default_span(tcx)) Some(key.default_span(tcx))
}; };
let def_id = key.key_as_def_id(); let def_id = key.key_as_def_id();
let def_kind = if kind == dep_graph::DepKind::opt_def_kind || with_no_queries() { let def_kind = if kind == dep_graph::dep_kinds::opt_def_kind || with_no_queries() {
// Try to avoid infinite recursion. // Try to avoid infinite recursion.
None None
} else { } else {
@ -329,7 +331,7 @@ pub(crate) fn create_query_frame<
let hash = || { let hash = || {
tcx.with_stable_hashing_context(|mut hcx| { tcx.with_stable_hashing_context(|mut hcx| {
let mut hasher = StableHasher::new(); let mut hasher = StableHasher::new();
std::mem::discriminant(&kind).hash_stable(&mut hcx, &mut hasher); kind.as_usize().hash_stable(&mut hcx, &mut hasher);
key.hash_stable(&mut hcx, &mut hasher); key.hash_stable(&mut hcx, &mut hasher);
hasher.finish::<Hash64>() hasher.finish::<Hash64>()
}) })
@ -430,8 +432,8 @@ where
// hit the cache instead of having to go through `force_from_dep_node`. // hit the cache instead of having to go through `force_from_dep_node`.
// This assertion makes sure, we actually keep applying the solution above. // This assertion makes sure, we actually keep applying the solution above.
debug_assert!( debug_assert!(
dep_node.kind != DepKind::codegen_unit, dep_node.kind != dep_kinds::codegen_unit,
"calling force_from_dep_node() on DepKind::codegen_unit" "calling force_from_dep_node() on dep_kinds::codegen_unit"
); );
if let Some(key) = Q::Key::recover(tcx, &dep_node) { if let Some(key) = Q::Key::recover(tcx, &dep_node) {
@ -457,6 +459,7 @@ where
fingerprint_style, fingerprint_style,
force_from_dep_node: None, force_from_dep_node: None,
try_load_from_on_disk_cache: None, try_load_from_on_disk_cache: None,
name: Q::NAME,
}; };
} }
@ -470,6 +473,7 @@ where
try_load_from_on_disk_cache: Some(|tcx, dep_node| { try_load_from_on_disk_cache: Some(|tcx, dep_node| {
try_load_from_on_disk_cache(Q::config(tcx), tcx, dep_node) try_load_from_on_disk_cache(Q::config(tcx), tcx, dep_node)
}), }),
name: Q::NAME,
} }
} }
@ -565,7 +569,7 @@ macro_rules! define_queries {
DynamicQuery { DynamicQuery {
name: stringify!($name), name: stringify!($name),
eval_always: is_eval_always!([$($modifiers)*]), eval_always: is_eval_always!([$($modifiers)*]),
dep_kind: dep_graph::DepKind::$name, dep_kind: dep_graph::dep_kinds::$name,
handle_cycle_error: handle_cycle_error!([$($modifiers)*]), handle_cycle_error: handle_cycle_error!([$($modifiers)*]),
query_state: offset_of!(QueryStates<'tcx> => $name), query_state: offset_of!(QueryStates<'tcx> => $name),
query_cache: offset_of!(QueryCaches<'tcx> => $name), query_cache: offset_of!(QueryCaches<'tcx> => $name),
@ -636,6 +640,8 @@ macro_rules! define_queries {
{ feedable!([$($modifiers)*]) }, { feedable!([$($modifiers)*]) },
>; >;
const NAME: &'static &'static str = &stringify!($name);
#[inline(always)] #[inline(always)]
fn config(tcx: TyCtxt<'tcx>) -> Self::Config { fn config(tcx: TyCtxt<'tcx>) -> Self::Config {
DynamicConfig { DynamicConfig {
@ -649,9 +655,9 @@ macro_rules! define_queries {
} }
} }
pub fn try_collect_active_jobs<'tcx>(tcx: TyCtxt<'tcx>, qmap: &mut QueryMap<DepKind>) { pub fn try_collect_active_jobs<'tcx>(tcx: TyCtxt<'tcx>, qmap: &mut QueryMap) {
let make_query = |tcx, key| { let make_query = |tcx, key| {
let kind = rustc_middle::dep_graph::DepKind::$name; let kind = rustc_middle::dep_graph::dep_kinds::$name;
let name = stringify!($name); let name = stringify!($name);
$crate::plumbing::create_query_frame(tcx, rustc_middle::query::descs::$name, key, kind, name) $crate::plumbing::create_query_frame(tcx, rustc_middle::query::descs::$name, key, kind, name)
}; };
@ -709,7 +715,7 @@ macro_rules! define_queries {
// These arrays are used for iteration and can't be indexed by `DepKind`. // These arrays are used for iteration and can't be indexed by `DepKind`.
const TRY_COLLECT_ACTIVE_JOBS: &[for<'tcx> fn(TyCtxt<'tcx>, &mut QueryMap<DepKind>)] = const TRY_COLLECT_ACTIVE_JOBS: &[for<'tcx> fn(TyCtxt<'tcx>, &mut QueryMap)] =
&[$(query_impl::$name::try_collect_active_jobs),*]; &[$(query_impl::$name::try_collect_active_jobs),*];
const ALLOC_SELF_PROFILE_QUERY_STRINGS: &[ const ALLOC_SELF_PROFILE_QUERY_STRINGS: &[
@ -737,6 +743,7 @@ macro_rules! define_queries {
fingerprint_style: FingerprintStyle::Unit, fingerprint_style: FingerprintStyle::Unit,
force_from_dep_node: Some(|_, dep_node| bug!("force_from_dep_node: encountered {:?}", dep_node)), force_from_dep_node: Some(|_, dep_node| bug!("force_from_dep_node: encountered {:?}", dep_node)),
try_load_from_on_disk_cache: None, try_load_from_on_disk_cache: None,
name: &"Null",
} }
} }
@ -748,6 +755,7 @@ macro_rules! define_queries {
fingerprint_style: FingerprintStyle::Unit, fingerprint_style: FingerprintStyle::Unit,
force_from_dep_node: Some(|_, dep_node| bug!("force_from_dep_node: encountered {:?}", dep_node)), force_from_dep_node: Some(|_, dep_node| bug!("force_from_dep_node: encountered {:?}", dep_node)),
try_load_from_on_disk_cache: None, try_load_from_on_disk_cache: None,
name: &"Red",
} }
} }
@ -758,6 +766,7 @@ macro_rules! define_queries {
fingerprint_style: FingerprintStyle::Unit, fingerprint_style: FingerprintStyle::Unit,
force_from_dep_node: None, force_from_dep_node: None,
try_load_from_on_disk_cache: None, try_load_from_on_disk_cache: None,
name: &"TraitSelect",
} }
} }
@ -768,6 +777,7 @@ macro_rules! define_queries {
fingerprint_style: FingerprintStyle::Opaque, fingerprint_style: FingerprintStyle::Opaque,
force_from_dep_node: None, force_from_dep_node: None,
try_load_from_on_disk_cache: None, try_load_from_on_disk_cache: None,
name: &"CompileCodegenUnit",
} }
} }
@ -778,6 +788,7 @@ macro_rules! define_queries {
fingerprint_style: FingerprintStyle::Opaque, fingerprint_style: FingerprintStyle::Opaque,
force_from_dep_node: None, force_from_dep_node: None,
try_load_from_on_disk_cache: None, try_load_from_on_disk_cache: None,
name: &"CompileMonoItem",
} }
} }

View file

@ -1,6 +1,6 @@
//! Code for debugging the dep-graph. //! Code for debugging the dep-graph.
use super::{DepKind, DepNode, DepNodeIndex}; use super::{DepNode, DepNodeIndex};
use rustc_data_structures::fx::FxHashMap; use rustc_data_structures::fx::FxHashMap;
use rustc_data_structures::sync::Lock; use rustc_data_structures::sync::Lock;
use std::error::Error; use std::error::Error;
@ -28,7 +28,7 @@ impl DepNodeFilter {
} }
/// Tests whether `node` meets the filter, returning true if so. /// Tests whether `node` meets the filter, returning true if so.
pub fn test<K: DepKind>(&self, node: &DepNode<K>) -> bool { pub fn test(&self, node: &DepNode) -> bool {
let debug_str = format!("{node:?}"); let debug_str = format!("{node:?}");
self.text.split('&').map(|s| s.trim()).all(|f| debug_str.contains(f)) self.text.split('&').map(|s| s.trim()).all(|f| debug_str.contains(f))
} }
@ -36,14 +36,14 @@ impl DepNodeFilter {
/// A filter like `F -> G` where `F` and `G` are valid dep-node /// A filter like `F -> G` where `F` and `G` are valid dep-node
/// filters. This can be used to test the source/target independently. /// filters. This can be used to test the source/target independently.
pub struct EdgeFilter<K: DepKind> { pub struct EdgeFilter {
pub source: DepNodeFilter, pub source: DepNodeFilter,
pub target: DepNodeFilter, pub target: DepNodeFilter,
pub index_to_node: Lock<FxHashMap<DepNodeIndex, DepNode<K>>>, pub index_to_node: Lock<FxHashMap<DepNodeIndex, DepNode>>,
} }
impl<K: DepKind> EdgeFilter<K> { impl EdgeFilter {
pub fn new(test: &str) -> Result<EdgeFilter<K>, Box<dyn Error>> { pub fn new(test: &str) -> Result<EdgeFilter, Box<dyn Error>> {
let parts: Vec<_> = test.split("->").collect(); let parts: Vec<_> = test.split("->").collect();
if parts.len() != 2 { if parts.len() != 2 {
Err(format!("expected a filter like `a&b -> c&d`, not `{test}`").into()) Err(format!("expected a filter like `a&b -> c&d`, not `{test}`").into())
@ -57,7 +57,7 @@ impl<K: DepKind> EdgeFilter<K> {
} }
#[cfg(debug_assertions)] #[cfg(debug_assertions)]
pub fn test(&self, source: &DepNode<K>, target: &DepNode<K>) -> bool { pub fn test(&self, source: &DepNode, target: &DepNode) -> bool {
self.source.test(source) && self.target.test(target) self.source.test(source) && self.target.test(target)
} }
} }

View file

@ -42,36 +42,84 @@
//! `DefId` it was computed from. In other cases, too much information gets //! `DefId` it was computed from. In other cases, too much information gets
//! lost during fingerprint computation. //! lost during fingerprint computation.
use super::{DepContext, DepKind, FingerprintStyle}; use super::{DepContext, FingerprintStyle};
use crate::ich::StableHashingContext; use crate::ich::StableHashingContext;
use rustc_data_structures::fingerprint::{Fingerprint, PackedFingerprint}; use rustc_data_structures::fingerprint::{Fingerprint, PackedFingerprint};
use rustc_data_structures::stable_hasher::{HashStable, StableHasher, StableOrd, ToStableHashKey}; use rustc_data_structures::stable_hasher::{HashStable, StableHasher, StableOrd, ToStableHashKey};
use rustc_data_structures::AtomicRef;
use rustc_hir::definitions::DefPathHash; use rustc_hir::definitions::DefPathHash;
use std::fmt; use std::fmt;
use std::hash::Hash; use std::hash::Hash;
#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Encodable, Decodable)] /// This serves as an index into arrays built by `make_dep_kind_array`.
pub struct DepNode<K> { #[derive(Clone, Copy, PartialEq, Eq, Hash)]
pub kind: K, pub struct DepKind {
variant: u16,
}
impl DepKind {
#[inline]
pub const fn new(variant: u16) -> Self {
Self { variant }
}
#[inline]
pub const fn as_inner(&self) -> u16 {
self.variant
}
#[inline]
pub const fn as_usize(&self) -> usize {
self.variant as usize
}
}
static_assert_size!(DepKind, 2);
pub fn default_dep_kind_debug(kind: DepKind, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("DepKind").field("variant", &kind.variant).finish()
}
pub static DEP_KIND_DEBUG: AtomicRef<fn(DepKind, &mut fmt::Formatter<'_>) -> fmt::Result> =
AtomicRef::new(&(default_dep_kind_debug as fn(_, &mut fmt::Formatter<'_>) -> _));
impl fmt::Debug for DepKind {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
(*DEP_KIND_DEBUG)(*self, f)
}
}
#[derive(Clone, Copy, PartialEq, Eq, Hash)]
pub struct DepNode {
pub kind: DepKind,
pub hash: PackedFingerprint, pub hash: PackedFingerprint,
} }
impl<K: DepKind> DepNode<K> { // We keep a lot of `DepNode`s in memory during compilation. It's not
// required that their size stay the same, but we don't want to change
// it inadvertently. This assert just ensures we're aware of any change.
#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
static_assert_size!(DepNode, 18);
#[cfg(not(any(target_arch = "x86", target_arch = "x86_64")))]
static_assert_size!(DepNode, 24);
impl DepNode {
/// Creates a new, parameterless DepNode. This method will assert /// Creates a new, parameterless DepNode. This method will assert
/// that the DepNode corresponding to the given DepKind actually /// that the DepNode corresponding to the given DepKind actually
/// does not require any parameters. /// does not require any parameters.
pub fn new_no_params<Tcx>(tcx: Tcx, kind: K) -> DepNode<K> pub fn new_no_params<Tcx>(tcx: Tcx, kind: DepKind) -> DepNode
where where
Tcx: super::DepContext<DepKind = K>, Tcx: super::DepContext,
{ {
debug_assert_eq!(tcx.fingerprint_style(kind), FingerprintStyle::Unit); debug_assert_eq!(tcx.fingerprint_style(kind), FingerprintStyle::Unit);
DepNode { kind, hash: Fingerprint::ZERO.into() } DepNode { kind, hash: Fingerprint::ZERO.into() }
} }
pub fn construct<Tcx, Key>(tcx: Tcx, kind: K, arg: &Key) -> DepNode<K> pub fn construct<Tcx, Key>(tcx: Tcx, kind: DepKind, arg: &Key) -> DepNode
where where
Tcx: super::DepContext<DepKind = K>, Tcx: super::DepContext,
Key: DepNodeParams<Tcx>, Key: DepNodeParams<Tcx>,
{ {
let hash = arg.to_fingerprint(tcx); let hash = arg.to_fingerprint(tcx);
@ -93,18 +141,25 @@ impl<K: DepKind> DepNode<K> {
/// Construct a DepNode from the given DepKind and DefPathHash. This /// Construct a DepNode from the given DepKind and DefPathHash. This
/// method will assert that the given DepKind actually requires a /// method will assert that the given DepKind actually requires a
/// single DefId/DefPathHash parameter. /// single DefId/DefPathHash parameter.
pub fn from_def_path_hash<Tcx>(tcx: Tcx, def_path_hash: DefPathHash, kind: K) -> Self pub fn from_def_path_hash<Tcx>(tcx: Tcx, def_path_hash: DefPathHash, kind: DepKind) -> Self
where where
Tcx: super::DepContext<DepKind = K>, Tcx: super::DepContext,
{ {
debug_assert!(tcx.fingerprint_style(kind) == FingerprintStyle::DefPathHash); debug_assert!(tcx.fingerprint_style(kind) == FingerprintStyle::DefPathHash);
DepNode { kind, hash: def_path_hash.0.into() } DepNode { kind, hash: def_path_hash.0.into() }
} }
} }
impl<K: DepKind> fmt::Debug for DepNode<K> { pub fn default_dep_node_debug(node: DepNode, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("DepNode").field("kind", &node.kind).field("hash", &node.hash).finish()
}
pub static DEP_NODE_DEBUG: AtomicRef<fn(DepNode, &mut fmt::Formatter<'_>) -> fmt::Result> =
AtomicRef::new(&(default_dep_node_debug as fn(_, &mut fmt::Formatter<'_>) -> _));
impl fmt::Debug for DepNode {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
K::debug_node(self, f) (*DEP_NODE_DEBUG)(*self, f)
} }
} }
@ -129,7 +184,7 @@ pub trait DepNodeParams<Tcx: DepContext>: fmt::Debug + Sized {
/// `fingerprint_style()` is not `FingerprintStyle::Opaque`. /// `fingerprint_style()` is not `FingerprintStyle::Opaque`.
/// It is always valid to return `None` here, in which case incremental /// It is always valid to return `None` here, in which case incremental
/// compilation will treat the query as having changed instead of forcing it. /// compilation will treat the query as having changed instead of forcing it.
fn recover(tcx: Tcx, dep_node: &DepNode<Tcx::DepKind>) -> Option<Self>; fn recover(tcx: Tcx, dep_node: &DepNode) -> Option<Self>;
} }
impl<Tcx: DepContext, T> DepNodeParams<Tcx> for T impl<Tcx: DepContext, T> DepNodeParams<Tcx> for T
@ -156,7 +211,7 @@ where
} }
#[inline(always)] #[inline(always)]
default fn recover(_: Tcx, _: &DepNode<Tcx::DepKind>) -> Option<Self> { default fn recover(_: Tcx, _: &DepNode) -> Option<Self> {
None None
} }
} }
@ -216,10 +271,13 @@ pub struct DepKindStruct<Tcx: DepContext> {
/// with kind `MirValidated`, we know that the GUID/fingerprint of the `DepNode` /// with kind `MirValidated`, we know that the GUID/fingerprint of the `DepNode`
/// is actually a `DefPathHash`, and can therefore just look up the corresponding /// is actually a `DefPathHash`, and can therefore just look up the corresponding
/// `DefId` in `tcx.def_path_hash_to_def_id`. /// `DefId` in `tcx.def_path_hash_to_def_id`.
pub force_from_dep_node: Option<fn(tcx: Tcx, dep_node: DepNode<Tcx::DepKind>) -> bool>, pub force_from_dep_node: Option<fn(tcx: Tcx, dep_node: DepNode) -> bool>,
/// Invoke a query to put the on-disk cached value in memory. /// Invoke a query to put the on-disk cached value in memory.
pub try_load_from_on_disk_cache: Option<fn(Tcx, DepNode<Tcx::DepKind>)>, pub try_load_from_on_disk_cache: Option<fn(Tcx, DepNode)>,
/// The name of this dep kind.
pub name: &'static &'static str,
} }
/// A "work product" corresponds to a `.o` (or other) file that we /// A "work product" corresponds to a `.o` (or other) file that we

View file

@ -17,7 +17,7 @@ use std::sync::atomic::Ordering::Relaxed;
use super::query::DepGraphQuery; use super::query::DepGraphQuery;
use super::serialized::{GraphEncoder, SerializedDepGraph, SerializedDepNodeIndex}; use super::serialized::{GraphEncoder, SerializedDepGraph, SerializedDepNodeIndex};
use super::{DepContext, DepKind, DepNode, HasDepContext, WorkProductId}; use super::{DepContext, DepKind, DepNode, Deps, HasDepContext, WorkProductId};
use crate::dep_graph::EdgesVec; use crate::dep_graph::EdgesVec;
use crate::ich::StableHashingContext; use crate::ich::StableHashingContext;
use crate::query::{QueryContext, QuerySideEffects}; use crate::query::{QueryContext, QuerySideEffects};
@ -26,8 +26,8 @@ use crate::query::{QueryContext, QuerySideEffects};
use {super::debug::EdgeFilter, std::env}; use {super::debug::EdgeFilter, std::env};
#[derive(Clone)] #[derive(Clone)]
pub struct DepGraph<K: DepKind> { pub struct DepGraph<D: Deps> {
data: Option<Lrc<DepGraphData<K>>>, data: Option<Lrc<DepGraphData<D>>>,
/// This field is used for assigning DepNodeIndices when running in /// This field is used for assigning DepNodeIndices when running in
/// non-incremental mode. Even in non-incremental mode we make sure that /// non-incremental mode. Even in non-incremental mode we make sure that
@ -74,16 +74,16 @@ impl DepNodeColor {
} }
} }
pub struct DepGraphData<K: DepKind> { pub struct DepGraphData<D: Deps> {
/// The new encoding of the dependency graph, optimized for red/green /// The new encoding of the dependency graph, optimized for red/green
/// tracking. The `current` field is the dependency graph of only the /// tracking. The `current` field is the dependency graph of only the
/// current compilation session: We don't merge the previous dep-graph into /// current compilation session: We don't merge the previous dep-graph into
/// current one anymore, but we do reference shared data to save space. /// current one anymore, but we do reference shared data to save space.
current: CurrentDepGraph<K>, current: CurrentDepGraph<D>,
/// The dep-graph from the previous compilation session. It contains all /// The dep-graph from the previous compilation session. It contains all
/// nodes and edges as well as all fingerprints of nodes that have them. /// nodes and edges as well as all fingerprints of nodes that have them.
previous: SerializedDepGraph<K>, previous: SerializedDepGraph,
colors: DepNodeColorMap, colors: DepNodeColorMap,
@ -95,12 +95,12 @@ pub struct DepGraphData<K: DepKind> {
/// this map. We can later look for and extract that data. /// this map. We can later look for and extract that data.
previous_work_products: WorkProductMap, previous_work_products: WorkProductMap,
dep_node_debug: Lock<FxHashMap<DepNode<K>, String>>, dep_node_debug: Lock<FxHashMap<DepNode, String>>,
/// Used by incremental compilation tests to assert that /// Used by incremental compilation tests to assert that
/// a particular query result was decoded from disk /// a particular query result was decoded from disk
/// (not just marked green) /// (not just marked green)
debug_loaded_from_disk: Lock<FxHashSet<DepNode<K>>>, debug_loaded_from_disk: Lock<FxHashSet<DepNode>>,
} }
pub fn hash_result<R>(hcx: &mut StableHashingContext<'_>, result: &R) -> Fingerprint pub fn hash_result<R>(hcx: &mut StableHashingContext<'_>, result: &R) -> Fingerprint
@ -112,15 +112,15 @@ where
stable_hasher.finish() stable_hasher.finish()
} }
impl<K: DepKind> DepGraph<K> { impl<D: Deps> DepGraph<D> {
pub fn new( pub fn new(
profiler: &SelfProfilerRef, profiler: &SelfProfilerRef,
prev_graph: SerializedDepGraph<K>, prev_graph: SerializedDepGraph,
prev_work_products: WorkProductMap, prev_work_products: WorkProductMap,
encoder: FileEncoder, encoder: FileEncoder,
record_graph: bool, record_graph: bool,
record_stats: bool, record_stats: bool,
) -> DepGraph<K> { ) -> DepGraph<D> {
let prev_graph_node_count = prev_graph.node_count(); let prev_graph_node_count = prev_graph.node_count();
let current = CurrentDepGraph::new( let current = CurrentDepGraph::new(
@ -136,7 +136,7 @@ impl<K: DepKind> DepGraph<K> {
// Instantiate a dependy-less node only once for anonymous queries. // Instantiate a dependy-less node only once for anonymous queries.
let _green_node_index = current.intern_new_node( let _green_node_index = current.intern_new_node(
profiler, profiler,
DepNode { kind: DepKind::NULL, hash: current.anon_id_seed.into() }, DepNode { kind: D::DEP_KIND_NULL, hash: current.anon_id_seed.into() },
EdgesVec::new(), EdgesVec::new(),
Fingerprint::ZERO, Fingerprint::ZERO,
); );
@ -146,7 +146,7 @@ impl<K: DepKind> DepGraph<K> {
let (red_node_index, red_node_prev_index_and_color) = current.intern_node( let (red_node_index, red_node_prev_index_and_color) = current.intern_node(
profiler, profiler,
&prev_graph, &prev_graph,
DepNode { kind: DepKind::RED, hash: Fingerprint::ZERO.into() }, DepNode { kind: D::DEP_KIND_RED, hash: Fingerprint::ZERO.into() },
EdgesVec::new(), EdgesVec::new(),
None, None,
false, false,
@ -181,12 +181,12 @@ impl<K: DepKind> DepGraph<K> {
} }
} }
pub fn new_disabled() -> DepGraph<K> { pub fn new_disabled() -> DepGraph<D> {
DepGraph { data: None, virtual_dep_node_index: Lrc::new(AtomicU32::new(0)) } DepGraph { data: None, virtual_dep_node_index: Lrc::new(AtomicU32::new(0)) }
} }
#[inline] #[inline]
pub fn data(&self) -> Option<&DepGraphData<K>> { pub fn data(&self) -> Option<&DepGraphData<D>> {
self.data.as_deref() self.data.as_deref()
} }
@ -196,7 +196,7 @@ impl<K: DepKind> DepGraph<K> {
self.data.is_some() self.data.is_some()
} }
pub fn with_query(&self, f: impl Fn(&DepGraphQuery<K>)) { pub fn with_query(&self, f: impl Fn(&DepGraphQuery)) {
if let Some(data) = &self.data { if let Some(data) = &self.data {
data.current.encoder.borrow().with_query(f) data.current.encoder.borrow().with_query(f)
} }
@ -204,7 +204,7 @@ impl<K: DepKind> DepGraph<K> {
pub fn assert_ignored(&self) { pub fn assert_ignored(&self) {
if let Some(..) = self.data { if let Some(..) = self.data {
K::read_deps(|task_deps| { D::read_deps(|task_deps| {
assert_matches!( assert_matches!(
task_deps, task_deps,
TaskDepsRef::Ignore, TaskDepsRef::Ignore,
@ -218,7 +218,7 @@ impl<K: DepKind> DepGraph<K> {
where where
OP: FnOnce() -> R, OP: FnOnce() -> R,
{ {
K::with_deps(TaskDepsRef::Ignore, op) D::with_deps(TaskDepsRef::Ignore, op)
} }
/// Used to wrap the deserialization of a query result from disk, /// Used to wrap the deserialization of a query result from disk,
@ -271,13 +271,13 @@ impl<K: DepKind> DepGraph<K> {
where where
OP: FnOnce() -> R, OP: FnOnce() -> R,
{ {
K::with_deps(TaskDepsRef::Forbid, op) D::with_deps(TaskDepsRef::Forbid, op)
} }
#[inline(always)] #[inline(always)]
pub fn with_task<Ctxt: HasDepContext<DepKind = K>, A: Debug, R>( pub fn with_task<Ctxt: HasDepContext<Deps = D>, A: Debug, R>(
&self, &self,
key: DepNode<K>, key: DepNode,
cx: Ctxt, cx: Ctxt,
arg: A, arg: A,
task: fn(Ctxt, A) -> R, task: fn(Ctxt, A) -> R,
@ -289,10 +289,10 @@ impl<K: DepKind> DepGraph<K> {
} }
} }
pub fn with_anon_task<Tcx: DepContext<DepKind = K>, OP, R>( pub fn with_anon_task<Tcx: DepContext<Deps = D>, OP, R>(
&self, &self,
cx: Tcx, cx: Tcx,
dep_kind: K, dep_kind: DepKind,
op: OP, op: OP,
) -> (R, DepNodeIndex) ) -> (R, DepNodeIndex)
where where
@ -305,7 +305,7 @@ impl<K: DepKind> DepGraph<K> {
} }
} }
impl<K: DepKind> DepGraphData<K> { impl<D: Deps> DepGraphData<D> {
/// Starts a new dep-graph task. Dep-graph tasks are specified /// Starts a new dep-graph task. Dep-graph tasks are specified
/// using a free function (`task`) and **not** a closure -- this /// using a free function (`task`) and **not** a closure -- this
/// is intentional because we want to exercise tight control over /// is intentional because we want to exercise tight control over
@ -334,9 +334,9 @@ impl<K: DepKind> DepGraphData<K> {
/// ///
/// [rustc dev guide]: https://rustc-dev-guide.rust-lang.org/incremental-compilation.html /// [rustc dev guide]: https://rustc-dev-guide.rust-lang.org/incremental-compilation.html
#[inline(always)] #[inline(always)]
pub fn with_task<Ctxt: HasDepContext<DepKind = K>, A: Debug, R>( pub fn with_task<Ctxt: HasDepContext<Deps = D>, A: Debug, R>(
&self, &self,
key: DepNode<K>, key: DepNode,
cx: Ctxt, cx: Ctxt,
arg: A, arg: A,
task: fn(Ctxt, A) -> R, task: fn(Ctxt, A) -> R,
@ -354,7 +354,7 @@ impl<K: DepKind> DepGraphData<K> {
- dep-node: {key:?}" - dep-node: {key:?}"
); );
let with_deps = |task_deps| K::with_deps(task_deps, || task(cx, arg)); let with_deps = |task_deps| D::with_deps(task_deps, || task(cx, arg));
let (result, edges) = if cx.dep_context().is_eval_always(key.kind) { let (result, edges) = if cx.dep_context().is_eval_always(key.kind) {
(with_deps(TaskDepsRef::EvalAlways), EdgesVec::new()) (with_deps(TaskDepsRef::EvalAlways), EdgesVec::new())
} else { } else {
@ -402,10 +402,10 @@ impl<K: DepKind> DepGraphData<K> {
/// Executes something within an "anonymous" task, that is, a task the /// Executes something within an "anonymous" task, that is, a task the
/// `DepNode` of which is determined by the list of inputs it read from. /// `DepNode` of which is determined by the list of inputs it read from.
pub fn with_anon_task<Tcx: DepContext<DepKind = K>, OP, R>( pub fn with_anon_task<Tcx: DepContext<Deps = D>, OP, R>(
&self, &self,
cx: Tcx, cx: Tcx,
dep_kind: K, dep_kind: DepKind,
op: OP, op: OP,
) -> (R, DepNodeIndex) ) -> (R, DepNodeIndex)
where where
@ -414,7 +414,7 @@ impl<K: DepKind> DepGraphData<K> {
debug_assert!(!cx.is_eval_always(dep_kind)); debug_assert!(!cx.is_eval_always(dep_kind));
let task_deps = Lock::new(TaskDeps::default()); let task_deps = Lock::new(TaskDeps::default());
let result = K::with_deps(TaskDepsRef::Allow(&task_deps), op); let result = D::with_deps(TaskDepsRef::Allow(&task_deps), op);
let task_deps = task_deps.into_inner(); let task_deps = task_deps.into_inner();
let task_deps = task_deps.reads; let task_deps = task_deps.reads;
@ -461,11 +461,11 @@ impl<K: DepKind> DepGraphData<K> {
} }
} }
impl<K: DepKind> DepGraph<K> { impl<D: Deps> DepGraph<D> {
#[inline] #[inline]
pub fn read_index(&self, dep_node_index: DepNodeIndex) { pub fn read_index(&self, dep_node_index: DepNodeIndex) {
if let Some(ref data) = self.data { if let Some(ref data) = self.data {
K::read_deps(|task_deps| { D::read_deps(|task_deps| {
let mut task_deps = match task_deps { let mut task_deps = match task_deps {
TaskDepsRef::Allow(deps) => deps.lock(), TaskDepsRef::Allow(deps) => deps.lock(),
TaskDepsRef::EvalAlways => { TaskDepsRef::EvalAlways => {
@ -532,9 +532,9 @@ impl<K: DepKind> DepGraph<K> {
/// FIXME: If the code is changed enough for this node to be marked before requiring the /// FIXME: If the code is changed enough for this node to be marked before requiring the
/// caller's node, we suppose that those changes will be enough to mark this node red and /// caller's node, we suppose that those changes will be enough to mark this node red and
/// force a recomputation using the "normal" way. /// force a recomputation using the "normal" way.
pub fn with_feed_task<Ctxt: DepContext<DepKind = K>, A: Debug, R: Debug>( pub fn with_feed_task<Ctxt: DepContext<Deps = D>, A: Debug, R: Debug>(
&self, &self,
node: DepNode<K>, node: DepNode,
cx: Ctxt, cx: Ctxt,
key: A, key: A,
result: &R, result: &R,
@ -573,7 +573,7 @@ impl<K: DepKind> DepGraph<K> {
} }
let mut edges = EdgesVec::new(); let mut edges = EdgesVec::new();
K::read_deps(|task_deps| match task_deps { D::read_deps(|task_deps| match task_deps {
TaskDepsRef::Allow(deps) => edges.extend(deps.lock().reads.iter().copied()), TaskDepsRef::Allow(deps) => edges.extend(deps.lock().reads.iter().copied()),
TaskDepsRef::EvalAlways => { TaskDepsRef::EvalAlways => {
edges.push(DepNodeIndex::FOREVER_RED_NODE); edges.push(DepNodeIndex::FOREVER_RED_NODE);
@ -623,9 +623,9 @@ impl<K: DepKind> DepGraph<K> {
} }
} }
impl<K: DepKind> DepGraphData<K> { impl<D: Deps> DepGraphData<D> {
#[inline] #[inline]
pub fn dep_node_index_of_opt(&self, dep_node: &DepNode<K>) -> Option<DepNodeIndex> { pub fn dep_node_index_of_opt(&self, dep_node: &DepNode) -> Option<DepNodeIndex> {
if let Some(prev_index) = self.previous.node_to_index_opt(dep_node) { if let Some(prev_index) = self.previous.node_to_index_opt(dep_node) {
self.current.prev_index_to_index.lock()[prev_index] self.current.prev_index_to_index.lock()[prev_index]
} else { } else {
@ -634,11 +634,11 @@ impl<K: DepKind> DepGraphData<K> {
} }
#[inline] #[inline]
pub fn dep_node_exists(&self, dep_node: &DepNode<K>) -> bool { pub fn dep_node_exists(&self, dep_node: &DepNode) -> bool {
self.dep_node_index_of_opt(dep_node).is_some() self.dep_node_index_of_opt(dep_node).is_some()
} }
fn node_color(&self, dep_node: &DepNode<K>) -> Option<DepNodeColor> { fn node_color(&self, dep_node: &DepNode) -> Option<DepNodeColor> {
if let Some(prev_index) = self.previous.node_to_index_opt(dep_node) { if let Some(prev_index) = self.previous.node_to_index_opt(dep_node) {
self.colors.get(prev_index) self.colors.get(prev_index)
} else { } else {
@ -660,18 +660,18 @@ impl<K: DepKind> DepGraphData<K> {
} }
#[inline] #[inline]
pub fn prev_node_of(&self, prev_index: SerializedDepNodeIndex) -> DepNode<K> { pub fn prev_node_of(&self, prev_index: SerializedDepNodeIndex) -> DepNode {
self.previous.index_to_node(prev_index) self.previous.index_to_node(prev_index)
} }
pub fn mark_debug_loaded_from_disk(&self, dep_node: DepNode<K>) { pub fn mark_debug_loaded_from_disk(&self, dep_node: DepNode) {
self.debug_loaded_from_disk.lock().insert(dep_node); self.debug_loaded_from_disk.lock().insert(dep_node);
} }
} }
impl<K: DepKind> DepGraph<K> { impl<D: Deps> DepGraph<D> {
#[inline] #[inline]
pub fn dep_node_exists(&self, dep_node: &DepNode<K>) -> bool { pub fn dep_node_exists(&self, dep_node: &DepNode) -> bool {
self.data.as_ref().is_some_and(|data| data.dep_node_exists(dep_node)) self.data.as_ref().is_some_and(|data| data.dep_node_exists(dep_node))
} }
@ -687,12 +687,12 @@ impl<K: DepKind> DepGraph<K> {
&self.data.as_ref().unwrap().previous_work_products &self.data.as_ref().unwrap().previous_work_products
} }
pub fn debug_was_loaded_from_disk(&self, dep_node: DepNode<K>) -> bool { pub fn debug_was_loaded_from_disk(&self, dep_node: DepNode) -> bool {
self.data.as_ref().unwrap().debug_loaded_from_disk.lock().contains(&dep_node) self.data.as_ref().unwrap().debug_loaded_from_disk.lock().contains(&dep_node)
} }
#[inline(always)] #[inline(always)]
pub fn register_dep_node_debug_str<F>(&self, dep_node: DepNode<K>, debug_str_gen: F) pub fn register_dep_node_debug_str<F>(&self, dep_node: DepNode, debug_str_gen: F)
where where
F: FnOnce() -> String, F: FnOnce() -> String,
{ {
@ -705,11 +705,11 @@ impl<K: DepKind> DepGraph<K> {
dep_node_debug.borrow_mut().insert(dep_node, debug_str); dep_node_debug.borrow_mut().insert(dep_node, debug_str);
} }
pub fn dep_node_debug_str(&self, dep_node: DepNode<K>) -> Option<String> { pub fn dep_node_debug_str(&self, dep_node: DepNode) -> Option<String> {
self.data.as_ref()?.dep_node_debug.borrow().get(&dep_node).cloned() self.data.as_ref()?.dep_node_debug.borrow().get(&dep_node).cloned()
} }
fn node_color(&self, dep_node: &DepNode<K>) -> Option<DepNodeColor> { fn node_color(&self, dep_node: &DepNode) -> Option<DepNodeColor> {
if let Some(ref data) = self.data { if let Some(ref data) = self.data {
return data.node_color(dep_node); return data.node_color(dep_node);
} }
@ -717,25 +717,25 @@ impl<K: DepKind> DepGraph<K> {
None None
} }
pub fn try_mark_green<Qcx: QueryContext<DepKind = K>>( pub fn try_mark_green<Qcx: QueryContext<Deps = D>>(
&self, &self,
qcx: Qcx, qcx: Qcx,
dep_node: &DepNode<K>, dep_node: &DepNode,
) -> Option<(SerializedDepNodeIndex, DepNodeIndex)> { ) -> Option<(SerializedDepNodeIndex, DepNodeIndex)> {
self.data().and_then(|data| data.try_mark_green(qcx, dep_node)) self.data().and_then(|data| data.try_mark_green(qcx, dep_node))
} }
} }
impl<K: DepKind> DepGraphData<K> { impl<D: Deps> DepGraphData<D> {
/// Try to mark a node index for the node dep_node. /// Try to mark a node index for the node dep_node.
/// ///
/// A node will have an index, when it's already been marked green, or when we can mark it /// A node will have an index, when it's already been marked green, or when we can mark it
/// green. This function will mark the current task as a reader of the specified node, when /// green. This function will mark the current task as a reader of the specified node, when
/// a node index can be found for that node. /// a node index can be found for that node.
pub fn try_mark_green<Qcx: QueryContext<DepKind = K>>( pub fn try_mark_green<Qcx: QueryContext<Deps = D>>(
&self, &self,
qcx: Qcx, qcx: Qcx,
dep_node: &DepNode<K>, dep_node: &DepNode,
) -> Option<(SerializedDepNodeIndex, DepNodeIndex)> { ) -> Option<(SerializedDepNodeIndex, DepNodeIndex)> {
debug_assert!(!qcx.dep_context().is_eval_always(dep_node.kind)); debug_assert!(!qcx.dep_context().is_eval_always(dep_node.kind));
@ -757,11 +757,11 @@ impl<K: DepKind> DepGraphData<K> {
} }
#[instrument(skip(self, qcx, parent_dep_node_index, frame), level = "debug")] #[instrument(skip(self, qcx, parent_dep_node_index, frame), level = "debug")]
fn try_mark_parent_green<Qcx: QueryContext<DepKind = K>>( fn try_mark_parent_green<Qcx: QueryContext<Deps = D>>(
&self, &self,
qcx: Qcx, qcx: Qcx,
parent_dep_node_index: SerializedDepNodeIndex, parent_dep_node_index: SerializedDepNodeIndex,
dep_node: &DepNode<K>, dep_node: &DepNode,
frame: Option<&MarkFrame<'_>>, frame: Option<&MarkFrame<'_>>,
) -> Option<()> { ) -> Option<()> {
let dep_dep_node_color = self.colors.get(parent_dep_node_index); let dep_dep_node_color = self.colors.get(parent_dep_node_index);
@ -845,11 +845,11 @@ impl<K: DepKind> DepGraphData<K> {
/// Try to mark a dep-node which existed in the previous compilation session as green. /// Try to mark a dep-node which existed in the previous compilation session as green.
#[instrument(skip(self, qcx, prev_dep_node_index, frame), level = "debug")] #[instrument(skip(self, qcx, prev_dep_node_index, frame), level = "debug")]
fn try_mark_previous_green<Qcx: QueryContext<DepKind = K>>( fn try_mark_previous_green<Qcx: QueryContext<Deps = D>>(
&self, &self,
qcx: Qcx, qcx: Qcx,
prev_dep_node_index: SerializedDepNodeIndex, prev_dep_node_index: SerializedDepNodeIndex,
dep_node: &DepNode<K>, dep_node: &DepNode,
frame: Option<&MarkFrame<'_>>, frame: Option<&MarkFrame<'_>>,
) -> Option<DepNodeIndex> { ) -> Option<DepNodeIndex> {
let frame = MarkFrame { index: prev_dep_node_index, parent: frame }; let frame = MarkFrame { index: prev_dep_node_index, parent: frame };
@ -916,7 +916,7 @@ impl<K: DepKind> DepGraphData<K> {
/// This may be called concurrently on multiple threads for the same dep node. /// This may be called concurrently on multiple threads for the same dep node.
#[cold] #[cold]
#[inline(never)] #[inline(never)]
fn emit_side_effects<Qcx: QueryContext<DepKind = K>>( fn emit_side_effects<Qcx: QueryContext<Deps = D>>(
&self, &self,
qcx: Qcx, qcx: Qcx,
dep_node_index: DepNodeIndex, dep_node_index: DepNodeIndex,
@ -940,16 +940,16 @@ impl<K: DepKind> DepGraphData<K> {
} }
} }
impl<K: DepKind> DepGraph<K> { impl<D: Deps> DepGraph<D> {
/// Returns true if the given node has been marked as red during the /// Returns true if the given node has been marked as red during the
/// current compilation session. Used in various assertions /// current compilation session. Used in various assertions
pub fn is_red(&self, dep_node: &DepNode<K>) -> bool { pub fn is_red(&self, dep_node: &DepNode) -> bool {
self.node_color(dep_node) == Some(DepNodeColor::Red) self.node_color(dep_node) == Some(DepNodeColor::Red)
} }
/// Returns true if the given node has been marked as green during the /// Returns true if the given node has been marked as green during the
/// current compilation session. Used in various assertions /// current compilation session. Used in various assertions
pub fn is_green(&self, dep_node: &DepNode<K>) -> bool { pub fn is_green(&self, dep_node: &DepNode) -> bool {
self.node_color(dep_node).is_some_and(|c| c.is_green()) self.node_color(dep_node).is_some_and(|c| c.is_green())
} }
@ -961,7 +961,7 @@ impl<K: DepKind> DepGraph<K> {
/// ///
/// This method will only load queries that will end up in the disk cache. /// This method will only load queries that will end up in the disk cache.
/// Other queries will not be executed. /// Other queries will not be executed.
pub fn exec_cache_promotions<Tcx: DepContext<DepKind = K>>(&self, tcx: Tcx) { pub fn exec_cache_promotions<Tcx: DepContext>(&self, tcx: Tcx) {
let _prof_timer = tcx.profiler().generic_activity("incr_comp_query_cache_promotion"); let _prof_timer = tcx.profiler().generic_activity("incr_comp_query_cache_promotion");
let data = self.data.as_ref().unwrap(); let data = self.data.as_ref().unwrap();
@ -1076,9 +1076,9 @@ rustc_index::newtype_index! {
/// `new_node_to_index` and `data`, or `prev_index_to_index` and `data`. When /// `new_node_to_index` and `data`, or `prev_index_to_index` and `data`. When
/// manipulating both, we acquire `new_node_to_index` or `prev_index_to_index` /// manipulating both, we acquire `new_node_to_index` or `prev_index_to_index`
/// first, and `data` second. /// first, and `data` second.
pub(super) struct CurrentDepGraph<K: DepKind> { pub(super) struct CurrentDepGraph<D: Deps> {
encoder: Steal<GraphEncoder<K>>, encoder: Steal<GraphEncoder<D>>,
new_node_to_index: Sharded<FxHashMap<DepNode<K>, DepNodeIndex>>, new_node_to_index: Sharded<FxHashMap<DepNode, DepNodeIndex>>,
prev_index_to_index: Lock<IndexVec<SerializedDepNodeIndex, Option<DepNodeIndex>>>, prev_index_to_index: Lock<IndexVec<SerializedDepNodeIndex, Option<DepNodeIndex>>>,
/// This is used to verify that fingerprints do not change between the creation of a node /// This is used to verify that fingerprints do not change between the creation of a node
@ -1089,7 +1089,7 @@ pub(super) struct CurrentDepGraph<K: DepKind> {
/// Used to trap when a specific edge is added to the graph. /// Used to trap when a specific edge is added to the graph.
/// This is used for debug purposes and is only active with `debug_assertions`. /// This is used for debug purposes and is only active with `debug_assertions`.
#[cfg(debug_assertions)] #[cfg(debug_assertions)]
forbidden_edge: Option<EdgeFilter<K>>, forbidden_edge: Option<EdgeFilter>,
/// Anonymous `DepNode`s are nodes whose IDs we compute from the list of /// Anonymous `DepNode`s are nodes whose IDs we compute from the list of
/// their edges. This has the beneficial side-effect that multiple anonymous /// their edges. This has the beneficial side-effect that multiple anonymous
@ -1116,14 +1116,14 @@ pub(super) struct CurrentDepGraph<K: DepKind> {
node_intern_event_id: Option<EventId>, node_intern_event_id: Option<EventId>,
} }
impl<K: DepKind> CurrentDepGraph<K> { impl<D: Deps> CurrentDepGraph<D> {
fn new( fn new(
profiler: &SelfProfilerRef, profiler: &SelfProfilerRef,
prev_graph_node_count: usize, prev_graph_node_count: usize,
encoder: FileEncoder, encoder: FileEncoder,
record_graph: bool, record_graph: bool,
record_stats: bool, record_stats: bool,
) -> CurrentDepGraph<K> { ) -> Self {
use std::time::{SystemTime, UNIX_EPOCH}; use std::time::{SystemTime, UNIX_EPOCH};
let duration = SystemTime::now().duration_since(UNIX_EPOCH).unwrap(); let duration = SystemTime::now().duration_since(UNIX_EPOCH).unwrap();
@ -1178,7 +1178,7 @@ impl<K: DepKind> CurrentDepGraph<K> {
} }
#[cfg(debug_assertions)] #[cfg(debug_assertions)]
fn record_edge(&self, dep_node_index: DepNodeIndex, key: DepNode<K>, fingerprint: Fingerprint) { fn record_edge(&self, dep_node_index: DepNodeIndex, key: DepNode, fingerprint: Fingerprint) {
if let Some(forbidden_edge) = &self.forbidden_edge { if let Some(forbidden_edge) = &self.forbidden_edge {
forbidden_edge.index_to_node.lock().insert(dep_node_index, key); forbidden_edge.index_to_node.lock().insert(dep_node_index, key);
} }
@ -1192,7 +1192,7 @@ impl<K: DepKind> CurrentDepGraph<K> {
fn intern_new_node( fn intern_new_node(
&self, &self,
profiler: &SelfProfilerRef, profiler: &SelfProfilerRef,
key: DepNode<K>, key: DepNode,
edges: EdgesVec, edges: EdgesVec,
current_fingerprint: Fingerprint, current_fingerprint: Fingerprint,
) -> DepNodeIndex { ) -> DepNodeIndex {
@ -1215,8 +1215,8 @@ impl<K: DepKind> CurrentDepGraph<K> {
fn intern_node( fn intern_node(
&self, &self,
profiler: &SelfProfilerRef, profiler: &SelfProfilerRef,
prev_graph: &SerializedDepGraph<K>, prev_graph: &SerializedDepGraph,
key: DepNode<K>, key: DepNode,
edges: EdgesVec, edges: EdgesVec,
fingerprint: Option<Fingerprint>, fingerprint: Option<Fingerprint>,
print_status: bool, print_status: bool,
@ -1289,7 +1289,7 @@ impl<K: DepKind> CurrentDepGraph<K> {
fn promote_node_and_deps_to_current( fn promote_node_and_deps_to_current(
&self, &self,
profiler: &SelfProfilerRef, profiler: &SelfProfilerRef,
prev_graph: &SerializedDepGraph<K>, prev_graph: &SerializedDepGraph,
prev_index: SerializedDepNodeIndex, prev_index: SerializedDepNodeIndex,
) -> DepNodeIndex { ) -> DepNodeIndex {
self.debug_assert_not_in_new_nodes(prev_graph, prev_index); self.debug_assert_not_in_new_nodes(prev_graph, prev_index);
@ -1317,7 +1317,7 @@ impl<K: DepKind> CurrentDepGraph<K> {
#[inline] #[inline]
fn debug_assert_not_in_new_nodes( fn debug_assert_not_in_new_nodes(
&self, &self,
prev_graph: &SerializedDepGraph<K>, prev_graph: &SerializedDepGraph,
prev_index: SerializedDepNodeIndex, prev_index: SerializedDepNodeIndex,
) { ) {
let node = &prev_graph.index_to_node(prev_index); let node = &prev_graph.index_to_node(prev_index);
@ -1329,11 +1329,11 @@ impl<K: DepKind> CurrentDepGraph<K> {
} }
#[derive(Debug, Clone, Copy)] #[derive(Debug, Clone, Copy)]
pub enum TaskDepsRef<'a, K: DepKind> { pub enum TaskDepsRef<'a> {
/// New dependencies can be added to the /// New dependencies can be added to the
/// `TaskDeps`. This is used when executing a 'normal' query /// `TaskDeps`. This is used when executing a 'normal' query
/// (no `eval_always` modifier) /// (no `eval_always` modifier)
Allow(&'a Lock<TaskDeps<K>>), Allow(&'a Lock<TaskDeps>),
/// This is used when executing an `eval_always` query. We don't /// This is used when executing an `eval_always` query. We don't
/// need to track dependencies for a query that's always /// need to track dependencies for a query that's always
/// re-executed -- but we need to know that this is an `eval_always` /// re-executed -- but we need to know that this is an `eval_always`
@ -1350,15 +1350,15 @@ pub enum TaskDepsRef<'a, K: DepKind> {
} }
#[derive(Debug)] #[derive(Debug)]
pub struct TaskDeps<K: DepKind> { pub struct TaskDeps {
#[cfg(debug_assertions)] #[cfg(debug_assertions)]
node: Option<DepNode<K>>, node: Option<DepNode>,
reads: EdgesVec, reads: EdgesVec,
read_set: FxHashSet<DepNodeIndex>, read_set: FxHashSet<DepNodeIndex>,
phantom_data: PhantomData<DepNode<K>>, phantom_data: PhantomData<DepNode>,
} }
impl<K: DepKind> Default for TaskDeps<K> { impl Default for TaskDeps {
fn default() -> Self { fn default() -> Self {
Self { Self {
#[cfg(debug_assertions)] #[cfg(debug_assertions)]
@ -1410,10 +1410,7 @@ impl DepNodeColorMap {
#[inline(never)] #[inline(never)]
#[cold] #[cold]
pub(crate) fn print_markframe_trace<K: DepKind>( pub(crate) fn print_markframe_trace<D: Deps>(graph: &DepGraph<D>, frame: Option<&MarkFrame<'_>>) {
graph: &DepGraph<K>,
frame: Option<&MarkFrame<'_>>,
) {
let data = graph.data.as_ref().unwrap(); let data = graph.data.as_ref().unwrap();
eprintln!("there was a panic while trying to force a dep node"); eprintln!("there was a panic while trying to force a dep node");

View file

@ -1,11 +1,11 @@
pub mod debug; pub mod debug;
mod dep_node; pub mod dep_node;
mod edges; mod edges;
mod graph; mod graph;
mod query; mod query;
mod serialized; mod serialized;
pub use dep_node::{DepKindStruct, DepNode, DepNodeParams, WorkProductId}; pub use dep_node::{DepKind, DepKindStruct, DepNode, DepNodeParams, WorkProductId};
pub use edges::EdgesVec; pub use edges::EdgesVec;
pub use graph::{ pub use graph::{
hash_result, DepGraph, DepGraphData, DepNodeColor, DepNodeIndex, TaskDeps, TaskDepsRef, hash_result, DepGraph, DepGraphData, DepNodeColor, DepNodeIndex, TaskDeps, TaskDepsRef,
@ -16,22 +16,20 @@ pub use serialized::{SerializedDepGraph, SerializedDepNodeIndex};
use crate::ich::StableHashingContext; use crate::ich::StableHashingContext;
use rustc_data_structures::profiling::SelfProfilerRef; use rustc_data_structures::profiling::SelfProfilerRef;
use rustc_serialize::{opaque::FileEncoder, Encodable};
use rustc_session::Session; use rustc_session::Session;
use std::hash::Hash; use std::panic;
use std::{fmt, panic};
use self::graph::{print_markframe_trace, MarkFrame}; use self::graph::{print_markframe_trace, MarkFrame};
pub trait DepContext: Copy { pub trait DepContext: Copy {
type DepKind: self::DepKind; type Deps: Deps;
/// Create a hashing context for hashing new results. /// Create a hashing context for hashing new results.
fn with_stable_hashing_context<R>(self, f: impl FnOnce(StableHashingContext<'_>) -> R) -> R; fn with_stable_hashing_context<R>(self, f: impl FnOnce(StableHashingContext<'_>) -> R) -> R;
/// Access the DepGraph. /// Access the DepGraph.
fn dep_graph(&self) -> &DepGraph<Self::DepKind>; fn dep_graph(&self) -> &DepGraph<Self::Deps>;
/// Access the profiler. /// Access the profiler.
fn profiler(&self) -> &SelfProfilerRef; fn profiler(&self) -> &SelfProfilerRef;
@ -39,10 +37,10 @@ pub trait DepContext: Copy {
/// Access the compiler session. /// Access the compiler session.
fn sess(&self) -> &Session; fn sess(&self) -> &Session;
fn dep_kind_info(&self, dep_node: Self::DepKind) -> &DepKindStruct<Self>; fn dep_kind_info(&self, dep_node: DepKind) -> &DepKindStruct<Self>;
#[inline(always)] #[inline(always)]
fn fingerprint_style(self, kind: Self::DepKind) -> FingerprintStyle { fn fingerprint_style(self, kind: DepKind) -> FingerprintStyle {
let data = self.dep_kind_info(kind); let data = self.dep_kind_info(kind);
if data.is_anon { if data.is_anon {
return FingerprintStyle::Opaque; return FingerprintStyle::Opaque;
@ -52,18 +50,14 @@ pub trait DepContext: Copy {
#[inline(always)] #[inline(always)]
/// Return whether this kind always require evaluation. /// Return whether this kind always require evaluation.
fn is_eval_always(self, kind: Self::DepKind) -> bool { fn is_eval_always(self, kind: DepKind) -> bool {
self.dep_kind_info(kind).is_eval_always self.dep_kind_info(kind).is_eval_always
} }
/// Try to force a dep node to execute and see if it's green. /// Try to force a dep node to execute and see if it's green.
#[inline] #[inline]
#[instrument(skip(self, frame), level = "debug")] #[instrument(skip(self, frame), level = "debug")]
fn try_force_from_dep_node( fn try_force_from_dep_node(self, dep_node: DepNode, frame: Option<&MarkFrame<'_>>) -> bool {
self,
dep_node: DepNode<Self::DepKind>,
frame: Option<&MarkFrame<'_>>,
) -> bool {
let cb = self.dep_kind_info(dep_node.kind); let cb = self.dep_kind_info(dep_node.kind);
if let Some(f) = cb.force_from_dep_node { if let Some(f) = cb.force_from_dep_node {
if let Err(value) = panic::catch_unwind(panic::AssertUnwindSafe(|| { if let Err(value) = panic::catch_unwind(panic::AssertUnwindSafe(|| {
@ -81,7 +75,7 @@ pub trait DepContext: Copy {
} }
/// Load data from the on-disk cache. /// Load data from the on-disk cache.
fn try_load_from_on_disk_cache(self, dep_node: DepNode<Self::DepKind>) { fn try_load_from_on_disk_cache(self, dep_node: DepNode) {
let cb = self.dep_kind_info(dep_node.kind); let cb = self.dep_kind_info(dep_node.kind);
if let Some(f) = cb.try_load_from_on_disk_cache { if let Some(f) = cb.try_load_from_on_disk_cache {
f(self, dep_node) f(self, dep_node)
@ -89,15 +83,37 @@ pub trait DepContext: Copy {
} }
} }
pub trait Deps {
/// Execute the operation with provided dependencies.
fn with_deps<OP, R>(deps: TaskDepsRef<'_>, op: OP) -> R
where
OP: FnOnce() -> R;
/// Access dependencies from current implicit context.
fn read_deps<OP>(op: OP)
where
OP: for<'a> FnOnce(TaskDepsRef<'a>);
/// We use this for most things when incr. comp. is turned off.
const DEP_KIND_NULL: DepKind;
/// We use this to create a forever-red node.
const DEP_KIND_RED: DepKind;
/// This is the highest value a `DepKind` can have. It's used during encoding to
/// pack information into the unused bits.
const DEP_KIND_MAX: u16;
}
pub trait HasDepContext: Copy { pub trait HasDepContext: Copy {
type DepKind: self::DepKind; type Deps: self::Deps;
type DepContext: self::DepContext<DepKind = Self::DepKind>; type DepContext: self::DepContext<Deps = Self::Deps>;
fn dep_context(&self) -> &Self::DepContext; fn dep_context(&self) -> &Self::DepContext;
} }
impl<T: DepContext> HasDepContext for T { impl<T: DepContext> HasDepContext for T {
type DepKind = T::DepKind; type Deps = T::Deps;
type DepContext = Self; type DepContext = Self;
fn dep_context(&self) -> &Self::DepContext { fn dep_context(&self) -> &Self::DepContext {
@ -106,7 +122,7 @@ impl<T: DepContext> HasDepContext for T {
} }
impl<T: HasDepContext, Q: Copy> HasDepContext for (T, Q) { impl<T: HasDepContext, Q: Copy> HasDepContext for (T, Q) {
type DepKind = T::DepKind; type Deps = T::Deps;
type DepContext = T::DepContext; type DepContext = T::DepContext;
fn dep_context(&self) -> &Self::DepContext { fn dep_context(&self) -> &Self::DepContext {
@ -138,31 +154,3 @@ impl FingerprintStyle {
} }
} }
} }
/// Describe the different families of dependency nodes.
pub trait DepKind: Copy + fmt::Debug + Eq + Hash + Send + Encodable<FileEncoder> + 'static {
/// DepKind to use when incr. comp. is turned off.
const NULL: Self;
/// DepKind to use to create the initial forever-red node.
const RED: Self;
/// Implementation of `std::fmt::Debug` for `DepNode`.
fn debug_node(node: &DepNode<Self>, f: &mut fmt::Formatter<'_>) -> fmt::Result;
/// Execute the operation with provided dependencies.
fn with_deps<OP, R>(deps: TaskDepsRef<'_, Self>, op: OP) -> R
where
OP: FnOnce() -> R;
/// Access dependencies from current implicit context.
fn read_deps<OP>(op: OP)
where
OP: for<'a> FnOnce(TaskDepsRef<'a, Self>);
fn from_u16(u: u16) -> Self;
fn to_u16(self) -> u16;
const MAX: u16;
}

View file

@ -2,16 +2,16 @@ use rustc_data_structures::fx::FxHashMap;
use rustc_data_structures::graph::implementation::{Direction, Graph, NodeIndex, INCOMING}; use rustc_data_structures::graph::implementation::{Direction, Graph, NodeIndex, INCOMING};
use rustc_index::IndexVec; use rustc_index::IndexVec;
use super::{DepKind, DepNode, DepNodeIndex}; use super::{DepNode, DepNodeIndex};
pub struct DepGraphQuery<K> { pub struct DepGraphQuery {
pub graph: Graph<DepNode<K>, ()>, pub graph: Graph<DepNode, ()>,
pub indices: FxHashMap<DepNode<K>, NodeIndex>, pub indices: FxHashMap<DepNode, NodeIndex>,
pub dep_index_to_index: IndexVec<DepNodeIndex, Option<NodeIndex>>, pub dep_index_to_index: IndexVec<DepNodeIndex, Option<NodeIndex>>,
} }
impl<K: DepKind> DepGraphQuery<K> { impl DepGraphQuery {
pub fn new(prev_node_count: usize) -> DepGraphQuery<K> { pub fn new(prev_node_count: usize) -> DepGraphQuery {
let node_count = prev_node_count + prev_node_count / 4; let node_count = prev_node_count + prev_node_count / 4;
let edge_count = 6 * node_count; let edge_count = 6 * node_count;
@ -22,7 +22,7 @@ impl<K: DepKind> DepGraphQuery<K> {
DepGraphQuery { graph, indices, dep_index_to_index } DepGraphQuery { graph, indices, dep_index_to_index }
} }
pub fn push(&mut self, index: DepNodeIndex, node: DepNode<K>, edges: &[DepNodeIndex]) { pub fn push(&mut self, index: DepNodeIndex, node: DepNode, edges: &[DepNodeIndex]) {
let source = self.graph.add_node(node); let source = self.graph.add_node(node);
self.dep_index_to_index.insert(index, source); self.dep_index_to_index.insert(index, source);
self.indices.insert(node, source); self.indices.insert(node, source);
@ -37,11 +37,11 @@ impl<K: DepKind> DepGraphQuery<K> {
} }
} }
pub fn nodes(&self) -> Vec<&DepNode<K>> { pub fn nodes(&self) -> Vec<&DepNode> {
self.graph.all_nodes().iter().map(|n| &n.data).collect() self.graph.all_nodes().iter().map(|n| &n.data).collect()
} }
pub fn edges(&self) -> Vec<(&DepNode<K>, &DepNode<K>)> { pub fn edges(&self) -> Vec<(&DepNode, &DepNode)> {
self.graph self.graph
.all_edges() .all_edges()
.iter() .iter()
@ -50,7 +50,7 @@ impl<K: DepKind> DepGraphQuery<K> {
.collect() .collect()
} }
fn reachable_nodes(&self, node: &DepNode<K>, direction: Direction) -> Vec<&DepNode<K>> { fn reachable_nodes(&self, node: &DepNode, direction: Direction) -> Vec<&DepNode> {
if let Some(&index) = self.indices.get(node) { if let Some(&index) = self.indices.get(node) {
self.graph.depth_traverse(index, direction).map(|s| self.graph.node_data(s)).collect() self.graph.depth_traverse(index, direction).map(|s| self.graph.node_data(s)).collect()
} else { } else {
@ -59,7 +59,7 @@ impl<K: DepKind> DepGraphQuery<K> {
} }
/// All nodes that can reach `node`. /// All nodes that can reach `node`.
pub fn transitive_predecessors(&self, node: &DepNode<K>) -> Vec<&DepNode<K>> { pub fn transitive_predecessors(&self, node: &DepNode) -> Vec<&DepNode> {
self.reachable_nodes(node, INCOMING) self.reachable_nodes(node, INCOMING)
} }
} }

View file

@ -36,7 +36,7 @@
//! store it directly after the header with leb128. //! store it directly after the header with leb128.
use super::query::DepGraphQuery; use super::query::DepGraphQuery;
use super::{DepKind, DepNode, DepNodeIndex}; use super::{DepKind, DepNode, DepNodeIndex, Deps};
use crate::dep_graph::EdgesVec; use crate::dep_graph::EdgesVec;
use rustc_data_structures::fingerprint::Fingerprint; use rustc_data_structures::fingerprint::Fingerprint;
use rustc_data_structures::fingerprint::PackedFingerprint; use rustc_data_structures::fingerprint::PackedFingerprint;
@ -70,9 +70,9 @@ const DEP_NODE_WIDTH_BITS: usize = DEP_NODE_SIZE / 2;
/// Data for use when recompiling the **current crate**. /// Data for use when recompiling the **current crate**.
#[derive(Debug)] #[derive(Debug)]
pub struct SerializedDepGraph<K: DepKind> { pub struct SerializedDepGraph {
/// The set of all DepNodes in the graph /// The set of all DepNodes in the graph
nodes: IndexVec<SerializedDepNodeIndex, DepNode<K>>, nodes: IndexVec<SerializedDepNodeIndex, DepNode>,
/// The set of all Fingerprints in the graph. Each Fingerprint corresponds to /// The set of all Fingerprints in the graph. Each Fingerprint corresponds to
/// the DepNode at the same index in the nodes vector. /// the DepNode at the same index in the nodes vector.
fingerprints: IndexVec<SerializedDepNodeIndex, Fingerprint>, fingerprints: IndexVec<SerializedDepNodeIndex, Fingerprint>,
@ -88,7 +88,7 @@ pub struct SerializedDepGraph<K: DepKind> {
index: Vec<UnhashMap<PackedFingerprint, SerializedDepNodeIndex>>, index: Vec<UnhashMap<PackedFingerprint, SerializedDepNodeIndex>>,
} }
impl<K: DepKind> Default for SerializedDepGraph<K> { impl Default for SerializedDepGraph {
fn default() -> Self { fn default() -> Self {
SerializedDepGraph { SerializedDepGraph {
nodes: Default::default(), nodes: Default::default(),
@ -100,7 +100,7 @@ impl<K: DepKind> Default for SerializedDepGraph<K> {
} }
} }
impl<K: DepKind> SerializedDepGraph<K> { impl SerializedDepGraph {
#[inline] #[inline]
pub fn edge_targets_from( pub fn edge_targets_from(
&self, &self,
@ -134,13 +134,13 @@ impl<K: DepKind> SerializedDepGraph<K> {
} }
#[inline] #[inline]
pub fn index_to_node(&self, dep_node_index: SerializedDepNodeIndex) -> DepNode<K> { pub fn index_to_node(&self, dep_node_index: SerializedDepNodeIndex) -> DepNode {
self.nodes[dep_node_index] self.nodes[dep_node_index]
} }
#[inline] #[inline]
pub fn node_to_index_opt(&self, dep_node: &DepNode<K>) -> Option<SerializedDepNodeIndex> { pub fn node_to_index_opt(&self, dep_node: &DepNode) -> Option<SerializedDepNodeIndex> {
self.index.get(dep_node.kind.to_u16() as usize)?.get(&dep_node.hash).cloned() self.index.get(dep_node.kind.as_usize())?.get(&dep_node.hash).cloned()
} }
#[inline] #[inline]
@ -184,11 +184,9 @@ fn mask(bits: usize) -> usize {
usize::MAX >> ((std::mem::size_of::<usize>() * 8) - bits) usize::MAX >> ((std::mem::size_of::<usize>() * 8) - bits)
} }
impl<'a, K: DepKind + Decodable<MemDecoder<'a>>> Decodable<MemDecoder<'a>> impl SerializedDepGraph {
for SerializedDepGraph<K>
{
#[instrument(level = "debug", skip(d))] #[instrument(level = "debug", skip(d))]
fn decode(d: &mut MemDecoder<'a>) -> SerializedDepGraph<K> { pub fn decode<D: Deps>(d: &mut MemDecoder<'_>) -> SerializedDepGraph {
// The last 16 bytes are the node count and edge count. // The last 16 bytes are the node count and edge count.
debug!("position: {:?}", d.position()); debug!("position: {:?}", d.position());
let (node_count, edge_count) = let (node_count, edge_count) =
@ -217,14 +215,14 @@ impl<'a, K: DepKind + Decodable<MemDecoder<'a>>> Decodable<MemDecoder<'a>>
// least (34 byte header + 1 byte len + 64 bytes edge data), which is ~1%. A 2-byte leb128 // least (34 byte header + 1 byte len + 64 bytes edge data), which is ~1%. A 2-byte leb128
// length is about the same fractional overhead and it amortizes for yet greater lengths. // length is about the same fractional overhead and it amortizes for yet greater lengths.
let mut edge_list_data = Vec::with_capacity( let mut edge_list_data = Vec::with_capacity(
graph_bytes - node_count * std::mem::size_of::<SerializedNodeHeader<K>>(), graph_bytes - node_count * std::mem::size_of::<SerializedNodeHeader<D>>(),
); );
for _index in 0..node_count { for _index in 0..node_count {
// Decode the header for this edge; the header packs together as many of the fixed-size // Decode the header for this edge; the header packs together as many of the fixed-size
// fields as possible to limit the number of times we update decoder state. // fields as possible to limit the number of times we update decoder state.
let node_header = let node_header =
SerializedNodeHeader::<K> { bytes: d.read_array(), _marker: PhantomData }; SerializedNodeHeader::<D> { bytes: d.read_array(), _marker: PhantomData };
let _i: SerializedDepNodeIndex = nodes.push(node_header.node()); let _i: SerializedDepNodeIndex = nodes.push(node_header.node());
debug_assert_eq!(_i.index(), _index); debug_assert_eq!(_i.index(), _index);
@ -256,12 +254,12 @@ impl<'a, K: DepKind + Decodable<MemDecoder<'a>>> Decodable<MemDecoder<'a>>
edge_list_data.extend(&[0u8; DEP_NODE_PAD]); edge_list_data.extend(&[0u8; DEP_NODE_PAD]);
// Read the number of each dep kind and use it to create an hash map with a suitable size. // Read the number of each dep kind and use it to create an hash map with a suitable size.
let mut index: Vec<_> = (0..(K::MAX as usize + 1)) let mut index: Vec<_> = (0..(D::DEP_KIND_MAX + 1))
.map(|_| UnhashMap::with_capacity_and_hasher(d.read_u32() as usize, Default::default())) .map(|_| UnhashMap::with_capacity_and_hasher(d.read_u32() as usize, Default::default()))
.collect(); .collect();
for (idx, node) in nodes.iter_enumerated() { for (idx, node) in nodes.iter_enumerated() {
index[node.kind.to_u16() as usize].insert(node.hash, idx); index[node.kind.as_usize()].insert(node.hash, idx);
} }
SerializedDepGraph { nodes, fingerprints, edge_list_indices, edge_list_data, index } SerializedDepGraph { nodes, fingerprints, edge_list_indices, edge_list_data, index }
@ -276,20 +274,20 @@ impl<'a, K: DepKind + Decodable<MemDecoder<'a>>> Decodable<MemDecoder<'a>>
/// * The `DepKind`'s discriminant (a u16, but not all bits are used...) /// * The `DepKind`'s discriminant (a u16, but not all bits are used...)
/// * The byte width of the encoded edges for this node /// * The byte width of the encoded edges for this node
/// * In whatever bits remain, the length of the edge list for this node, if it fits /// * In whatever bits remain, the length of the edge list for this node, if it fits
struct SerializedNodeHeader<K> { struct SerializedNodeHeader<D> {
// 2 bytes for the DepNode // 2 bytes for the DepNode
// 16 for Fingerprint in DepNode // 16 for Fingerprint in DepNode
// 16 for Fingerprint in NodeInfo // 16 for Fingerprint in NodeInfo
bytes: [u8; 34], bytes: [u8; 34],
_marker: PhantomData<K>, _marker: PhantomData<D>,
} }
// The fields of a `SerializedNodeHeader`, this struct is an implementation detail and exists only // The fields of a `SerializedNodeHeader`, this struct is an implementation detail and exists only
// to make the implementation of `SerializedNodeHeader` simpler. // to make the implementation of `SerializedNodeHeader` simpler.
struct Unpacked<K> { struct Unpacked {
len: Option<usize>, len: Option<usize>,
bytes_per_index: usize, bytes_per_index: usize,
kind: K, kind: DepKind,
hash: PackedFingerprint, hash: PackedFingerprint,
fingerprint: Fingerprint, fingerprint: Fingerprint,
} }
@ -301,20 +299,20 @@ struct Unpacked<K> {
// 0..M length of the edge // 0..M length of the edge
// M..M+N bytes per index // M..M+N bytes per index
// M+N..16 kind // M+N..16 kind
impl<K: DepKind> SerializedNodeHeader<K> { impl<D: Deps> SerializedNodeHeader<D> {
const TOTAL_BITS: usize = std::mem::size_of::<K>() * 8; const TOTAL_BITS: usize = std::mem::size_of::<DepKind>() * 8;
const LEN_BITS: usize = Self::TOTAL_BITS - Self::KIND_BITS - Self::WIDTH_BITS; const LEN_BITS: usize = Self::TOTAL_BITS - Self::KIND_BITS - Self::WIDTH_BITS;
const WIDTH_BITS: usize = DEP_NODE_WIDTH_BITS; const WIDTH_BITS: usize = DEP_NODE_WIDTH_BITS;
const KIND_BITS: usize = Self::TOTAL_BITS - K::MAX.leading_zeros() as usize; const KIND_BITS: usize = Self::TOTAL_BITS - D::DEP_KIND_MAX.leading_zeros() as usize;
const MAX_INLINE_LEN: usize = (u16::MAX as usize >> (Self::TOTAL_BITS - Self::LEN_BITS)) - 1; const MAX_INLINE_LEN: usize = (u16::MAX as usize >> (Self::TOTAL_BITS - Self::LEN_BITS)) - 1;
#[inline] #[inline]
fn new(node_info: &NodeInfo<K>) -> Self { fn new(node_info: &NodeInfo) -> Self {
debug_assert_eq!(Self::TOTAL_BITS, Self::LEN_BITS + Self::WIDTH_BITS + Self::KIND_BITS); debug_assert_eq!(Self::TOTAL_BITS, Self::LEN_BITS + Self::WIDTH_BITS + Self::KIND_BITS);
let NodeInfo { node, fingerprint, edges } = node_info; let NodeInfo { node, fingerprint, edges } = node_info;
let mut head = node.kind.to_u16(); let mut head = node.kind.as_inner();
let free_bytes = edges.max_index().leading_zeros() as usize / 8; let free_bytes = edges.max_index().leading_zeros() as usize / 8;
let bytes_per_index = (DEP_NODE_SIZE - free_bytes).saturating_sub(1); let bytes_per_index = (DEP_NODE_SIZE - free_bytes).saturating_sub(1);
@ -347,7 +345,7 @@ impl<K: DepKind> SerializedNodeHeader<K> {
} }
#[inline] #[inline]
fn unpack(&self) -> Unpacked<K> { fn unpack(&self) -> Unpacked {
let head = u16::from_le_bytes(self.bytes[..2].try_into().unwrap()); let head = u16::from_le_bytes(self.bytes[..2].try_into().unwrap());
let hash = self.bytes[2..18].try_into().unwrap(); let hash = self.bytes[2..18].try_into().unwrap();
let fingerprint = self.bytes[18..].try_into().unwrap(); let fingerprint = self.bytes[18..].try_into().unwrap();
@ -359,7 +357,7 @@ impl<K: DepKind> SerializedNodeHeader<K> {
Unpacked { Unpacked {
len: len.checked_sub(1), len: len.checked_sub(1),
bytes_per_index: bytes_per_index as usize + 1, bytes_per_index: bytes_per_index as usize + 1,
kind: DepKind::from_u16(kind), kind: DepKind::new(kind),
hash: Fingerprint::from_le_bytes(hash).into(), hash: Fingerprint::from_le_bytes(hash).into(),
fingerprint: Fingerprint::from_le_bytes(fingerprint), fingerprint: Fingerprint::from_le_bytes(fingerprint),
} }
@ -381,7 +379,7 @@ impl<K: DepKind> SerializedNodeHeader<K> {
} }
#[inline] #[inline]
fn node(&self) -> DepNode<K> { fn node(&self) -> DepNode {
let Unpacked { kind, hash, .. } = self.unpack(); let Unpacked { kind, hash, .. } = self.unpack();
DepNode { kind, hash } DepNode { kind, hash }
} }
@ -395,15 +393,15 @@ impl<K: DepKind> SerializedNodeHeader<K> {
} }
#[derive(Debug)] #[derive(Debug)]
struct NodeInfo<K: DepKind> { struct NodeInfo {
node: DepNode<K>, node: DepNode,
fingerprint: Fingerprint, fingerprint: Fingerprint,
edges: EdgesVec, edges: EdgesVec,
} }
impl<K: DepKind> Encodable<FileEncoder> for NodeInfo<K> { impl NodeInfo {
fn encode(&self, e: &mut FileEncoder) { fn encode<D: Deps>(&self, e: &mut FileEncoder) {
let header = SerializedNodeHeader::new(self); let header = SerializedNodeHeader::<D>::new(self);
e.write_array(header.bytes); e.write_array(header.bytes);
if header.len().is_none() { if header.len().is_none() {
@ -420,41 +418,43 @@ impl<K: DepKind> Encodable<FileEncoder> for NodeInfo<K> {
} }
} }
struct Stat<K: DepKind> { struct Stat {
kind: K, kind: DepKind,
node_counter: u64, node_counter: u64,
edge_counter: u64, edge_counter: u64,
} }
struct EncoderState<K: DepKind> { struct EncoderState<D: Deps> {
encoder: FileEncoder, encoder: FileEncoder,
total_node_count: usize, total_node_count: usize,
total_edge_count: usize, total_edge_count: usize,
stats: Option<FxHashMap<K, Stat<K>>>, stats: Option<FxHashMap<DepKind, Stat>>,
/// Stores the number of times we've encoded each dep kind. /// Stores the number of times we've encoded each dep kind.
kind_stats: Vec<u32>, kind_stats: Vec<u32>,
marker: PhantomData<D>,
} }
impl<K: DepKind> EncoderState<K> { impl<D: Deps> EncoderState<D> {
fn new(encoder: FileEncoder, record_stats: bool) -> Self { fn new(encoder: FileEncoder, record_stats: bool) -> Self {
Self { Self {
encoder, encoder,
total_edge_count: 0, total_edge_count: 0,
total_node_count: 0, total_node_count: 0,
stats: record_stats.then(FxHashMap::default), stats: record_stats.then(FxHashMap::default),
kind_stats: iter::repeat(0).take(K::MAX as usize + 1).collect(), kind_stats: iter::repeat(0).take(D::DEP_KIND_MAX as usize + 1).collect(),
marker: PhantomData,
} }
} }
fn encode_node( fn encode_node(
&mut self, &mut self,
node: &NodeInfo<K>, node: &NodeInfo,
record_graph: &Option<Lock<DepGraphQuery<K>>>, record_graph: &Option<Lock<DepGraphQuery>>,
) -> DepNodeIndex { ) -> DepNodeIndex {
let index = DepNodeIndex::new(self.total_node_count); let index = DepNodeIndex::new(self.total_node_count);
self.total_node_count += 1; self.total_node_count += 1;
self.kind_stats[node.node.kind.to_u16() as usize] += 1; self.kind_stats[node.node.kind.as_usize()] += 1;
let edge_count = node.edges.len(); let edge_count = node.edges.len();
self.total_edge_count += edge_count; self.total_edge_count += edge_count;
@ -475,12 +475,19 @@ impl<K: DepKind> EncoderState<K> {
} }
let encoder = &mut self.encoder; let encoder = &mut self.encoder;
node.encode(encoder); node.encode::<D>(encoder);
index index
} }
fn finish(self, profiler: &SelfProfilerRef) -> FileEncodeResult { fn finish(self, profiler: &SelfProfilerRef) -> FileEncodeResult {
let Self { mut encoder, total_node_count, total_edge_count, stats: _, kind_stats } = self; let Self {
mut encoder,
total_node_count,
total_edge_count,
stats: _,
kind_stats,
marker: _,
} = self;
let node_count = total_node_count.try_into().unwrap(); let node_count = total_node_count.try_into().unwrap();
let edge_count = total_edge_count.try_into().unwrap(); let edge_count = total_edge_count.try_into().unwrap();
@ -506,12 +513,12 @@ impl<K: DepKind> EncoderState<K> {
} }
} }
pub struct GraphEncoder<K: DepKind> { pub struct GraphEncoder<D: Deps> {
status: Lock<EncoderState<K>>, status: Lock<EncoderState<D>>,
record_graph: Option<Lock<DepGraphQuery<K>>>, record_graph: Option<Lock<DepGraphQuery>>,
} }
impl<K: DepKind + Encodable<FileEncoder>> GraphEncoder<K> { impl<D: Deps> GraphEncoder<D> {
pub fn new( pub fn new(
encoder: FileEncoder, encoder: FileEncoder,
prev_node_count: usize, prev_node_count: usize,
@ -523,7 +530,7 @@ impl<K: DepKind + Encodable<FileEncoder>> GraphEncoder<K> {
GraphEncoder { status, record_graph } GraphEncoder { status, record_graph }
} }
pub(crate) fn with_query(&self, f: impl Fn(&DepGraphQuery<K>)) { pub(crate) fn with_query(&self, f: impl Fn(&DepGraphQuery)) {
if let Some(record_graph) = &self.record_graph { if let Some(record_graph) = &self.record_graph {
f(&record_graph.lock()) f(&record_graph.lock())
} }
@ -584,7 +591,7 @@ impl<K: DepKind + Encodable<FileEncoder>> GraphEncoder<K> {
pub(crate) fn send( pub(crate) fn send(
&self, &self,
profiler: &SelfProfilerRef, profiler: &SelfProfilerRef,
node: DepNode<K>, node: DepNode,
fingerprint: Fingerprint, fingerprint: Fingerprint,
edges: EdgesVec, edges: EdgesVec,
) -> DepNodeIndex { ) -> DepNodeIndex {

View file

@ -1,6 +1,6 @@
//! Query configuration and description traits. //! Query configuration and description traits.
use crate::dep_graph::{DepNode, DepNodeParams, SerializedDepNodeIndex}; use crate::dep_graph::{DepKind, DepNode, DepNodeParams, SerializedDepNodeIndex};
use crate::error::HandleCycleError; use crate::error::HandleCycleError;
use crate::ich::StableHashingContext; use crate::ich::StableHashingContext;
use crate::query::caches::QueryCache; use crate::query::caches::QueryCache;
@ -27,7 +27,7 @@ pub trait QueryConfig<Qcx: QueryContext>: Copy {
fn format_value(self) -> fn(&Self::Value) -> String; fn format_value(self) -> fn(&Self::Value) -> String;
// Don't use this method to access query results, instead use the methods on TyCtxt // Don't use this method to access query results, instead use the methods on TyCtxt
fn query_state<'a>(self, tcx: Qcx) -> &'a QueryState<Self::Key, Qcx::DepKind> fn query_state<'a>(self, tcx: Qcx) -> &'a QueryState<Self::Key>
where where
Qcx: 'a; Qcx: 'a;
@ -57,7 +57,7 @@ pub trait QueryConfig<Qcx: QueryContext>: Copy {
fn value_from_cycle_error( fn value_from_cycle_error(
self, self,
tcx: Qcx::DepContext, tcx: Qcx::DepContext,
cycle: &[QueryInfo<Qcx::DepKind>], cycle: &[QueryInfo],
guar: ErrorGuaranteed, guar: ErrorGuaranteed,
) -> Self::Value; ) -> Self::Value;
@ -66,12 +66,12 @@ pub trait QueryConfig<Qcx: QueryContext>: Copy {
fn depth_limit(self) -> bool; fn depth_limit(self) -> bool;
fn feedable(self) -> bool; fn feedable(self) -> bool;
fn dep_kind(self) -> Qcx::DepKind; fn dep_kind(self) -> DepKind;
fn handle_cycle_error(self) -> HandleCycleError; fn handle_cycle_error(self) -> HandleCycleError;
fn hash_result(self) -> HashResult<Self::Value>; fn hash_result(self) -> HashResult<Self::Value>;
// Just here for convenience and checking that the key matches the kind, don't override this. // Just here for convenience and checking that the key matches the kind, don't override this.
fn construct_dep_node(self, tcx: Qcx::DepContext, key: &Self::Key) -> DepNode<Qcx::DepKind> { fn construct_dep_node(self, tcx: Qcx::DepContext, key: &Self::Key) -> DepNode {
DepNode::construct(tcx, self.dep_kind(), key) DepNode::construct(tcx, self.dep_kind(), key)
} }
} }

View file

@ -1,9 +1,8 @@
use crate::dep_graph::DepKind; use crate::dep_graph::DepContext;
use crate::error::CycleStack; use crate::error::CycleStack;
use crate::query::plumbing::CycleError; use crate::query::plumbing::CycleError;
use crate::query::DepKind;
use crate::query::{QueryContext, QueryStackFrame}; use crate::query::{QueryContext, QueryStackFrame};
use core::marker::PhantomData;
use rustc_data_structures::fx::FxHashMap; use rustc_data_structures::fx::FxHashMap;
use rustc_errors::{ use rustc_errors::{
Diagnostic, DiagnosticBuilder, ErrorGuaranteed, Handler, IntoDiagnostic, Level, Diagnostic, DiagnosticBuilder, ErrorGuaranteed, Handler, IntoDiagnostic, Level,
@ -30,48 +29,48 @@ use {
/// Represents a span and a query key. /// Represents a span and a query key.
#[derive(Clone, Debug)] #[derive(Clone, Debug)]
pub struct QueryInfo<D: DepKind> { pub struct QueryInfo {
/// The span corresponding to the reason for which this query was required. /// The span corresponding to the reason for which this query was required.
pub span: Span, pub span: Span,
pub query: QueryStackFrame<D>, pub query: QueryStackFrame,
} }
pub type QueryMap<D> = FxHashMap<QueryJobId, QueryJobInfo<D>>; pub type QueryMap = FxHashMap<QueryJobId, QueryJobInfo>;
/// A value uniquely identifying an active query job. /// A value uniquely identifying an active query job.
#[derive(Copy, Clone, Eq, PartialEq, Hash)] #[derive(Copy, Clone, Eq, PartialEq, Hash)]
pub struct QueryJobId(pub NonZeroU64); pub struct QueryJobId(pub NonZeroU64);
impl QueryJobId { impl QueryJobId {
fn query<D: DepKind>(self, map: &QueryMap<D>) -> QueryStackFrame<D> { fn query(self, map: &QueryMap) -> QueryStackFrame {
map.get(&self).unwrap().query.clone() map.get(&self).unwrap().query.clone()
} }
#[cfg(parallel_compiler)] #[cfg(parallel_compiler)]
fn span<D: DepKind>(self, map: &QueryMap<D>) -> Span { fn span(self, map: &QueryMap) -> Span {
map.get(&self).unwrap().job.span map.get(&self).unwrap().job.span
} }
#[cfg(parallel_compiler)] #[cfg(parallel_compiler)]
fn parent<D: DepKind>(self, map: &QueryMap<D>) -> Option<QueryJobId> { fn parent(self, map: &QueryMap) -> Option<QueryJobId> {
map.get(&self).unwrap().job.parent map.get(&self).unwrap().job.parent
} }
#[cfg(parallel_compiler)] #[cfg(parallel_compiler)]
fn latch<D: DepKind>(self, map: &QueryMap<D>) -> Option<&QueryLatch<D>> { fn latch(self, map: &QueryMap) -> Option<&QueryLatch> {
map.get(&self).unwrap().job.latch.as_ref() map.get(&self).unwrap().job.latch.as_ref()
} }
} }
#[derive(Clone)] #[derive(Clone)]
pub struct QueryJobInfo<D: DepKind> { pub struct QueryJobInfo {
pub query: QueryStackFrame<D>, pub query: QueryStackFrame,
pub job: QueryJob<D>, pub job: QueryJob,
} }
/// Represents an active query job. /// Represents an active query job.
#[derive(Clone)] #[derive(Clone)]
pub struct QueryJob<D: DepKind> { pub struct QueryJob {
pub id: QueryJobId, pub id: QueryJobId,
/// The span corresponding to the reason for which this query was required. /// The span corresponding to the reason for which this query was required.
@ -82,11 +81,10 @@ pub struct QueryJob<D: DepKind> {
/// The latch that is used to wait on this job. /// The latch that is used to wait on this job.
#[cfg(parallel_compiler)] #[cfg(parallel_compiler)]
latch: Option<QueryLatch<D>>, latch: Option<QueryLatch>,
spooky: core::marker::PhantomData<D>,
} }
impl<D: DepKind> QueryJob<D> { impl QueryJob {
/// Creates a new query job. /// Creates a new query job.
#[inline] #[inline]
pub fn new(id: QueryJobId, span: Span, parent: Option<QueryJobId>) -> Self { pub fn new(id: QueryJobId, span: Span, parent: Option<QueryJobId>) -> Self {
@ -96,12 +94,11 @@ impl<D: DepKind> QueryJob<D> {
parent, parent,
#[cfg(parallel_compiler)] #[cfg(parallel_compiler)]
latch: None, latch: None,
spooky: PhantomData,
} }
} }
#[cfg(parallel_compiler)] #[cfg(parallel_compiler)]
pub(super) fn latch(&mut self) -> QueryLatch<D> { pub(super) fn latch(&mut self) -> QueryLatch {
if self.latch.is_none() { if self.latch.is_none() {
self.latch = Some(QueryLatch::new()); self.latch = Some(QueryLatch::new());
} }
@ -124,12 +121,12 @@ impl<D: DepKind> QueryJob<D> {
} }
impl QueryJobId { impl QueryJobId {
pub(super) fn find_cycle_in_stack<D: DepKind>( pub(super) fn find_cycle_in_stack(
&self, &self,
query_map: QueryMap<D>, query_map: QueryMap,
current_job: &Option<QueryJobId>, current_job: &Option<QueryJobId>,
span: Span, span: Span,
) -> CycleError<D> { ) -> CycleError {
// Find the waitee amongst `current_job` parents // Find the waitee amongst `current_job` parents
let mut cycle = Vec::new(); let mut cycle = Vec::new();
let mut current_job = Option::clone(current_job); let mut current_job = Option::clone(current_job);
@ -163,18 +160,18 @@ impl QueryJobId {
#[cold] #[cold]
#[inline(never)] #[inline(never)]
pub fn try_find_layout_root<D: DepKind>( pub fn try_find_layout_root(
&self, &self,
query_map: QueryMap<D>, query_map: QueryMap,
) -> Option<(QueryJobInfo<D>, usize)> { layout_of_kind: DepKind,
) -> Option<(QueryJobInfo, usize)> {
let mut last_layout = None; let mut last_layout = None;
let mut current_id = Some(*self); let mut current_id = Some(*self);
let mut depth = 0; let mut depth = 0;
while let Some(id) = current_id { while let Some(id) = current_id {
let info = query_map.get(&id).unwrap(); let info = query_map.get(&id).unwrap();
// FIXME: This string comparison should probably not be done. if info.query.dep_kind == layout_of_kind {
if format!("{:?}", info.query.dep_kind) == "layout_of" {
depth += 1; depth += 1;
last_layout = Some((info.clone(), depth)); last_layout = Some((info.clone(), depth));
} }
@ -185,15 +182,15 @@ impl QueryJobId {
} }
#[cfg(parallel_compiler)] #[cfg(parallel_compiler)]
struct QueryWaiter<D: DepKind> { struct QueryWaiter {
query: Option<QueryJobId>, query: Option<QueryJobId>,
condvar: Condvar, condvar: Condvar,
span: Span, span: Span,
cycle: Mutex<Option<CycleError<D>>>, cycle: Mutex<Option<CycleError>>,
} }
#[cfg(parallel_compiler)] #[cfg(parallel_compiler)]
impl<D: DepKind> QueryWaiter<D> { impl QueryWaiter {
fn notify(&self, registry: &rayon_core::Registry) { fn notify(&self, registry: &rayon_core::Registry) {
rayon_core::mark_unblocked(registry); rayon_core::mark_unblocked(registry);
self.condvar.notify_one(); self.condvar.notify_one();
@ -201,19 +198,19 @@ impl<D: DepKind> QueryWaiter<D> {
} }
#[cfg(parallel_compiler)] #[cfg(parallel_compiler)]
struct QueryLatchInfo<D: DepKind> { struct QueryLatchInfo {
complete: bool, complete: bool,
waiters: Vec<Arc<QueryWaiter<D>>>, waiters: Vec<Arc<QueryWaiter>>,
} }
#[cfg(parallel_compiler)] #[cfg(parallel_compiler)]
#[derive(Clone)] #[derive(Clone)]
pub(super) struct QueryLatch<D: DepKind> { pub(super) struct QueryLatch {
info: Arc<Mutex<QueryLatchInfo<D>>>, info: Arc<Mutex<QueryLatchInfo>>,
} }
#[cfg(parallel_compiler)] #[cfg(parallel_compiler)]
impl<D: DepKind> QueryLatch<D> { impl QueryLatch {
fn new() -> Self { fn new() -> Self {
QueryLatch { QueryLatch {
info: Arc::new(Mutex::new(QueryLatchInfo { complete: false, waiters: Vec::new() })), info: Arc::new(Mutex::new(QueryLatchInfo { complete: false, waiters: Vec::new() })),
@ -221,11 +218,7 @@ impl<D: DepKind> QueryLatch<D> {
} }
/// Awaits for the query job to complete. /// Awaits for the query job to complete.
pub(super) fn wait_on( pub(super) fn wait_on(&self, query: Option<QueryJobId>, span: Span) -> Result<(), CycleError> {
&self,
query: Option<QueryJobId>,
span: Span,
) -> Result<(), CycleError<D>> {
let waiter = let waiter =
Arc::new(QueryWaiter { query, span, cycle: Mutex::new(None), condvar: Condvar::new() }); Arc::new(QueryWaiter { query, span, cycle: Mutex::new(None), condvar: Condvar::new() });
self.wait_on_inner(&waiter); self.wait_on_inner(&waiter);
@ -240,7 +233,7 @@ impl<D: DepKind> QueryLatch<D> {
} }
/// Awaits the caller on this latch by blocking the current thread. /// Awaits the caller on this latch by blocking the current thread.
fn wait_on_inner(&self, waiter: &Arc<QueryWaiter<D>>) { fn wait_on_inner(&self, waiter: &Arc<QueryWaiter>) {
let mut info = self.info.lock(); let mut info = self.info.lock();
if !info.complete { if !info.complete {
// We push the waiter on to the `waiters` list. It can be accessed inside // We push the waiter on to the `waiters` list. It can be accessed inside
@ -274,7 +267,7 @@ impl<D: DepKind> QueryLatch<D> {
/// Removes a single waiter from the list of waiters. /// Removes a single waiter from the list of waiters.
/// This is used to break query cycles. /// This is used to break query cycles.
fn extract_waiter(&self, waiter: usize) -> Arc<QueryWaiter<D>> { fn extract_waiter(&self, waiter: usize) -> Arc<QueryWaiter> {
let mut info = self.info.lock(); let mut info = self.info.lock();
debug_assert!(!info.complete); debug_assert!(!info.complete);
// Remove the waiter from the list of waiters // Remove the waiter from the list of waiters
@ -296,14 +289,9 @@ type Waiter = (QueryJobId, usize);
/// required information to resume the waiter. /// required information to resume the waiter.
/// If all `visit` calls returns None, this function also returns None. /// If all `visit` calls returns None, this function also returns None.
#[cfg(parallel_compiler)] #[cfg(parallel_compiler)]
fn visit_waiters<F, D>( fn visit_waiters<F>(query_map: &QueryMap, query: QueryJobId, mut visit: F) -> Option<Option<Waiter>>
query_map: &QueryMap<D>,
query: QueryJobId,
mut visit: F,
) -> Option<Option<Waiter>>
where where
F: FnMut(Span, QueryJobId) -> Option<Option<Waiter>>, F: FnMut(Span, QueryJobId) -> Option<Option<Waiter>>,
D: DepKind,
{ {
// Visit the parent query which is a non-resumable waiter since it's on the same stack // Visit the parent query which is a non-resumable waiter since it's on the same stack
if let Some(parent) = query.parent(query_map) { if let Some(parent) = query.parent(query_map) {
@ -332,8 +320,8 @@ where
/// If a cycle is detected, this initial value is replaced with the span causing /// If a cycle is detected, this initial value is replaced with the span causing
/// the cycle. /// the cycle.
#[cfg(parallel_compiler)] #[cfg(parallel_compiler)]
fn cycle_check<D: DepKind>( fn cycle_check(
query_map: &QueryMap<D>, query_map: &QueryMap,
query: QueryJobId, query: QueryJobId,
span: Span, span: Span,
stack: &mut Vec<(Span, QueryJobId)>, stack: &mut Vec<(Span, QueryJobId)>,
@ -373,8 +361,8 @@ fn cycle_check<D: DepKind>(
/// from `query` without going through any of the queries in `visited`. /// from `query` without going through any of the queries in `visited`.
/// This is achieved with a depth first search. /// This is achieved with a depth first search.
#[cfg(parallel_compiler)] #[cfg(parallel_compiler)]
fn connected_to_root<D: DepKind>( fn connected_to_root(
query_map: &QueryMap<D>, query_map: &QueryMap,
query: QueryJobId, query: QueryJobId,
visited: &mut FxHashSet<QueryJobId>, visited: &mut FxHashSet<QueryJobId>,
) -> bool { ) -> bool {
@ -396,10 +384,9 @@ fn connected_to_root<D: DepKind>(
// Deterministically pick an query from a list // Deterministically pick an query from a list
#[cfg(parallel_compiler)] #[cfg(parallel_compiler)]
fn pick_query<'a, T, F, D>(query_map: &QueryMap<D>, queries: &'a [T], f: F) -> &'a T fn pick_query<'a, T, F>(query_map: &QueryMap, queries: &'a [T], f: F) -> &'a T
where where
F: Fn(&T) -> (Span, QueryJobId), F: Fn(&T) -> (Span, QueryJobId),
D: DepKind,
{ {
// Deterministically pick an entry point // Deterministically pick an entry point
// FIXME: Sort this instead // FIXME: Sort this instead
@ -423,10 +410,10 @@ where
/// If a cycle was not found, the starting query is removed from `jobs` and /// If a cycle was not found, the starting query is removed from `jobs` and
/// the function returns false. /// the function returns false.
#[cfg(parallel_compiler)] #[cfg(parallel_compiler)]
fn remove_cycle<D: DepKind>( fn remove_cycle(
query_map: &QueryMap<D>, query_map: &QueryMap,
jobs: &mut Vec<QueryJobId>, jobs: &mut Vec<QueryJobId>,
wakelist: &mut Vec<Arc<QueryWaiter<D>>>, wakelist: &mut Vec<Arc<QueryWaiter>>,
) -> bool { ) -> bool {
let mut visited = FxHashSet::default(); let mut visited = FxHashSet::default();
let mut stack = Vec::new(); let mut stack = Vec::new();
@ -528,7 +515,7 @@ fn remove_cycle<D: DepKind>(
/// There may be multiple cycles involved in a deadlock, so this searches /// There may be multiple cycles involved in a deadlock, so this searches
/// all active queries for cycles before finally resuming all the waiters at once. /// all active queries for cycles before finally resuming all the waiters at once.
#[cfg(parallel_compiler)] #[cfg(parallel_compiler)]
pub fn deadlock<D: DepKind>(query_map: QueryMap<D>, registry: &rayon_core::Registry) { pub fn deadlock(query_map: QueryMap, registry: &rayon_core::Registry) {
let on_panic = defer(|| { let on_panic = defer(|| {
eprintln!("deadlock handler panicked, aborting process"); eprintln!("deadlock handler panicked, aborting process");
process::abort(); process::abort();
@ -566,9 +553,9 @@ pub fn deadlock<D: DepKind>(query_map: QueryMap<D>, registry: &rayon_core::Regis
#[inline(never)] #[inline(never)]
#[cold] #[cold]
pub(crate) fn report_cycle<'a, D: DepKind>( pub(crate) fn report_cycle<'a>(
sess: &'a Session, sess: &'a Session,
CycleError { usage, cycle: stack }: &CycleError<D>, CycleError { usage, cycle: stack }: &CycleError,
) -> DiagnosticBuilder<'a, ErrorGuaranteed> { ) -> DiagnosticBuilder<'a, ErrorGuaranteed> {
assert!(!stack.is_empty()); assert!(!stack.is_empty());
@ -655,8 +642,10 @@ pub fn print_query_stack<Qcx: QueryContext>(
if let Some(ref mut file) = file { if let Some(ref mut file) = file {
let _ = writeln!( let _ = writeln!(
file, file,
"#{} [{:?}] {}", "#{} [{}] {}",
count_total, query_info.query.dep_kind, query_info.query.description count_total,
qcx.dep_context().dep_kind_info(query_info.query.dep_kind).name,
query_info.query.description
); );
} }

View file

@ -28,27 +28,27 @@ use thin_vec::ThinVec;
/// ///
/// This is mostly used in case of cycles for error reporting. /// This is mostly used in case of cycles for error reporting.
#[derive(Clone, Debug)] #[derive(Clone, Debug)]
pub struct QueryStackFrame<D: DepKind> { pub struct QueryStackFrame {
pub description: String, pub description: String,
span: Option<Span>, span: Option<Span>,
pub def_id: Option<DefId>, pub def_id: Option<DefId>,
pub def_kind: Option<DefKind>, pub def_kind: Option<DefKind>,
pub ty_adt_id: Option<DefId>, pub ty_adt_id: Option<DefId>,
pub dep_kind: D, pub dep_kind: DepKind,
/// This hash is used to deterministically pick /// This hash is used to deterministically pick
/// a query to remove cycles in the parallel compiler. /// a query to remove cycles in the parallel compiler.
#[cfg(parallel_compiler)] #[cfg(parallel_compiler)]
hash: Hash64, hash: Hash64,
} }
impl<D: DepKind> QueryStackFrame<D> { impl QueryStackFrame {
#[inline] #[inline]
pub fn new( pub fn new(
description: String, description: String,
span: Option<Span>, span: Option<Span>,
def_id: Option<DefId>, def_id: Option<DefId>,
def_kind: Option<DefKind>, def_kind: Option<DefKind>,
dep_kind: D, dep_kind: DepKind,
ty_adt_id: Option<DefId>, ty_adt_id: Option<DefId>,
_hash: impl FnOnce() -> Hash64, _hash: impl FnOnce() -> Hash64,
) -> Self { ) -> Self {
@ -106,7 +106,7 @@ pub trait QueryContext: HasDepContext {
/// Get the query information from the TLS context. /// Get the query information from the TLS context.
fn current_query_job(self) -> Option<QueryJobId>; fn current_query_job(self) -> Option<QueryJobId>;
fn try_collect_active_jobs(self) -> Option<QueryMap<Self::DepKind>>; fn try_collect_active_jobs(self) -> Option<QueryMap>;
/// Load side effects associated to the node in the previous session. /// Load side effects associated to the node in the previous session.
fn load_side_effects(self, prev_dep_node_index: SerializedDepNodeIndex) -> QuerySideEffects; fn load_side_effects(self, prev_dep_node_index: SerializedDepNodeIndex) -> QuerySideEffects;

View file

@ -2,8 +2,8 @@
//! generate the actual methods on tcx which find and execute the provider, //! generate the actual methods on tcx which find and execute the provider,
//! manage the caches, and so forth. //! manage the caches, and so forth.
use crate::dep_graph::{DepContext, DepKind, DepNode, DepNodeIndex, DepNodeParams}; use crate::dep_graph::DepGraphData;
use crate::dep_graph::{DepGraphData, HasDepContext}; use crate::dep_graph::{DepContext, DepNode, DepNodeIndex, DepNodeParams};
use crate::ich::StableHashingContext; use crate::ich::StableHashingContext;
use crate::query::caches::QueryCache; use crate::query::caches::QueryCache;
#[cfg(parallel_compiler)] #[cfg(parallel_compiler)]
@ -30,24 +30,23 @@ use thin_vec::ThinVec;
use super::QueryConfig; use super::QueryConfig;
pub struct QueryState<K, D: DepKind> { pub struct QueryState<K> {
active: Sharded<FxHashMap<K, QueryResult<D>>>, active: Sharded<FxHashMap<K, QueryResult>>,
} }
/// Indicates the state of a query for a given key in a query map. /// Indicates the state of a query for a given key in a query map.
enum QueryResult<D: DepKind> { enum QueryResult {
/// An already executing query. The query job can be used to await for its completion. /// An already executing query. The query job can be used to await for its completion.
Started(QueryJob<D>), Started(QueryJob),
/// The query panicked. Queries trying to wait on this will raise a fatal error which will /// The query panicked. Queries trying to wait on this will raise a fatal error which will
/// silently panic. /// silently panic.
Poisoned, Poisoned,
} }
impl<K, D> QueryState<K, D> impl<K> QueryState<K>
where where
K: Eq + Hash + Copy + Debug, K: Eq + Hash + Copy + Debug,
D: DepKind,
{ {
pub fn all_inactive(&self) -> bool { pub fn all_inactive(&self) -> bool {
self.active.lock_shards().all(|shard| shard.is_empty()) self.active.lock_shards().all(|shard| shard.is_empty())
@ -56,8 +55,8 @@ where
pub fn try_collect_active_jobs<Qcx: Copy>( pub fn try_collect_active_jobs<Qcx: Copy>(
&self, &self,
qcx: Qcx, qcx: Qcx,
make_query: fn(Qcx, K) -> QueryStackFrame<D>, make_query: fn(Qcx, K) -> QueryStackFrame,
jobs: &mut QueryMap<D>, jobs: &mut QueryMap,
) -> Option<()> { ) -> Option<()> {
let mut active = Vec::new(); let mut active = Vec::new();
@ -82,25 +81,25 @@ where
} }
} }
impl<K, D: DepKind> Default for QueryState<K, D> { impl<K> Default for QueryState<K> {
fn default() -> QueryState<K, D> { fn default() -> QueryState<K> {
QueryState { active: Default::default() } QueryState { active: Default::default() }
} }
} }
/// A type representing the responsibility to execute the job in the `job` field. /// A type representing the responsibility to execute the job in the `job` field.
/// This will poison the relevant query if dropped. /// This will poison the relevant query if dropped.
struct JobOwner<'tcx, K, D: DepKind> struct JobOwner<'tcx, K>
where where
K: Eq + Hash + Copy, K: Eq + Hash + Copy,
{ {
state: &'tcx QueryState<K, D>, state: &'tcx QueryState<K>,
key: K, key: K,
} }
#[cold] #[cold]
#[inline(never)] #[inline(never)]
fn mk_cycle<Q, Qcx>(query: Q, qcx: Qcx, cycle_error: CycleError<Qcx::DepKind>) -> Q::Value fn mk_cycle<Q, Qcx>(query: Q, qcx: Qcx, cycle_error: CycleError) -> Q::Value
where where
Q: QueryConfig<Qcx>, Q: QueryConfig<Qcx>,
Qcx: QueryContext, Qcx: QueryContext,
@ -112,7 +111,7 @@ where
fn handle_cycle_error<Q, Qcx>( fn handle_cycle_error<Q, Qcx>(
query: Q, query: Q,
qcx: Qcx, qcx: Qcx,
cycle_error: &CycleError<Qcx::DepKind>, cycle_error: &CycleError,
mut error: DiagnosticBuilder<'_, ErrorGuaranteed>, mut error: DiagnosticBuilder<'_, ErrorGuaranteed>,
) -> Q::Value ) -> Q::Value
where where
@ -137,7 +136,7 @@ where
} }
} }
impl<'tcx, K, D: DepKind> JobOwner<'tcx, K, D> impl<'tcx, K> JobOwner<'tcx, K>
where where
K: Eq + Hash + Copy, K: Eq + Hash + Copy,
{ {
@ -169,10 +168,9 @@ where
} }
} }
impl<'tcx, K, D> Drop for JobOwner<'tcx, K, D> impl<'tcx, K> Drop for JobOwner<'tcx, K>
where where
K: Eq + Hash + Copy, K: Eq + Hash + Copy,
D: DepKind,
{ {
#[inline(never)] #[inline(never)]
#[cold] #[cold]
@ -195,10 +193,10 @@ where
} }
#[derive(Clone)] #[derive(Clone)]
pub(crate) struct CycleError<D: DepKind> { pub(crate) struct CycleError {
/// The query and related span that uses the cycle. /// The query and related span that uses the cycle.
pub usage: Option<(Span, QueryStackFrame<D>)>, pub usage: Option<(Span, QueryStackFrame)>,
pub cycle: Vec<QueryInfo<D>>, pub cycle: Vec<QueryInfo>,
} }
/// Checks if the query is already computed and in the cache. /// Checks if the query is already computed and in the cache.
@ -248,7 +246,7 @@ fn wait_for_query<Q, Qcx>(
qcx: Qcx, qcx: Qcx,
span: Span, span: Span,
key: Q::Key, key: Q::Key,
latch: QueryLatch<Qcx::DepKind>, latch: QueryLatch,
current: Option<QueryJobId>, current: Option<QueryJobId>,
) -> (Q::Value, Option<DepNodeIndex>) ) -> (Q::Value, Option<DepNodeIndex>)
where where
@ -296,7 +294,7 @@ fn try_execute_query<Q, Qcx, const INCR: bool>(
qcx: Qcx, qcx: Qcx,
span: Span, span: Span,
key: Q::Key, key: Q::Key,
dep_node: Option<DepNode<Qcx::DepKind>>, dep_node: Option<DepNode>,
) -> (Q::Value, Option<DepNodeIndex>) ) -> (Q::Value, Option<DepNodeIndex>)
where where
Q: QueryConfig<Qcx>, Q: QueryConfig<Qcx>,
@ -364,10 +362,10 @@ where
fn execute_job<Q, Qcx, const INCR: bool>( fn execute_job<Q, Qcx, const INCR: bool>(
query: Q, query: Q,
qcx: Qcx, qcx: Qcx,
state: &QueryState<Q::Key, Qcx::DepKind>, state: &QueryState<Q::Key>,
key: Q::Key, key: Q::Key,
id: QueryJobId, id: QueryJobId,
dep_node: Option<DepNode<Qcx::DepKind>>, dep_node: Option<DepNode>,
) -> (Q::Value, Option<DepNodeIndex>) ) -> (Q::Value, Option<DepNodeIndex>)
where where
Q: QueryConfig<Qcx>, Q: QueryConfig<Qcx>,
@ -474,9 +472,9 @@ where
fn execute_job_incr<Q, Qcx>( fn execute_job_incr<Q, Qcx>(
query: Q, query: Q,
qcx: Qcx, qcx: Qcx,
dep_graph_data: &DepGraphData<Qcx::DepKind>, dep_graph_data: &DepGraphData<Qcx::Deps>,
key: Q::Key, key: Q::Key,
mut dep_node_opt: Option<DepNode<Qcx::DepKind>>, mut dep_node_opt: Option<DepNode>,
job_id: QueryJobId, job_id: QueryJobId,
) -> (Q::Value, DepNodeIndex) ) -> (Q::Value, DepNodeIndex)
where where
@ -540,10 +538,10 @@ where
#[inline(always)] #[inline(always)]
fn try_load_from_disk_and_cache_in_memory<Q, Qcx>( fn try_load_from_disk_and_cache_in_memory<Q, Qcx>(
query: Q, query: Q,
dep_graph_data: &DepGraphData<Qcx::DepKind>, dep_graph_data: &DepGraphData<Qcx::Deps>,
qcx: Qcx, qcx: Qcx,
key: &Q::Key, key: &Q::Key,
dep_node: &DepNode<Qcx::DepKind>, dep_node: &DepNode,
) -> Option<(Q::Value, DepNodeIndex)> ) -> Option<(Q::Value, DepNodeIndex)>
where where
Q: QueryConfig<Qcx>, Q: QueryConfig<Qcx>,
@ -637,7 +635,7 @@ where
#[instrument(skip(tcx, dep_graph_data, result, hash_result, format_value), level = "debug")] #[instrument(skip(tcx, dep_graph_data, result, hash_result, format_value), level = "debug")]
pub(crate) fn incremental_verify_ich<Tcx, V>( pub(crate) fn incremental_verify_ich<Tcx, V>(
tcx: Tcx, tcx: Tcx,
dep_graph_data: &DepGraphData<Tcx::DepKind>, dep_graph_data: &DepGraphData<Tcx::Deps>,
result: &V, result: &V,
prev_index: SerializedDepNodeIndex, prev_index: SerializedDepNodeIndex,
hash_result: Option<fn(&mut StableHashingContext<'_>, &V) -> Fingerprint>, hash_result: Option<fn(&mut StableHashingContext<'_>, &V) -> Fingerprint>,
@ -730,7 +728,7 @@ fn ensure_must_run<Q, Qcx>(
qcx: Qcx, qcx: Qcx,
key: &Q::Key, key: &Q::Key,
check_cache: bool, check_cache: bool,
) -> (bool, Option<DepNode<Qcx::DepKind>>) ) -> (bool, Option<DepNode>)
where where
Q: QueryConfig<Qcx>, Q: QueryConfig<Qcx>,
Qcx: QueryContext, Qcx: QueryContext,
@ -821,12 +819,8 @@ where
Some(result) Some(result)
} }
pub fn force_query<Q, Qcx>( pub fn force_query<Q, Qcx>(query: Q, qcx: Qcx, key: Q::Key, dep_node: DepNode)
query: Q, where
qcx: Qcx,
key: Q::Key,
dep_node: DepNode<<Qcx as HasDepContext>::DepKind>,
) where
Q: QueryConfig<Qcx>, Q: QueryConfig<Qcx>,
Qcx: QueryContext, Qcx: QueryContext,
{ {

View file

@ -1,14 +1,14 @@
use rustc_span::ErrorGuaranteed; use rustc_span::ErrorGuaranteed;
use crate::dep_graph::{DepContext, DepKind}; use crate::dep_graph::DepContext;
use crate::query::QueryInfo; use crate::query::QueryInfo;
pub trait Value<Tcx: DepContext, D: DepKind>: Sized { pub trait Value<Tcx: DepContext>: Sized {
fn from_cycle_error(tcx: Tcx, cycle: &[QueryInfo<D>], guar: ErrorGuaranteed) -> Self; fn from_cycle_error(tcx: Tcx, cycle: &[QueryInfo], guar: ErrorGuaranteed) -> Self;
} }
impl<Tcx: DepContext, T, D: DepKind> Value<Tcx, D> for T { impl<Tcx: DepContext, T> Value<Tcx> for T {
default fn from_cycle_error(tcx: Tcx, cycle: &[QueryInfo<D>], _guar: ErrorGuaranteed) -> T { default fn from_cycle_error(tcx: Tcx, cycle: &[QueryInfo], _guar: ErrorGuaranteed) -> T {
tcx.sess().abort_if_errors(); tcx.sess().abort_if_errors();
// Ideally we would use `bug!` here. But bug! is only defined in rustc_middle, and it's // Ideally we would use `bug!` here. But bug! is only defined in rustc_middle, and it's
// non-trivial to define it earlier. // non-trivial to define it earlier.

View file

@ -8,7 +8,7 @@ use cache::ProvisionalCache;
use rustc_data_structures::fx::FxHashSet; use rustc_data_structures::fx::FxHashSet;
use rustc_index::Idx; use rustc_index::Idx;
use rustc_index::IndexVec; use rustc_index::IndexVec;
use rustc_middle::dep_graph::DepKind; use rustc_middle::dep_graph::dep_kinds;
use rustc_middle::traits::solve::inspect::CacheHit; use rustc_middle::traits::solve::inspect::CacheHit;
use rustc_middle::traits::solve::CacheData; use rustc_middle::traits::solve::CacheData;
use rustc_middle::traits::solve::{CanonicalInput, Certainty, EvaluationCache, QueryResult}; use rustc_middle::traits::solve::{CanonicalInput, Certainty, EvaluationCache, QueryResult};
@ -287,7 +287,7 @@ impl<'tcx> SearchGraph<'tcx> {
// Everything that affects the `result` should be performed within this // Everything that affects the `result` should be performed within this
// `with_anon_task` closure. // `with_anon_task` closure.
let ((final_entry, result), dep_node) = let ((final_entry, result), dep_node) =
tcx.dep_graph.with_anon_task(tcx, DepKind::TraitSelect, || { tcx.dep_graph.with_anon_task(tcx, dep_kinds::TraitSelect, || {
// When we encounter a coinductive cycle, we have to fetch the // When we encounter a coinductive cycle, we have to fetch the
// result of that cycle while we are still computing it. Because // result of that cycle while we are still computing it. Because
// of this we continuously recompute the cycle until the result // of this we continuously recompute the cycle until the result

View file

@ -35,7 +35,8 @@ use rustc_hir::def_id::DefId;
use rustc_infer::infer::DefineOpaqueTypes; use rustc_infer::infer::DefineOpaqueTypes;
use rustc_infer::infer::LateBoundRegionConversionTime; use rustc_infer::infer::LateBoundRegionConversionTime;
use rustc_infer::traits::TraitObligation; use rustc_infer::traits::TraitObligation;
use rustc_middle::dep_graph::{DepKind, DepNodeIndex}; use rustc_middle::dep_graph::dep_kinds;
use rustc_middle::dep_graph::DepNodeIndex;
use rustc_middle::mir::interpret::ErrorHandled; use rustc_middle::mir::interpret::ErrorHandled;
use rustc_middle::traits::DefiningAnchor; use rustc_middle::traits::DefiningAnchor;
use rustc_middle::ty::abstract_const::NotConstEvaluatable; use rustc_middle::ty::abstract_const::NotConstEvaluatable;
@ -1435,7 +1436,7 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
OP: FnOnce(&mut Self) -> R, OP: FnOnce(&mut Self) -> R,
{ {
let (result, dep_node) = let (result, dep_node) =
self.tcx().dep_graph.with_anon_task(self.tcx(), DepKind::TraitSelect, || op(self)); self.tcx().dep_graph.with_anon_task(self.tcx(), dep_kinds::TraitSelect, || op(self));
self.tcx().dep_graph.read_index(dep_node); self.tcx().dep_graph.read_index(dep_node);
(result, dep_node) (result, dep_node)
} }