From 49c1b07a9e09af5d01150c53b50676a5bc2e402d Mon Sep 17 00:00:00 2001
From: Camille GILLOT
Date: Sun, 18 Oct 2020 21:01:36 +0200
Subject: [PATCH] Decouple QueryContext from DepContext.
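The query system needs the dep-graph, but the dep-graph does not need
to know about queries. Until now, `QueryContext` was required to be a
`DepContext` itself. This patch introduces an accessor trait,
`HasDepContext`, and makes `QueryContext` and the `DepGraph` task
entry points require only that: query code reaches dep-graph
facilities through `dep_context()`, while a blanket impl lets every
existing `DepContext`, such as `TyCtxt`, keep working unchanged.

The decoupling leaves room for a query context that merely wraps a
dep context. Purely as an illustration (no such type is added by this
patch), a follow-up could define:

    struct QueryCtxt<'tcx> {
        tcx: TyCtxt<'tcx>,
        // query-engine state the dep-graph never sees
    }

and implement `HasDepContext` for it with `type DepContext =
TyCtxt<'tcx>`, returning `&self.tcx` from `dep_context()`.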
---
 .../rustc_middle/src/ty/query/plumbing.rs     |  4 +-
 .../rustc_query_system/src/dep_graph/graph.rs | 13 +--
 .../rustc_query_system/src/dep_graph/mod.rs   | 21 +++++
 .../rustc_query_system/src/query/config.rs    |  4 +-
 compiler/rustc_query_system/src/query/job.rs  |  5 +-
 compiler/rustc_query_system/src/query/mod.rs  |  6 +-
 .../rustc_query_system/src/query/plumbing.rs  | 87 +++++++++++--------
 7 files changed, 87 insertions(+), 53 deletions(-)

diff --git a/compiler/rustc_middle/src/ty/query/plumbing.rs b/compiler/rustc_middle/src/ty/query/plumbing.rs
index 891da797b34..ea889549e43 100644
--- a/compiler/rustc_middle/src/ty/query/plumbing.rs
+++ b/compiler/rustc_middle/src/ty/query/plumbing.rs
@@ -48,7 +48,7 @@ impl QueryContext for TyCtxt<'tcx> {
         &self,
         token: QueryJobId<Self::DepKind>,
         diagnostics: Option<&Lock<ThinVec<Diagnostic>>>,
-        compute: impl FnOnce(Self) -> R,
+        compute: impl FnOnce() -> R,
     ) -> R {
         // The `TyCtxt` stored in TLS has the same global interner lifetime
         // as `self`, so we use `with_related_context` to relate the 'tcx lifetimes
@@ -65,7 +65,7 @@ impl QueryContext for TyCtxt<'tcx> {
 
             // Use the `ImplicitCtxt` while we execute the query.
             tls::enter_context(&new_icx, |_| {
-                rustc_data_structures::stack::ensure_sufficient_stack(|| compute(*self))
+                rustc_data_structures::stack::ensure_sufficient_stack(compute)
             })
         })
     }
diff --git a/compiler/rustc_query_system/src/dep_graph/graph.rs b/compiler/rustc_query_system/src/dep_graph/graph.rs
index b13aa2f6ccb..9b0810e03f7 100644
--- a/compiler/rustc_query_system/src/dep_graph/graph.rs
+++ b/compiler/rustc_query_system/src/dep_graph/graph.rs
@@ -23,7 +23,7 @@ use super::debug::EdgeFilter;
 use super::prev::PreviousDepGraph;
 use super::query::DepGraphQuery;
 use super::serialized::SerializedDepNodeIndex;
-use super::{DepContext, DepKind, DepNode, WorkProductId};
+use super::{DepContext, DepKind, DepNode, HasDepContext, WorkProductId};
 
 #[derive(Clone)]
 pub struct DepGraph<K: DepKind> {
@@ -235,7 +235,7 @@ impl<K: DepKind> DepGraph<K> {
     /// `arg` parameter.
     ///
     /// [rustc dev guide]: https://rustc-dev-guide.rust-lang.org/incremental-compilation.html
-    pub fn with_task<Ctxt: DepContext<DepKind = K>, A, R>(
+    pub fn with_task<Ctxt: HasDepContext<DepKind = K>, A, R>(
         &self,
         key: DepNode<K>,
         cx: Ctxt,
@@ -261,7 +261,7 @@ impl<K: DepKind> DepGraph<K> {
         )
     }
 
-    fn with_task_impl<Ctxt: DepContext<DepKind = K>, A, R>(
+    fn with_task_impl<Ctxt: HasDepContext<DepKind = K>, A, R>(
         &self,
         key: DepNode<K>,
         cx: Ctxt,
@@ -271,14 +271,15 @@ impl<K: DepKind> DepGraph<K> {
         hash_result: impl FnOnce(&mut Ctxt::StableHashingContext, &R) -> Option<Fingerprint>,
     ) -> (R, DepNodeIndex) {
         if let Some(ref data) = self.data {
+            let dcx = cx.dep_context();
             let task_deps = create_task(key).map(Lock::new);
             let result = K::with_deps(task_deps.as_ref(), || task(cx, arg));
             let edges = task_deps.map_or_else(|| smallvec![], |lock| lock.into_inner().reads);
 
-            let mut hcx = cx.create_stable_hashing_context();
+            let mut hcx = dcx.create_stable_hashing_context();
             let current_fingerprint = hash_result(&mut hcx, &result);
 
-            let print_status = cfg!(debug_assertions) && cx.debug_dep_tasks();
+            let print_status = cfg!(debug_assertions) && dcx.debug_dep_tasks();
 
             // Intern the new `DepNode`.
             let dep_node_index = if let Some(prev_index) = data.previous.node_to_index_opt(&key) {
@@ -408,7 +409,7 @@ impl<K: DepKind> DepGraph<K> {
 
     /// Executes something within an "eval-always" task which is a task
     /// that runs whenever anything changes.
-    pub fn with_eval_always_task<Ctxt: DepContext<DepKind = K>, A, R>(
+    pub fn with_eval_always_task<Ctxt: HasDepContext<DepKind = K>, A, R>(
         &self,
         key: DepNode<K>,
         cx: Ctxt,
diff --git a/compiler/rustc_query_system/src/dep_graph/mod.rs b/compiler/rustc_query_system/src/dep_graph/mod.rs
index 5c1444ad6c4..db192d1cfe7 100644
--- a/compiler/rustc_query_system/src/dep_graph/mod.rs
+++ b/compiler/rustc_query_system/src/dep_graph/mod.rs
@@ -63,6 +63,27 @@ pub trait DepContext: Copy {
     fn profiler(&self) -> &SelfProfilerRef;
 }
 
+pub trait HasDepContext: Copy {
+    type DepKind: self::DepKind;
+    type StableHashingContext;
+    type DepContext: self::DepContext<
+        DepKind = Self::DepKind,
+        StableHashingContext = Self::StableHashingContext,
+    >;
+
+    fn dep_context(&self) -> &Self::DepContext;
+}
+
+impl<T: DepContext> HasDepContext for T {
+    type DepKind = T::DepKind;
+    type StableHashingContext = T::StableHashingContext;
+    type DepContext = Self;
+
+    fn dep_context(&self) -> &Self::DepContext {
+        self
+    }
+}
+
 /// Describe the different families of dependency nodes.
 pub trait DepKind: Copy + fmt::Debug + Eq + Hash {
     const NULL: Self;
diff --git a/compiler/rustc_query_system/src/query/config.rs b/compiler/rustc_query_system/src/query/config.rs
index 23b1ab09722..3873b47d4d4 100644
--- a/compiler/rustc_query_system/src/query/config.rs
+++ b/compiler/rustc_query_system/src/query/config.rs
@@ -33,9 +33,9 @@ pub(crate) struct QueryVtable<CTX: QueryContext, K, V> {
 }
 
 impl<CTX: QueryContext, K, V> QueryVtable<CTX, K, V> {
-    pub(crate) fn to_dep_node(&self, tcx: CTX, key: &K) -> DepNode<CTX::DepKind>
+    pub(crate) fn to_dep_node(&self, tcx: CTX::DepContext, key: &K) -> DepNode<CTX::DepKind>
     where
-        K: crate::dep_graph::DepNodeParams<CTX>,
+        K: crate::dep_graph::DepNodeParams<CTX::DepContext>,
     {
         DepNode::construct(tcx, self.dep_kind, key)
     }
diff --git a/compiler/rustc_query_system/src/query/job.rs b/compiler/rustc_query_system/src/query/job.rs
index 5fed500390b..0ecc2694a79 100644
--- a/compiler/rustc_query_system/src/query/job.rs
+++ b/compiler/rustc_query_system/src/query/job.rs
@@ -10,7 +10,8 @@ use std::num::NonZeroU32;
 
 #[cfg(parallel_compiler)]
 use {
-    super::QueryContext,
+    crate::dep_graph::DepContext,
+    crate::query::QueryContext,
     parking_lot::{Condvar, Mutex},
     rustc_data_structures::fx::FxHashSet,
     rustc_data_structures::stable_hasher::{HashStable, StableHasher},
@@ -432,7 +433,7 @@ where
 {
     // Deterministically pick an entry point
     // FIXME: Sort this instead
-    let mut hcx = tcx.create_stable_hashing_context();
+    let mut hcx = tcx.dep_context().create_stable_hashing_context();
     queries
         .iter()
         .min_by_key(|v| {
diff --git a/compiler/rustc_query_system/src/query/mod.rs b/compiler/rustc_query_system/src/query/mod.rs
index 84d4b406c84..2d678035d4d 100644
--- a/compiler/rustc_query_system/src/query/mod.rs
+++ b/compiler/rustc_query_system/src/query/mod.rs
@@ -14,7 +14,7 @@ pub use self::caches::{
 mod config;
 pub use self::config::{QueryAccessors, QueryConfig, QueryDescription};
 
-use crate::dep_graph::DepContext;
+use crate::dep_graph::HasDepContext;
 use crate::query::job::QueryMap;
 
 use rustc_data_structures::stable_hasher::HashStable;
@@ -23,7 +23,7 @@ use rustc_data_structures::thin_vec::ThinVec;
 use rustc_errors::Diagnostic;
 use rustc_span::def_id::DefId;
 
-pub trait QueryContext: DepContext {
+pub trait QueryContext: HasDepContext {
     type Query: Clone + HashStable<Self::StableHashingContext>;
 
     fn incremental_verify_ich(&self) -> bool;
@@ -44,6 +44,6 @@ pub trait QueryContext: DepContext {
         &self,
         token: QueryJobId<Self::DepKind>,
         diagnostics: Option<&Lock<ThinVec<Diagnostic>>>,
-        compute: impl FnOnce(Self) -> R,
+        compute: impl FnOnce() -> R,
     ) -> R;
 }
diff --git a/compiler/rustc_query_system/src/query/plumbing.rs b/compiler/rustc_query_system/src/query/plumbing.rs
index 4242324b968..89f1e6511e3 100644
--- a/compiler/rustc_query_system/src/query/plumbing.rs
+++ b/compiler/rustc_query_system/src/query/plumbing.rs
@@ -2,7 +2,7 @@
 //! generate the actual methods on tcx which find and execute the provider,
 //! manage the caches, and so forth.
 
-use crate::dep_graph::{DepKind, DepNode};
+use crate::dep_graph::{DepContext, DepKind, DepNode};
 use crate::dep_graph::{DepNodeIndex, SerializedDepNodeIndex};
 use crate::query::caches::QueryCache;
 use crate::query::config::{QueryDescription, QueryVtable, QueryVtableExt};
@@ -204,7 +204,7 @@ where
             // in another thread has completed. Record how long we wait in the
             // self-profiler.
             let _query_blocked_prof_timer = if cfg!(parallel_compiler) {
-                Some(tcx.profiler().query_blocked())
+                Some(tcx.dep_context().profiler().query_blocked())
             } else {
                 None
             };
@@ -266,8 +266,8 @@ where
         let cached = cache
             .cache
             .lookup(cache, &key, |value, index| {
-                if unlikely!(tcx.profiler().enabled()) {
-                    tcx.profiler().query_cache_hit(index.into());
+                if unlikely!(tcx.dep_context().profiler().enabled()) {
+                    tcx.dep_context().profiler().query_cache_hit(index.into());
                 }
                 #[cfg(debug_assertions)]
                 {
@@ -395,7 +395,7 @@ pub fn try_get_cached<'a, CTX, C, R, OnHit>(
 ) -> Result<R, QueryLookup>
 where
     C: QueryCache,
-    CTX: QueryContext,
+    CTX: DepContext,
     OnHit: FnOnce(&C::Stored) -> R,
 {
     cache.cache.lookup(cache, &key, |value, index| {
@@ -422,7 +422,7 @@ fn try_execute_query<CTX, C>(
 ) -> C::Stored
 where
     C: QueryCache,
-    C::Key: crate::dep_graph::DepNodeParams<CTX>,
+    C::Key: crate::dep_graph::DepNodeParams<CTX::DepContext>,
     CTX: QueryContext,
 {
     let job = match JobOwner::<'_, CTX::DepKind, CTX::Query, C>::try_start(
@@ -432,46 +432,51 @@ where
         TryGetJob::Cycle(result) => return result,
         #[cfg(parallel_compiler)]
         TryGetJob::JobCompleted((v, index)) => {
-            tcx.dep_graph().read_index(index);
+            tcx.dep_context().dep_graph().read_index(index);
             return v;
         }
     };
 
     // Fast path for when incr. comp. is off. `to_dep_node` is
     // expensive for some `DepKind`s.
-    if !tcx.dep_graph().is_fully_enabled() {
+    if !tcx.dep_context().dep_graph().is_fully_enabled() {
         let null_dep_node = DepNode::new_no_params(DepKind::NULL);
         return force_query_with_job(tcx, key, job, null_dep_node, query).0;
     }
 
     if query.anon {
-        let prof_timer = tcx.profiler().query_provider();
+        let prof_timer = tcx.dep_context().profiler().query_provider();
 
         let ((result, dep_node_index), diagnostics) = with_diagnostics(|diagnostics| {
-            tcx.start_query(job.id, diagnostics, |tcx| {
-                tcx.dep_graph().with_anon_task(query.dep_kind, || query.compute(tcx, key))
+            tcx.start_query(job.id, diagnostics, || {
+                tcx.dep_context()
+                    .dep_graph()
+                    .with_anon_task(query.dep_kind, || query.compute(tcx, key))
             })
         });
 
         prof_timer.finish_with_query_invocation_id(dep_node_index.into());
 
-        tcx.dep_graph().read_index(dep_node_index);
+        tcx.dep_context().dep_graph().read_index(dep_node_index);
 
         if unlikely!(!diagnostics.is_empty()) {
-            tcx.store_diagnostics_for_anon_node(dep_node_index, diagnostics);
+            tcx.dep_context().store_diagnostics_for_anon_node(dep_node_index, diagnostics);
        }
 
         return job.complete(result, dep_node_index);
     }
 
-    let dep_node = query.to_dep_node(tcx, &key);
+    let dep_node = query.to_dep_node(*tcx.dep_context(), &key);
 
     if !query.eval_always {
         // The diagnostics for this query will be
         // promoted to the current session during
         // `try_mark_green()`, so we can ignore them here.
-        let loaded = tcx.start_query(job.id, None, |tcx| {
-            let marked = tcx.dep_graph().try_mark_green_and_read(tcx, &dep_node);
+        let loaded = tcx.start_query(job.id, None, || {
+            let marked = tcx
+                .dep_context()
+                .dep_graph()
+                .try_mark_green_and_read(*tcx.dep_context(), &dep_node);
             marked.map(|(prev_dep_node_index, dep_node_index)| {
                 (
                     load_from_disk_and_cache_in_memory(
@@ -492,7 +497,7 @@ where
     }
 
     let (result, dep_node_index) = force_query_with_job(tcx, key, job, dep_node, query);
-    tcx.dep_graph().read_index(dep_node_index);
+    tcx.dep_context().dep_graph().read_index(dep_node_index);
     result
 }
 
@@ -510,11 +515,11 @@ where
     // Note this function can be called concurrently from the same query
     // We must ensure that this is handled correctly.
 
-    debug_assert!(tcx.dep_graph().is_green(dep_node));
+    debug_assert!(tcx.dep_context().dep_graph().is_green(dep_node));
 
     // First we try to load the result from the on-disk cache.
     let result = if query.cache_on_disk(tcx, &key, None) {
-        let prof_timer = tcx.profiler().incr_cache_loading();
+        let prof_timer = tcx.dep_context().profiler().incr_cache_loading();
         let result = query.try_load_from_disk(tcx, prev_dep_node_index);
         prof_timer.finish_with_query_invocation_id(dep_node_index.into());
 
@@ -536,10 +541,10 @@ where
     } else {
         // We could not load a result from the on-disk cache, so
         // recompute.
-        let prof_timer = tcx.profiler().query_provider();
+        let prof_timer = tcx.dep_context().profiler().query_provider();
 
         // The dep-graph for this computation is already in-place.
-        let result = tcx.dep_graph().with_ignore(|| query.compute(tcx, key));
+        let result = tcx.dep_context().dep_graph().with_ignore(|| query.compute(tcx, key));
 
         prof_timer.finish_with_query_invocation_id(dep_node_index.into());
 
@@ -549,7 +554,7 @@ where
     // If `-Zincremental-verify-ich` is specified, re-hash results from
     // the cache and make sure that they have the expected fingerprint.
     if unlikely!(tcx.incremental_verify_ich()) {
-        incremental_verify_ich(tcx, &result, dep_node, dep_node_index, query);
+        incremental_verify_ich(*tcx.dep_context(), &result, dep_node, dep_node_index, query);
     }
 
     result
@@ -558,7 +563,7 @@ where
 #[inline(never)]
 #[cold]
 fn incremental_verify_ich<CTX, K, V: Debug>(
-    tcx: CTX,
+    tcx: CTX::DepContext,
     result: &V,
     dep_node: &DepNode<CTX::DepKind>,
     dep_node_index: DepNodeIndex,
@@ -601,7 +606,7 @@ where
     // 2. Two distinct query keys get mapped to the same `DepNode`
     //    (see for example #48923).
     assert!(
-        !tcx.dep_graph().dep_node_exists(&dep_node),
+        !tcx.dep_context().dep_graph().dep_node_exists(&dep_node),
         "forcing query with already existing `DepNode`\n\
          - query-key: {:?}\n\
          - dep-node: {:?}",
@@ -609,12 +614,12 @@ where
         key,
         dep_node
     );
 
-    let prof_timer = tcx.profiler().query_provider();
+    let prof_timer = tcx.dep_context().profiler().query_provider();
 
     let ((result, dep_node_index), diagnostics) = with_diagnostics(|diagnostics| {
-        tcx.start_query(job.id, diagnostics, |tcx| {
+        tcx.start_query(job.id, diagnostics, || {
             if query.eval_always {
-                tcx.dep_graph().with_eval_always_task(
+                tcx.dep_context().dep_graph().with_eval_always_task(
                     dep_node,
                     tcx,
                     key,
@@ -622,7 +627,13 @@ where
                     query.hash_result,
                 )
             } else {
-                tcx.dep_graph().with_task(dep_node, tcx, key, query.compute, query.hash_result)
+                tcx.dep_context().dep_graph().with_task(
+                    dep_node,
+                    tcx,
+                    key,
+                    query.compute,
+                    query.hash_result,
+                )
             }
         })
     });
@@ -630,7 +641,7 @@ where
     prof_timer.finish_with_query_invocation_id(dep_node_index.into());
 
     if unlikely!(!diagnostics.is_empty()) && dep_node.kind != DepKind::NULL {
-        tcx.store_diagnostics(dep_node_index, diagnostics);
+        tcx.dep_context().store_diagnostics(dep_node_index, diagnostics);
     }
 
     let result = job.complete(result, dep_node_index);
@@ -651,7 +662,7 @@ fn get_query_impl<CTX, C>(
 where
     CTX: QueryContext,
     C: QueryCache,
-    C::Key: crate::dep_graph::DepNodeParams<CTX>,
+    C::Key: crate::dep_graph::DepNodeParams<CTX::DepContext>,
 {
     try_execute_query(tcx, state, cache, span, key, lookup, query)
 }
@@ -665,9 +676,9 @@ where
 ///
 /// Note: The optimization is only available during incr. comp.
 #[inline(never)]
-fn ensure_must_run<CTX, K, V>(tcx: CTX, key: &K, query: &QueryVtable<CTX, K, V>) -> bool
+fn ensure_must_run<CTX, K, V>(tcx: CTX::DepContext, key: &K, query: &QueryVtable<CTX, K, V>) -> bool
 where
-    K: crate::dep_graph::DepNodeParams<CTX>,
+    K: crate::dep_graph::DepNodeParams<CTX::DepContext>,
     CTX: QueryContext,
 {
     if query.eval_always {
@@ -707,14 +718,14 @@ fn force_query_impl<CTX, C>(
     query: &QueryVtable<CTX, C::Key, C::Value>,
 ) where
     C: QueryCache,
-    C::Key: crate::dep_graph::DepNodeParams<CTX>,
+    C::Key: crate::dep_graph::DepNodeParams<CTX::DepContext>,
     CTX: QueryContext,
 {
     // We may be concurrently trying both execute and force a query.
     // Ensure that only one of them runs the query.
     let cached = cache.cache.lookup(cache, &key, |_, index| {
-        if unlikely!(tcx.profiler().enabled()) {
-            tcx.profiler().query_cache_hit(index.into());
+        if unlikely!(tcx.dep_context().profiler().enabled()) {
+            tcx.dep_context().profiler().query_cache_hit(index.into());
         }
         #[cfg(debug_assertions)]
         {
@@ -752,12 +763,12 @@ pub fn get_query<Q, CTX>(
 ) -> Option<Q::Stored>
 where
     Q: QueryDescription<CTX>,
-    Q::Key: crate::dep_graph::DepNodeParams<CTX>,
+    Q::Key: crate::dep_graph::DepNodeParams<CTX::DepContext>,
     CTX: QueryContext,
 {
     let query = &Q::VTABLE;
     if let QueryMode::Ensure = mode {
-        if !ensure_must_run(tcx, &key, query) {
+        if !ensure_must_run(*tcx.dep_context(), &key, query) {
             return None;
         }
     }
@@ -771,7 +782,7 @@ where
 pub fn force_query<Q, CTX>(tcx: CTX, key: Q::Key, span: Span, dep_node: DepNode<CTX::DepKind>)
 where
     Q: QueryDescription<CTX>,
-    Q::Key: crate::dep_graph::DepNodeParams<CTX>,
+    Q::Key: crate::dep_graph::DepNodeParams<CTX::DepContext>,
     CTX: QueryContext,
 {
     force_query_impl(tcx, Q::query_state(tcx), Q::query_cache(tcx), key, span, dep_node, &Q::VTABLE)
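Reviewer note, not part of the patch: the standalone program below is
a minimal sketch of the `HasDepContext` pattern under simplified
traits. `Tcx`, `QueryCtxt`, `with_task` and `main` are hypothetical
stand-ins, not rustc items, and the real traits also carry the
`DepKind` and `StableHashingContext` associated types omitted here.

    /// Simplified stand-in for the patch's `DepContext` (the dep-graph side).
    trait DepContext: Copy {
        fn debug_dep_tasks(&self) -> bool;
    }

    /// The accessor trait: a context that can provide a dep context
    /// without having to be one.
    trait HasDepContext: Copy {
        type DepContext: DepContext;
        fn dep_context(&self) -> &Self::DepContext;
    }

    /// Blanket impl, as in the patch: every `DepContext` provides itself,
    /// so existing contexts keep compiling unchanged.
    impl<T: DepContext> HasDepContext for T {
        type DepContext = Self;
        fn dep_context(&self) -> &Self::DepContext {
            self
        }
    }

    /// Toy `TyCtxt`-like context that is a `DepContext` directly.
    #[derive(Clone, Copy)]
    struct Tcx {
        debug: bool,
    }

    impl DepContext for Tcx {
        fn debug_dep_tasks(&self) -> bool {
            self.debug
        }
    }

    /// Query-side wrapper that is *not* a `DepContext`; after this patch,
    /// such a type can still drive dep-graph tasks.
    #[derive(Clone, Copy)]
    struct QueryCtxt {
        tcx: Tcx,
        // query-engine state the dep-graph never sees would live here
    }

    impl HasDepContext for QueryCtxt {
        type DepContext = Tcx;
        fn dep_context(&self) -> &Self::DepContext {
            &self.tcx
        }
    }

    /// Task runner in the style of `DepGraph::with_task` after the patch:
    /// bounded by `HasDepContext`, so both kinds of context are accepted.
    fn with_task<Ctxt: HasDepContext, A, R>(cx: Ctxt, arg: A, task: fn(Ctxt, A) -> R) -> R {
        if cx.dep_context().debug_dep_tasks() {
            println!("running dep task");
        }
        task(cx, arg)
    }

    fn main() {
        let tcx = Tcx { debug: true };
        // A plain dep context works through the blanket impl...
        let a = with_task(tcx, 1, |_cx, n| n + 1);
        // ...and so does a wrapper that merely *has* one.
        let b = with_task(QueryCtxt { tcx }, 1, |cx, n: i32| {
            n + cx.dep_context().debug_dep_tasks() as i32
        });
        println!("a = {}, b = {}", a, b);
    }

The same disjointness that makes the blanket impl safe here is what
lets a later `QueryCtxt`-style type add its own `HasDepContext` impl:
it never implements `DepContext`, so the two impls cannot overlap.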