Error if we try to read dep during deserialization

This commit is contained in:
Aaron Hill 2021-12-13 20:56:30 -06:00
parent 489296d825
commit 75181dc22f
No known key found for this signature in database
GPG key ID: B4087E510E98B164
4 changed files with 24 additions and 5 deletions

View file

@@ -83,12 +83,17 @@ impl QueryContext for QueryCtxt<'_> {
         &self,
         token: QueryJobId<Self::DepKind>,
         diagnostics: Option<&Lock<ThinVec<Diagnostic>>>,
+        read_allowed: bool,
         compute: impl FnOnce() -> R,
     ) -> R {
         // The `TyCtxt` stored in TLS has the same global interner lifetime
         // as `self`, so we use `with_related_context` to relate the 'tcx lifetimes
         // when accessing the `ImplicitCtxt`.
         tls::with_related_context(**self, move |current_icx| {
+            let mut old_read_allowed = false;
+            if let Some(task_deps) = current_icx.task_deps {
+                old_read_allowed = std::mem::replace(&mut task_deps.lock().read_allowed, read_allowed);
+            }
             // Update the `ImplicitCtxt` to point to our new query job.
             let new_icx = ImplicitCtxt {
                 tcx: **self,
@@ -99,9 +104,14 @@ impl QueryContext for QueryCtxt<'_> {
             };

             // Use the `ImplicitCtxt` while we execute the query.
-            tls::enter_context(&new_icx, |_| {
+            let res = tls::enter_context(&new_icx, |_| {
                 rustc_data_structures::stack::ensure_sufficient_stack(compute)
-            })
+            });
+            if let Some(task_deps) = new_icx.task_deps {
+                task_deps.lock().read_allowed = old_read_allowed;
+            }
+            res
         })
     }
 }

View file

@@ -251,6 +251,7 @@ impl<K: DepKind> DepGraph<K> {
                 reads: SmallVec::new(),
                 read_set: Default::default(),
                 phantom_data: PhantomData,
+                read_allowed: true,
             }))
         };
         let result = K::with_deps(task_deps.as_ref(), || task(cx, arg));
@@ -362,6 +363,11 @@ impl<K: DepKind> DepGraph<K> {
         if let Some(task_deps) = task_deps {
             let mut task_deps = task_deps.lock();
             let task_deps = &mut *task_deps;
+            if !task_deps.read_allowed {
+                panic!("Illegal read of: {:?}", dep_node_index);
+            }
+
             if cfg!(debug_assertions) {
                 data.current.total_read_count.fetch_add(1, Relaxed);
             }
@@ -1115,6 +1121,7 @@ pub struct TaskDeps<K> {
     reads: EdgesVec,
     read_set: FxHashSet<DepNodeIndex>,
     phantom_data: PhantomData<DepNode<K>>,
+    pub read_allowed: bool,
 }

 impl<K> Default for TaskDeps<K> {
@@ -1125,6 +1132,7 @@ impl<K> Default for TaskDeps<K> {
            reads: EdgesVec::new(),
            read_set: FxHashSet::default(),
            phantom_data: PhantomData,
+           read_allowed: true,
        }
    }
}

View file

@@ -142,6 +142,7 @@ pub trait QueryContext: HasDepContext {
         &self,
         token: QueryJobId<Self::DepKind>,
         diagnostics: Option<&Lock<ThinVec<Diagnostic>>>,
+        read_allowed: bool,
         compute: impl FnOnce() -> R,
     ) -> R;
 }

View file

@@ -440,7 +440,7 @@ where
     // Fast path for when incr. comp. is off.
     if !dep_graph.is_fully_enabled() {
         let prof_timer = tcx.dep_context().profiler().query_provider();
-        let result = tcx.start_query(job_id, None, || query.compute(*tcx.dep_context(), key));
+        let result = tcx.start_query(job_id, None, true, || query.compute(*tcx.dep_context(), key));
         let dep_node_index = dep_graph.next_virtual_depnode_index();
         prof_timer.finish_with_query_invocation_id(dep_node_index.into());
         return (result, dep_node_index);
@@ -453,7 +453,7 @@ where
         // The diagnostics for this query will be promoted to the current session during
         // `try_mark_green()`, so we can ignore them here.
-        if let Some(ret) = tcx.start_query(job_id, None, || {
+        if let Some(ret) = tcx.start_query(job_id, None, false, || {
             try_load_from_disk_and_cache_in_memory(tcx, &key, &dep_node, query)
         }) {
             return ret;
@@ -463,7 +463,7 @@ where
     let prof_timer = tcx.dep_context().profiler().query_provider();

     let diagnostics = Lock::new(ThinVec::new());
-    let (result, dep_node_index) = tcx.start_query(job_id, Some(&diagnostics), || {
+    let (result, dep_node_index) = tcx.start_query(job_id, Some(&diagnostics), true, || {
         if query.anon {
             return dep_graph.with_anon_task(*tcx.dep_context(), query.dep_kind, || {
                 query.compute(*tcx.dep_context(), key)