Auto merge of #66610 - alexreg:trait-upcasting-cosmetic, r=Centril
Aggregation of drive-by cosmetic changes for the trait-upcasting PR, cherry-picked from #60900. As requested by @Centril (and @nikomatsakis, I believe). r? @Centril
Commit f11759d38c
40 changed files with 647 additions and 639 deletions
@@ -1919,8 +1919,9 @@ pub enum ImplItemKind {
/// Bindings like `A: Debug` are represented as a special type `A =
/// $::Debug` that is understood by the astconv code.
///
/// FIXME(alexreg) -- why have a separate type for the binding case,
/// wouldn't it be better to make the `ty` field an enum like:
/// FIXME(alexreg): why have a separate type for the binding case,
/// wouldn't it be better to make the `ty` field an enum like the
/// following?
///
/// ```
/// enum TypeBindingKind {
@@ -306,7 +306,7 @@ impl<'cx, 'tcx> TypeFolder<'tcx> for Canonicalizer<'cx, 'tcx> {
match *r {
ty::ReLateBound(index, ..) => {
if index >= self.binder_index {
bug!("escaping late bound region during canonicalization")
bug!("escaping late-bound region during canonicalization");
} else {
r
}
@@ -336,7 +336,7 @@ impl<'cx, 'tcx> TypeFolder<'tcx> for Canonicalizer<'cx, 'tcx> {
.canonicalize_free_region(self, r),

ty::ReClosureBound(..) => {
bug!("closure bound region encountered during canonicalization")
bug!("closure bound region encountered during canonicalization");
}
}
}
@@ -346,14 +346,14 @@ impl<'cx, 'tcx> TypeFolder<'tcx> for Canonicalizer<'cx, 'tcx> {
ty::Infer(ty::TyVar(vid)) => {
debug!("canonical: type var found with vid {:?}", vid);
match self.infcx.unwrap().probe_ty_var(vid) {
// `t` could be a float / int variable: canonicalize that instead
// `t` could be a float / int variable; canonicalize that instead.
Ok(t) => {
debug!("(resolved to {:?})", t);
self.fold_ty(t)
}

// `TyVar(vid)` is unresolved, track its universe index in the canonicalized
// result
// result.
Err(mut ui) => {
if !self.infcx.unwrap().tcx.sess.opts.debugging_opts.chalk {
// FIXME: perf problem described in #55921.
@@ -48,22 +48,24 @@
use super::lexical_region_resolve::RegionResolutionError;
use super::region_constraints::GenericKind;
use super::{InferCtxt, RegionVariableOrigin, SubregionOrigin, TypeTrace, ValuePairs};
use crate::infer::{self, SuppressRegionErrors};

use crate::hir;
use crate::hir::def_id::DefId;
use crate::hir::Node;
use crate::infer::{self, SuppressRegionErrors};
use crate::infer::opaque_types;
use crate::middle::region;
use crate::traits::{IfExpressionCause, MatchExpressionArmCause, ObligationCause};
use crate::traits::{ObligationCauseCode};
use crate::traits::{
IfExpressionCause, MatchExpressionArmCause, ObligationCause, ObligationCauseCode,
};
use crate::ty::error::TypeError;
use crate::ty::{self, subst::{Subst, SubstsRef}, Region, Ty, TyCtxt, TypeFoldable};

use errors::{Applicability, DiagnosticBuilder, DiagnosticStyledString};
use std::{cmp, fmt};
use rustc_error_codes::*;
use syntax_pos::{Pos, Span};

use rustc_error_codes::*;
use std::{cmp, fmt};

mod note;
@@ -1270,7 +1272,7 @@ impl<'a, 'tcx> InferCtxt<'a, 'tcx> {
}

/// When encountering a case where `.as_ref()` on a `Result` or `Option` would be appropriate,
/// suggest it.
/// suggests it.
fn suggest_as_ref_where_appropriate(
&self,
span: Span,
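As an aside (not part of this diff), a minimal sketch of the situation this suggestion targets: borrowing the contents of an `Option` via `.as_ref()` instead of moving them out.

```rust
// Illustrative only (not rustc code).
fn main() {
    let name: Option<String> = Some("ferris".to_string());

    // `as_ref()` turns `&Option<String>` into `Option<&String>`, so we can
    // inspect the contents without moving the `String` out of `name`.
    let len: Option<usize> = name.as_ref().map(|s| s.len());

    println!("{:?} {:?}", name, len); // `name` is still usable here
}
```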
@@ -221,7 +221,7 @@ impl<'cx, 'tcx> InferCtxt<'cx, 'tcx> {
}

/// The `TypeOutlives` struct has the job of "lowering" a `T: 'a`
/// obligation into a series of `'a: 'b` constraints and "verifys", as
/// obligation into a series of `'a: 'b` constraints and "verify"s, as
/// described on the module comment. The final constraints are emitted
/// via a "delegate" of type `D` -- this is usually the `infcx`, which
/// accrues them into the `region_obligations` code, but for NLL we
@@ -1,11 +1,12 @@
use std::fmt::{self, Display};
use std::convert::TryFrom;
use super::{AllocId, InterpResult};

use crate::mir;
use crate::ty::layout::{self, HasDataLayout, Size};

use rustc_macros::HashStable;

use super::{AllocId, InterpResult};
use std::convert::TryFrom;
use std::fmt::{self, Display};

/// Used by `check_in_alloc` to indicate context of check
#[derive(Debug, Copy, Clone, RustcEncodable, RustcDecodable, HashStable)]
@@ -74,8 +75,8 @@ pub trait PointerArithmetic: layout::HasDataLayout {
fn overflowing_signed_offset(&self, val: u64, i: i128) -> (u64, bool) {
// FIXME: is it possible to over/underflow here?
if i < 0 {
// Trickery to ensure that i64::min_value() works fine: compute n = -i.
// This formula only works for true negative values, it overflows for zero!
// Trickery to ensure that `i64::min_value()` works fine: compute `n = -i`.
// This formula only works for true negative values; it overflows for zero!
let n = u64::max_value() - (i as u64) + 1;
let res = val.overflowing_sub(n);
self.truncate_to_ptr(res)
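As an aside (not part of this diff), a small sketch of why the `u64::max_value() - (i as u64) + 1` trick above computes `n = -i` for a strictly negative `i` that fits in 64 bits, including `i64::min_value()`; the helper name `neg_as_u64` is made up for the example.

```rust
// Illustrative only (not rustc code). Casting a negative `i` to `u64` yields
// its two's-complement bit pattern, i.e. `2^64 + i` (truncated), so the
// expression evaluates to `(2^64 - 1) - (2^64 + i) + 1 = -i` without overflow.
fn neg_as_u64(i: i128) -> u64 {
    assert!(i < 0, "the formula overflows for zero and positive values");
    u64::max_value() - (i as u64) + 1
}

fn main() {
    assert_eq!(neg_as_u64(-1), 1);
    assert_eq!(neg_as_u64(i64::min_value() as i128), 1u64 << 63);
}
```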
@@ -105,7 +106,7 @@ impl<T: layout::HasDataLayout> PointerArithmetic for T {}
///
/// Defaults to the index based and loosely coupled `AllocId`.
///
/// Pointer is also generic over the `Tag` associated with each pointer,
/// `Pointer` is also generic over the `Tag` associated with each pointer,
/// which is used to do provenance tracking during execution.
#[derive(Copy, Clone, Eq, PartialEq, Ord, PartialOrd,
RustcEncodable, RustcDecodable, Hash, HashStable)]
@@ -129,7 +130,7 @@ impl<Id: fmt::Debug> fmt::Debug for Pointer<(), Id> {
}
}

/// Produces a `Pointer` which points to the beginning of the `Allocation`.
/// Produces a `Pointer` that points to the beginning of the `Allocation`.
impl From<AllocId> for Pointer {
#[inline(always)]
fn from(alloc_id: AllocId) -> Self {
@@ -1203,7 +1203,7 @@ options! {CodegenOptions, CodegenSetter, basic_codegen_options,
force_frame_pointers: Option<bool> = (None, parse_opt_bool, [TRACKED],
"force use of the frame pointers"),
debug_assertions: Option<bool> = (None, parse_opt_bool, [TRACKED],
"explicitly enable the cfg(debug_assertions) directive"),
"explicitly enable the `cfg(debug_assertions)` directive"),
inline_threshold: Option<usize> = (None, parse_opt_uint, [TRACKED],
"set the threshold for inlining a function (default: 225)"),
panic: Option<PanicStrategy> = (None, parse_panic_strategy,
@@ -1,18 +1,18 @@
//! Support code for rustdoc and external tools . You really don't
//! want to be using this unless you need to.
//! Support code for rustdoc and external tools.
//! You really don't want to be using this unless you need to.

use super::*;

use std::collections::hash_map::Entry;
use std::collections::VecDeque;

use crate::infer::region_constraints::{Constraint, RegionConstraintData};
use crate::infer::InferCtxt;
use rustc_data_structures::fx::{FxHashMap, FxHashSet};

use crate::ty::fold::TypeFolder;
use crate::ty::{Region, RegionVid};

use rustc_data_structures::fx::{FxHashMap, FxHashSet};

use std::collections::hash_map::Entry;
use std::collections::VecDeque;

// FIXME(twk): this is obviously not nice to duplicate like that
#[derive(Eq, PartialEq, Hash, Copy, Clone, Debug)]
pub enum RegionTarget<'tcx> {
@@ -233,43 +233,45 @@ impl<'tcx> AutoTraitFinder<'tcx> {
}

impl AutoTraitFinder<'tcx> {
// The core logic responsible for computing the bounds for our synthesized impl.
//
// To calculate the bounds, we call SelectionContext.select in a loop. Like FulfillmentContext,
// we recursively select the nested obligations of predicates we encounter. However, whenever we
// encounter an UnimplementedError involving a type parameter, we add it to our ParamEnv. Since
// our goal is to determine when a particular type implements an auto trait, Unimplemented
// errors tell us what conditions need to be met.
//
// This method ends up working somewhat similarly to FulfillmentContext, but with a few key
// differences. FulfillmentContext works under the assumption that it's dealing with concrete
// user code. According, it considers all possible ways that a Predicate could be met - which
// isn't always what we want for a synthesized impl. For example, given the predicate 'T:
// Iterator', FulfillmentContext can end up reporting an Unimplemented error for T:
// IntoIterator - since there's an implementation of Iteratpr where T: IntoIterator,
// FulfillmentContext will drive SelectionContext to consider that impl before giving up. If we
// were to rely on FulfillmentContext's decision, we might end up synthesizing an impl like
// this:
// 'impl<T> Send for Foo<T> where T: IntoIterator'
//
// While it might be technically true that Foo implements Send where T: IntoIterator,
// the bound is overly restrictive - it's really only necessary that T: Iterator.
//
// For this reason, evaluate_predicates handles predicates with type variables specially. When
// we encounter an Unimplemented error for a bound such as 'T: Iterator', we immediately add it
// to our ParamEnv, and add it to our stack for recursive evaluation. When we later select it,
// we'll pick up any nested bounds, without ever inferring that 'T: IntoIterator' needs to
// hold.
//
// One additional consideration is supertrait bounds. Normally, a ParamEnv is only ever
// constructed once for a given type. As part of the construction process, the ParamEnv will
// have any supertrait bounds normalized - e.g., if we have a type 'struct Foo<T: Copy>', the
// ParamEnv will contain 'T: Copy' and 'T: Clone', since 'Copy: Clone'. When we construct our
// own ParamEnv, we need to do this ourselves, through traits::elaborate_predicates, or else
// SelectionContext will choke on the missing predicates. However, this should never show up in
// the final synthesized generics: we don't want our generated docs page to contain something
// like 'T: Copy + Clone', as that's redundant. Therefore, we keep track of a separate
// 'user_env', which only holds the predicates that will actually be displayed to the user.
/// The core logic responsible for computing the bounds for our synthesized impl.
///
/// To calculate the bounds, we call `SelectionContext.select` in a loop. Like
/// `FulfillmentContext`, we recursively select the nested obligations of predicates we
/// encounter. However, whenever we encounter an `UnimplementedError` involving a type
/// parameter, we add it to our `ParamEnv`. Since our goal is to determine when a particular
/// type implements an auto trait, Unimplemented errors tell us what conditions need to be met.
///
/// This method ends up working somewhat similarly to `FulfillmentContext`, but with a few key
/// differences. `FulfillmentContext` works under the assumption that it's dealing with concrete
/// user code. According, it considers all possible ways that a `Predicate` could be met, which
/// isn't always what we want for a synthesized impl. For example, given the predicate `T:
/// Iterator`, `FulfillmentContext` can end up reporting an Unimplemented error for `T:
/// IntoIterator` -- since there's an implementation of `Iterator` where `T: IntoIterator`,
/// `FulfillmentContext` will drive `SelectionContext` to consider that impl before giving up.
/// If we were to rely on `FulfillmentContext`s decision, we might end up synthesizing an impl
/// like this:
///
/// impl<T> Send for Foo<T> where T: IntoIterator
///
/// While it might be technically true that Foo implements Send where `T: IntoIterator`,
/// the bound is overly restrictive - it's really only necessary that `T: Iterator`.
///
/// For this reason, `evaluate_predicates` handles predicates with type variables specially.
/// When we encounter an `Unimplemented` error for a bound such as `T: Iterator`, we immediately
/// add it to our `ParamEnv`, and add it to our stack for recursive evaluation. When we later
/// select it, we'll pick up any nested bounds, without ever inferring that `T: IntoIterator`
/// needs to hold.
///
/// One additional consideration is supertrait bounds. Normally, a `ParamEnv` is only ever
/// constructed once for a given type. As part of the construction process, the `ParamEnv` will
/// have any supertrait bounds normalized -- e.g., if we have a type `struct Foo<T: Copy>`, the
/// `ParamEnv` will contain `T: Copy` and `T: Clone`, since `Copy: Clone`. When we construct our
/// own `ParamEnv`, we need to do this ourselves, through `traits::elaborate_predicates`, or
/// else `SelectionContext` will choke on the missing predicates. However, this should never
/// show up in the final synthesized generics: we don't want our generated docs page to contain
/// something like `T: Copy + Clone`, as that's redundant. Therefore, we keep track of a
/// separate `user_env`, which only holds the predicates that will actually be displayed to the
/// user.
fn evaluate_predicates(
&self,
infcx: &InferCtxt<'_, 'tcx>,
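As an aside (not part of this diff), a tiny sketch of the supertrait point made at the end of the doc comment above: because `Clone` is a supertrait of `Copy`, a single `T: Copy` bound already implies `T: Clone`, which is why a synthesized `where T: Copy + Clone` would be redundant in rendered docs.

```rust
// Illustrative only (not rustc code).
fn duplicate<T: Copy>(x: T) -> (T, T) {
    // `clone()` is available here purely via the `Copy: Clone` supertrait relation.
    (x.clone(), x)
}

fn main() {
    assert_eq!(duplicate(3u8), (3, 3));
}
```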
@@ -307,7 +309,7 @@ impl AutoTraitFinder<'tcx> {
continue;
}

// Call infcx.resolve_vars_if_possible to see if we can
// Call `infcx.resolve_vars_if_possible` to see if we can
// get rid of any inference variables.
let obligation = infcx.resolve_vars_if_possible(
&Obligation::new(dummy_cause.clone(), new_env, pred)
@@ -316,14 +318,14 @@ impl AutoTraitFinder<'tcx> {

match &result {
&Ok(Some(ref vtable)) => {
// If we see an explicit negative impl (e.g., 'impl !Send for MyStruct'),
// If we see an explicit negative impl (e.g., `impl !Send for MyStruct`),
// we immediately bail out, since it's impossible for us to continue.
match vtable {
Vtable::VtableImpl(VtableImplData { impl_def_id, .. }) => {
// Blame tidy for the weird bracket placement
// Blame 'tidy' for the weird bracket placement.
if infcx.tcx.impl_polarity(*impl_def_id) == ty::ImplPolarity::Negative
{
debug!("evaluate_nested_obligations: Found explicit negative impl\
debug!("evaluate_nested_obligations: found explicit negative impl\
{:?}, bailing out", impl_def_id);
return None;
}
@@ -356,7 +358,7 @@ impl AutoTraitFinder<'tcx> {
predicates.push_back(pred);
} else {
debug!(
"evaluate_nested_obligations: Unimplemented found, bailing: \
"evaluate_nested_obligations: `Unimplemented` found, bailing: \
{:?} {:?} {:?}",
ty,
pred,
@@ -392,29 +394,29 @@ impl AutoTraitFinder<'tcx> {
return Some((new_env, final_user_env));
}

// This method is designed to work around the following issue:
// When we compute auto trait bounds, we repeatedly call SelectionContext.select,
// progressively building a ParamEnv based on the results we get.
// However, our usage of SelectionContext differs from its normal use within the compiler,
// in that we capture and re-reprocess predicates from Unimplemented errors.
//
// This can lead to a corner case when dealing with region parameters.
// During our selection loop in evaluate_predicates, we might end up with
// two trait predicates that differ only in their region parameters:
// one containing a HRTB lifetime parameter, and one containing a 'normal'
// lifetime parameter. For example:
//
// T as MyTrait<'a>
// T as MyTrait<'static>
//
// If we put both of these predicates in our computed ParamEnv, we'll
// confuse SelectionContext, since it will (correctly) view both as being applicable.
//
// To solve this, we pick the 'more strict' lifetime bound - i.e., the HRTB
// Our end goal is to generate a user-visible description of the conditions
// under which a type implements an auto trait. A trait predicate involving
// a HRTB means that the type needs to work with any choice of lifetime,
// not just one specific lifetime (e.g., 'static).
/// This method is designed to work around the following issue:
/// When we compute auto trait bounds, we repeatedly call `SelectionContext.select`,
/// progressively building a `ParamEnv` based on the results we get.
/// However, our usage of `SelectionContext` differs from its normal use within the compiler,
/// in that we capture and re-reprocess predicates from `Unimplemented` errors.
///
/// This can lead to a corner case when dealing with region parameters.
/// During our selection loop in `evaluate_predicates`, we might end up with
/// two trait predicates that differ only in their region parameters:
/// one containing a HRTB lifetime parameter, and one containing a 'normal'
/// lifetime parameter. For example:
///
/// T as MyTrait<'a>
/// T as MyTrait<'static>
///
/// If we put both of these predicates in our computed `ParamEnv`, we'll
/// confuse `SelectionContext`, since it will (correctly) view both as being applicable.
///
/// To solve this, we pick the 'more strict' lifetime bound -- i.e., the HRTB
/// Our end goal is to generate a user-visible description of the conditions
/// under which a type implements an auto trait. A trait predicate involving
/// a HRTB means that the type needs to work with any choice of lifetime,
/// not just one specific lifetime (e.g., `'static`).
fn add_user_pred<'c>(
&self,
user_computed_preds: &mut FxHashSet<ty::Predicate<'c>>,
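As an aside (not part of this diff), a sketch of the HRTB-versus-specific-lifetime distinction the comment above relies on; `MyTrait`, `AnyLifetime`, and `OnlyStatic` are made-up names for the example.

```rust
// Illustrative only (not rustc code): a higher-ranked bound `for<'a> T: MyTrait<'a>`
// is the stricter of the two, since it must hold for every lifetime rather than
// one specific choice such as `'static`.
trait MyTrait<'a> {}

struct AnyLifetime;
impl<'a> MyTrait<'a> for AnyLifetime {}

struct OnlyStatic;
impl MyTrait<'static> for OnlyStatic {}

fn needs_hrtb<T: for<'a> MyTrait<'a>>(_: T) {}
fn needs_static<T: MyTrait<'static>>(_: T) {}

fn main() {
    needs_static(AnyLifetime);
    needs_static(OnlyStatic);
    needs_hrtb(AnyLifetime);
    // needs_hrtb(OnlyStatic); // error: the bound must hold for *all* lifetimes
}
```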
@@ -430,7 +432,7 @@ impl AutoTraitFinder<'tcx> {

if !new_substs.types().eq(old_substs.types()) {
// We can't compare lifetimes if the types are different,
// so skip checking old_pred
// so skip checking `old_pred`.
return true;
}

@@ -438,8 +440,8 @@ impl AutoTraitFinder<'tcx> {
new_substs.regions().zip(old_substs.regions())
{
match (new_region, old_region) {
// If both predicates have an 'ReLateBound' (a HRTB) in the
// same spot, we do nothing
// If both predicates have an `ReLateBound` (a HRTB) in the
// same spot, we do nothing.
(
ty::RegionKind::ReLateBound(_, _),
ty::RegionKind::ReLateBound(_, _),
@@ -463,13 +465,13 @@ impl AutoTraitFinder<'tcx> {
// varaible).
//
// In both cases, we want to remove the old predicate,
// from user_computed_preds, and replace it with the new
// from `user_computed_preds`, and replace it with the new
// one. Having both the old and the new
// predicate in a ParamEnv would confuse SelectionContext
// predicate in a `ParamEnv` would confuse `SelectionContext`.
//
// We're currently in the predicate passed to 'retain',
// so we return 'false' to remove the old predicate from
// user_computed_preds
// so we return `false` to remove the old predicate from
// `user_computed_preds`.
return false;
}
(_, ty::RegionKind::ReLateBound(_, _)) |
@@ -486,8 +488,8 @@ impl AutoTraitFinder<'tcx> {
// predicate has some other type of region.
//
// We want to leave the old
// predicate in user_computed_preds, and skip adding
// new_pred to user_computed_params.
// predicate in `user_computed_preds`, and skip adding
// new_pred to `user_computed_params`.
should_add_new = false
},
_ => {}
@@ -505,8 +507,8 @@ impl AutoTraitFinder<'tcx> {
}
}

// This is very similar to handle_lifetimes. However, instead of matching ty::Region's
// to each other, we match ty::RegionVid's to ty::Region's
/// This is very similar to `handle_lifetimes`. However, instead of matching `ty::Region`s
/// to each other, we match `ty::RegionVid`s to `ty::Region`s.
fn map_vid_to_region<'cx>(
&self,
regions: &RegionConstraintData<'cx>,
@@ -573,7 +575,7 @@ impl AutoTraitFinder<'tcx> {
finished_map.insert(v1, r1);
}
(&RegionTarget::Region(_), &RegionTarget::RegionVid(_)) => {
// Do nothing - we don't care about regions that are smaller than vids
// Do nothing; we don't care about regions that are smaller than vids.
}
(&RegionTarget::RegionVid(_), &RegionTarget::RegionVid(_)) => {
if let Entry::Occupied(v) = vid_map.entry(*smaller) {
@ -191,23 +191,23 @@ pub enum ObligationCauseCode<'tcx> {
|
|||
/// Obligation incurred due to a coercion.
|
||||
Coercion { source: Ty<'tcx>, target: Ty<'tcx> },
|
||||
|
||||
// Various cases where expressions must be sized/copy/etc:
|
||||
/// L = X implies that L is Sized
|
||||
/// Various cases where expressions must be `Sized` / `Copy` / etc.
|
||||
/// `L = X` implies that `L` is `Sized`.
|
||||
AssignmentLhsSized,
|
||||
/// (x1, .., xn) must be Sized
|
||||
/// `(x1, .., xn)` must be `Sized`.
|
||||
TupleInitializerSized,
|
||||
/// S { ... } must be Sized
|
||||
/// `S { ... }` must be `Sized`.
|
||||
StructInitializerSized,
|
||||
/// Type of each variable must be Sized
|
||||
/// Type of each variable must be `Sized`.
|
||||
VariableType(hir::HirId),
|
||||
/// Argument type must be Sized
|
||||
/// Argument type must be `Sized`.
|
||||
SizedArgumentType,
|
||||
/// Return type must be Sized
|
||||
/// Return type must be `Sized`.
|
||||
SizedReturnType,
|
||||
/// Yield type must be Sized
|
||||
/// Yield type must be `Sized`.
|
||||
SizedYieldType,
|
||||
/// [T,..n] --> T must be Copy. If `true`, suggest `const_in_array_repeat_expressions` feature
|
||||
/// flag.
|
||||
/// `[T, ..n]` implies that `T` must be `Copy`.
|
||||
/// If `true`, suggest `const_in_array_repeat_expressions` feature flag.
|
||||
RepeatVec(bool),
|
||||
|
||||
/// Types of fields (other than the last, except for packed structs) in a struct must be sized.
|
||||
|
@ -216,7 +216,7 @@ pub enum ObligationCauseCode<'tcx> {
|
|||
/// Constant expressions must be sized.
|
||||
ConstSized,
|
||||
|
||||
/// Static items must have `Sync` type
|
||||
/// `static` items must have `Sync` type.
|
||||
SharedStatic,
|
||||
|
||||
BuiltinDerivedObligation(DerivedObligationCause<'tcx>),
|
||||
|
@ -602,7 +602,7 @@ pub enum Vtable<'tcx, N> {
|
|||
/// the impl's type parameters.
|
||||
///
|
||||
/// The type parameter `N` indicates the type used for "nested
|
||||
/// obligations" that are required by the impl. During type check, this
|
||||
/// obligations" that are required by the impl. During type-check, this
|
||||
/// is `Obligation`, as one might expect. During codegen, however, this
|
||||
/// is `()`, because codegen only requires a shallow resolution of an
|
||||
/// impl, and nested obligations are satisfied later.
|
||||
|
@ -1046,8 +1046,7 @@ fn vtable_methods<'tcx>(
|
|||
return None;
|
||||
}
|
||||
|
||||
// the method may have some early-bound lifetimes, add
|
||||
// regions for those
|
||||
// The method may have some early-bound lifetimes; add regions for those.
|
||||
let substs = trait_ref.map_bound(|trait_ref|
|
||||
InternalSubsts::for_item(tcx, def_id, |param, _|
|
||||
match param.kind {
|
||||
|
@ -1060,15 +1059,15 @@ fn vtable_methods<'tcx>(
|
|||
)
|
||||
);
|
||||
|
||||
// the trait type may have higher-ranked lifetimes in it;
|
||||
// so erase them if they appear, so that we get the type
|
||||
// at some particular call site
|
||||
// The trait type may have higher-ranked lifetimes in it;
|
||||
// erase them if they appear, so that we get the type
|
||||
// at some particular call site.
|
||||
let substs = tcx.normalize_erasing_late_bound_regions(
|
||||
ty::ParamEnv::reveal_all(),
|
||||
&substs
|
||||
);
|
||||
|
||||
// It's possible that the method relies on where clauses that
|
||||
// It's possible that the method relies on where-clauses that
|
||||
// do not hold for this particular set of type parameters.
|
||||
// Note that this method could then never be called, so we
|
||||
// do not want to try and codegen it, in that case (see #23435).
|
||||
|
|
|
@ -157,7 +157,7 @@ impl IntercrateAmbiguityCause {
|
|||
struct TraitObligationStack<'prev, 'tcx> {
|
||||
obligation: &'prev TraitObligation<'tcx>,
|
||||
|
||||
/// Trait ref from `obligation` but "freshened" with the
|
||||
/// The trait ref from `obligation` but "freshened" with the
|
||||
/// selection-context's freshener. Used to check for recursion.
|
||||
fresh_trait_ref: ty::PolyTraitRef<'tcx>,
|
||||
|
||||
|
@ -193,11 +193,11 @@ struct TraitObligationStack<'prev, 'tcx> {
|
|||
|
||||
previous: TraitObligationStackList<'prev, 'tcx>,
|
||||
|
||||
/// Number of parent frames plus one -- so the topmost frame has depth 1.
|
||||
/// The number of parent frames plus one (thus, the topmost frame has depth 1).
|
||||
depth: usize,
|
||||
|
||||
/// Depth-first number of this node in the search graph -- a
|
||||
/// pre-order index. Basically a freshly incremented counter.
|
||||
/// The depth-first number of this node in the search graph -- a
|
||||
/// pre-order index. Basically, a freshly incremented counter.
|
||||
dfn: usize,
|
||||
}
|
||||
|
||||
|
@ -239,9 +239,9 @@ pub struct SelectionCache<'tcx> {
|
|||
/// }
|
||||
/// fn foo<T: AsDebug>(t: T) { println!("{:?}", <T as AsDebug>::debug(t)); }
|
||||
///
|
||||
/// we can't just use the impl to resolve the <T as AsDebug> obligation
|
||||
/// - a type from another crate (that doesn't implement fmt::Debug) could
|
||||
/// implement AsDebug.
|
||||
/// we can't just use the impl to resolve the `<T as AsDebug>` obligation
|
||||
/// -- a type from another crate (that doesn't implement `fmt::Debug`) could
|
||||
/// implement `AsDebug`.
|
||||
///
|
||||
/// Because where-clauses match the type exactly, multiple clauses can
|
||||
/// only match if there are unresolved variables, and we can mostly just
|
||||
|
@ -266,10 +266,10 @@ pub struct SelectionCache<'tcx> {
|
|||
/// }
|
||||
/// fn main() { foo(false); }
|
||||
///
|
||||
/// Here the obligation <T as Foo<$0>> can be matched by both the blanket
|
||||
/// impl and the where-clause. We select the where-clause and unify $0=bool,
|
||||
/// Here the obligation `<T as Foo<$0>>` can be matched by both the blanket
|
||||
/// impl and the where-clause. We select the where-clause and unify `$0=bool`,
|
||||
/// so the program prints "false". However, if the where-clause is omitted,
|
||||
/// the blanket impl is selected, we unify $0=(), and the program prints
|
||||
/// the blanket impl is selected, we unify `$0=()`, and the program prints
|
||||
/// "()".
|
||||
///
|
||||
/// Exactly the same issues apply to projection and object candidates, except
|
||||
|
@ -282,8 +282,8 @@ pub struct SelectionCache<'tcx> {
|
|||
/// parameter environment.
|
||||
#[derive(PartialEq, Eq, Debug, Clone, TypeFoldable)]
|
||||
enum SelectionCandidate<'tcx> {
|
||||
/// If has_nested is false, there are no *further* obligations
|
||||
BuiltinCandidate {
|
||||
/// `false` if there are no *further* obligations.
|
||||
has_nested: bool,
|
||||
},
|
||||
ParamCandidate(ty::PolyTraitRef<'tcx>),
|
||||
|
@ -339,11 +339,11 @@ impl<'a, 'tcx> ty::Lift<'tcx> for SelectionCandidate<'a> {
|
|||
}
|
||||
|
||||
struct SelectionCandidateSet<'tcx> {
|
||||
// a list of candidates that definitely apply to the current
|
||||
// A list of candidates that definitely apply to the current
|
||||
// obligation (meaning: types unify).
|
||||
vec: Vec<SelectionCandidate<'tcx>>,
|
||||
|
||||
// if this is true, then there were candidates that might or might
|
||||
// If `true`, then there were candidates that might or might
|
||||
// not have applied, but we couldn't tell. This occurs when some
|
||||
// of the input types are type variables, in which case there are
|
||||
// various "builtin" rules that might or might not trigger.
|
||||
|
@ -358,7 +358,7 @@ struct EvaluatedCandidate<'tcx> {
|
|||
|
||||
/// When does the builtin impl for `T: Trait` apply?
|
||||
enum BuiltinImplConditions<'tcx> {
|
||||
/// The impl is conditional on T1,T2,.. : Trait
|
||||
/// The impl is conditional on `T1, T2, ...: Trait`.
|
||||
Where(ty::Binder<Vec<Ty<'tcx>>>),
|
||||
/// There is no built-in impl. There may be some other
|
||||
/// candidate (a where-clause or user-defined impl).
|
||||
|
@ -381,15 +381,15 @@ enum BuiltinImplConditions<'tcx> {
|
|||
/// the categories it's easy to see that the unions are correct.
|
||||
#[derive(Copy, Clone, Debug, PartialOrd, Ord, PartialEq, Eq, HashStable)]
|
||||
pub enum EvaluationResult {
|
||||
/// Evaluation successful
|
||||
/// Evaluation successful.
|
||||
EvaluatedToOk,
|
||||
/// Evaluation successful, but there were unevaluated region obligations
|
||||
/// Evaluation successful, but there were unevaluated region obligations.
|
||||
EvaluatedToOkModuloRegions,
|
||||
/// Evaluation is known to be ambiguous - it *might* hold for some
|
||||
/// Evaluation is known to be ambiguous -- it *might* hold for some
|
||||
/// assignment of inference variables, but it might not.
|
||||
///
|
||||
/// While this has the same meaning as `EvaluatedToUnknown` - we can't
|
||||
/// know whether this obligation holds or not - it is the result we
|
||||
/// While this has the same meaning as `EvaluatedToUnknown` -- we can't
|
||||
/// know whether this obligation holds or not -- it is the result we
|
||||
/// would get with an empty stack, and therefore is cacheable.
|
||||
EvaluatedToAmbig,
|
||||
/// Evaluation failed because of recursion involving inference
|
||||
|
@ -404,10 +404,10 @@ pub enum EvaluationResult {
|
|||
/// We know this branch can't be a part of a minimal proof-tree for
|
||||
/// the "root" of our cycle, because then we could cut out the recursion
|
||||
/// and maintain a valid proof tree. However, this does not mean
|
||||
/// that all the obligations on this branch do not hold - it's possible
|
||||
/// that all the obligations on this branch do not hold -- it's possible
|
||||
/// that we entered this branch "speculatively", and that there
|
||||
/// might be some other way to prove this obligation that does not
|
||||
/// go through this cycle - so we can't cache this as a failure.
|
||||
/// go through this cycle -- so we can't cache this as a failure.
|
||||
///
|
||||
/// For example, suppose we have this:
|
||||
///
|
||||
|
@ -723,10 +723,10 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
|
|||
debug!("evaluate_predicate_recursively(previous_stack={:?}, obligation={:?})",
|
||||
previous_stack.head(), obligation);
|
||||
|
||||
// Previous_stack stores a TraitObligatiom, while 'obligation' is
|
||||
// a PredicateObligation. These are distinct types, so we can't
|
||||
// use any Option combinator method that would force them to be
|
||||
// the same
|
||||
// `previous_stack` stores a `TraitObligatiom`, while `obligation` is
|
||||
// a `PredicateObligation`. These are distinct types, so we can't
|
||||
// use any `Option` combinator method that would force them to be
|
||||
// the same.
|
||||
match previous_stack.head() {
|
||||
Some(h) => self.check_recursion_limit(&obligation, h.obligation)?,
|
||||
None => self.check_recursion_limit(&obligation, &obligation)?
|
||||
|
@ -740,7 +740,7 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
|
|||
}
|
||||
|
||||
ty::Predicate::Subtype(ref p) => {
|
||||
// does this code ever run?
|
||||
// Does this code ever run?
|
||||
match self.infcx
|
||||
.subtype_predicate(&obligation.cause, obligation.param_env, p)
|
||||
{
|
||||
|
@ -768,8 +768,7 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
|
|||
},
|
||||
|
||||
ty::Predicate::TypeOutlives(..) | ty::Predicate::RegionOutlives(..) => {
|
||||
// we do not consider region relationships when
|
||||
// evaluating trait matches
|
||||
// We do not consider region relationships when evaluating trait matches.
|
||||
Ok(EvaluatedToOkModuloRegions)
|
||||
}
|
||||
|
||||
|
@ -953,7 +952,7 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
|
|||
stack: &TraitObligationStack<'_, 'tcx>,
|
||||
) -> Option<EvaluationResult> {
|
||||
if let Some(cycle_depth) = stack.iter()
|
||||
.skip(1) // skip top-most frame
|
||||
.skip(1) // Skip top-most frame.
|
||||
.find(|prev| stack.obligation.param_env == prev.obligation.param_env &&
|
||||
stack.fresh_trait_ref == prev.fresh_trait_ref)
|
||||
.map(|stack| stack.depth)
|
||||
|
@ -1030,8 +1029,8 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
|
|||
.skip_binder()
|
||||
.input_types()
|
||||
.any(|ty| ty.is_fresh());
|
||||
// this check was an imperfect workaround for a bug n the old
|
||||
// intercrate mode, it should be removed when that goes away.
|
||||
// This check was an imperfect workaround for a bug in the old
|
||||
// intercrate mode; it should be removed when that goes away.
|
||||
if unbound_input_types && self.intercrate == Some(IntercrateMode::Issue43355) {
|
||||
debug!(
|
||||
"evaluate_stack({:?}) --> unbound argument, intercrate --> ambiguous",
|
||||
|
@ -1083,7 +1082,7 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
|
|||
}
|
||||
|
||||
/// For defaulted traits, we use a co-inductive strategy to solve, so
|
||||
/// that recursion is ok. This routine returns true if the top of the
|
||||
/// that recursion is ok. This routine returns `true` if the top of the
|
||||
/// stack (`cycle[0]`):
|
||||
///
|
||||
/// - is a defaulted trait,
|
||||
|
@ -1107,7 +1106,7 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
|
|||
result
|
||||
}
|
||||
|
||||
/// Further evaluate `candidate` to decide whether all type parameters match and whether nested
|
||||
/// Further evaluates `candidate` to decide whether all type parameters match and whether nested
|
||||
/// obligations are met. Returns whether `candidate` remains viable after this further
|
||||
/// scrutiny.
|
||||
fn evaluate_candidate<'o>(
|
||||
|
@ -1199,26 +1198,26 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
|
|||
.insert(trait_ref, WithDepNode::new(dep_node, result));
|
||||
}
|
||||
|
||||
// For various reasons, it's possible for a subobligation
|
||||
// to have a *lower* recursion_depth than the obligation used to create it.
|
||||
// Projection sub-obligations may be returned from the projection cache,
|
||||
// which results in obligations with an 'old' recursion_depth.
|
||||
// Additionally, methods like ty::wf::obligations and
|
||||
// InferCtxt.subtype_predicate produce subobligations without
|
||||
// taking in a 'parent' depth, causing the generated subobligations
|
||||
// to have a recursion_depth of 0
|
||||
//
|
||||
// To ensure that obligation_depth never decreasees, we force all subobligations
|
||||
// to have at least the depth of the original obligation.
|
||||
/// For various reasons, it's possible for a subobligation
|
||||
/// to have a *lower* recursion_depth than the obligation used to create it.
|
||||
/// Projection sub-obligations may be returned from the projection cache,
|
||||
/// which results in obligations with an 'old' `recursion_depth`.
|
||||
/// Additionally, methods like `ty::wf::obligations` and
|
||||
/// `InferCtxt.subtype_predicate` produce subobligations without
|
||||
/// taking in a 'parent' depth, causing the generated subobligations
|
||||
/// to have a `recursion_depth` of `0`.
|
||||
///
|
||||
/// To ensure that obligation_depth never decreasees, we force all subobligations
|
||||
/// to have at least the depth of the original obligation.
|
||||
fn add_depth<T: 'cx, I: Iterator<Item = &'cx mut Obligation<'tcx, T>>>(&self, it: I,
|
||||
min_depth: usize) {
|
||||
it.for_each(|o| o.recursion_depth = cmp::max(min_depth, o.recursion_depth) + 1);
|
||||
}
|
||||
|
||||
// Check that the recursion limit has not been exceeded.
|
||||
//
|
||||
// The weird return type of this function allows it to be used with the 'try' (?)
|
||||
// operator within certain functions
|
||||
/// Checks that the recursion limit has not been exceeded.
|
||||
///
|
||||
/// The weird return type of this function allows it to be used with the `try` (`?`)
|
||||
/// operator within certain functions.
|
||||
fn check_recursion_limit<T: Display + TypeFoldable<'tcx>, V: Display + TypeFoldable<'tcx>>(
|
||||
&self,
|
||||
obligation: &Obligation<'tcx, T>,
|
||||
|
@ -1256,7 +1255,6 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
|
|||
// not update) the cache.
|
||||
self.check_recursion_limit(&stack.obligation, &stack.obligation)?;
|
||||
|
||||
|
||||
// Check the cache. Note that we freshen the trait-ref
|
||||
// separately rather than using `stack.fresh_trait_ref` --
|
||||
// this is because we want the unbound variables to be
|
||||
|
@ -1436,10 +1434,10 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
|
|||
// candidate set is *individually* applicable. Now we have to
|
||||
// figure out if they contain mutual incompatibilities. This
|
||||
// frequently arises if we have an unconstrained input type --
|
||||
// for example, we are looking for $0:Eq where $0 is some
|
||||
// for example, we are looking for `$0: Eq` where `$0` is some
|
||||
// unconstrained type variable. In that case, we'll get a
|
||||
// candidate which assumes $0 == int, one that assumes $0 ==
|
||||
// usize, etc. This spells an ambiguity.
|
||||
// candidate which assumes $0 == int, one that assumes `$0 ==
|
||||
// usize`, etc. This spells an ambiguity.
|
||||
|
||||
// If there is more than one candidate, first winnow them down
|
||||
// by considering extra conditions (nested obligations and so
|
||||
|
@ -1453,8 +1451,8 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
|
|||
// and we were to see some code `foo.push_clone()` where `boo`
|
||||
// is a `Vec<Bar>` and `Bar` does not implement `Clone`. If
|
||||
// we were to winnow, we'd wind up with zero candidates.
|
||||
// Instead, we select the right impl now but report `Bar does
|
||||
// not implement Clone`.
|
||||
// Instead, we select the right impl now but report "`Bar` does
|
||||
// not implement `Clone`".
|
||||
if candidates.len() == 1 {
|
||||
return self.filter_negative_and_reservation_impls(candidates.pop().unwrap());
|
||||
}
|
||||
|
@ -1586,7 +1584,7 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
|
|||
// avoid us having to fear that coherence results "pollute"
|
||||
// the master cache. Since coherence executes pretty quickly,
|
||||
// it's not worth going to more trouble to increase the
|
||||
// hit-rate I don't think.
|
||||
// hit-rate, I don't think.
|
||||
if self.intercrate.is_some() {
|
||||
return false;
|
||||
}
|
||||
|
@ -1617,13 +1615,13 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
|
|||
}
|
||||
|
||||
/// Determines whether can we safely cache the result
|
||||
/// of selecting an obligation. This is almost always 'true',
|
||||
/// except when dealing with certain ParamCandidates.
|
||||
/// of selecting an obligation. This is almost always `true`,
|
||||
/// except when dealing with certain `ParamCandidate`s.
|
||||
///
|
||||
/// Ordinarily, a ParamCandidate will contain no inference variables,
|
||||
/// since it was usually produced directly from a DefId. However,
|
||||
/// Ordinarily, a `ParamCandidate` will contain no inference variables,
|
||||
/// since it was usually produced directly from a `DefId`. However,
|
||||
/// certain cases (currently only librustdoc's blanket impl finder),
|
||||
/// a ParamEnv may be explicitly constructed with inference types.
|
||||
/// a `ParamEnv` may be explicitly constructed with inference types.
|
||||
/// When this is the case, we do *not* want to cache the resulting selection
|
||||
/// candidate. This is due to the fact that it might not always be possible
|
||||
/// to equate the obligation's trait ref and the candidate's trait ref,
|
||||
|
@ -1631,7 +1629,7 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
|
|||
///
|
||||
/// Because of this, we always want to re-run the full selection
|
||||
/// process for our obligation the next time we see it, since
|
||||
/// we might end up picking a different SelectionCandidate (or none at all)
|
||||
/// we might end up picking a different `SelectionCandidate` (or none at all).
|
||||
fn can_cache_candidate(&self,
|
||||
result: &SelectionResult<'tcx, SelectionCandidate<'tcx>>
|
||||
) -> bool {
|
||||
|
@ -1662,15 +1660,14 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
|
|||
|
||||
if self.can_use_global_caches(param_env) {
|
||||
if let Err(Overflow) = candidate {
|
||||
// Don't cache overflow globally; we only produce this
|
||||
// in certain modes.
|
||||
// Don't cache overflow globally; we only produce this in certain modes.
|
||||
} else if !trait_ref.has_local_value() {
|
||||
if !candidate.has_local_value() {
|
||||
debug!(
|
||||
"insert_candidate_cache(trait_ref={:?}, candidate={:?}) global",
|
||||
trait_ref, candidate,
|
||||
);
|
||||
// This may overwrite the cache with the same value
|
||||
// This may overwrite the cache with the same value.
|
||||
tcx.selection_cache
|
||||
.hashmap
|
||||
.borrow_mut()
|
||||
|
@ -1755,7 +1752,7 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
|
|||
} else {
|
||||
if lang_items.clone_trait() == Some(def_id) {
|
||||
// Same builtin conditions as `Copy`, i.e., every type which has builtin support
|
||||
// for `Copy` also has builtin support for `Clone`, + tuples and arrays of `Clone`
|
||||
// for `Copy` also has builtin support for `Clone`, and tuples/arrays of `Clone`
|
||||
// types have builtin support for `Clone`.
|
||||
let clone_conditions = self.copy_clone_conditions(obligation);
|
||||
self.assemble_builtin_bound_candidates(clone_conditions, &mut candidates)?;
|
||||
|
@ -1786,7 +1783,7 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
|
|||
) {
|
||||
debug!("assemble_candidates_for_projected_tys({:?})", obligation);
|
||||
|
||||
// before we go into the whole placeholder thing, just
|
||||
// Before we go into the whole placeholder thing, just
|
||||
// quickly check if the self-type is a projection at all.
|
||||
match obligation.predicate.skip_binder().trait_ref.self_ty().kind {
|
||||
ty::Projection(_) | ty::Opaque(..) => {}
|
||||
|
@ -1907,10 +1904,10 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
|
|||
self.infcx.leak_check(false, placeholder_map, snapshot).is_ok()
|
||||
}
|
||||
|
||||
/// Given an obligation like `<SomeTrait for T>`, search the obligations that the caller
|
||||
/// Given an obligation like `<SomeTrait for T>`, searches the obligations that the caller
|
||||
/// supplied to find out whether it is listed among them.
|
||||
///
|
||||
/// Never affects inference environment.
|
||||
/// Never affects the inference environment.
|
||||
fn assemble_candidates_from_caller_bounds<'o>(
|
||||
&mut self,
|
||||
stack: &TraitObligationStack<'o, 'tcx>,
|
||||
|
@ -2052,7 +2049,7 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
|
|||
Ok(())
|
||||
}
|
||||
|
||||
/// Implement one of the `Fn()` family for a fn pointer.
|
||||
/// Implements one of the `Fn()` family for a fn pointer.
|
||||
fn assemble_fn_pointer_candidates(
|
||||
&mut self,
|
||||
obligation: &TraitObligation<'tcx>,
|
||||
|
@ -2067,14 +2064,14 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
|
|||
return Ok(());
|
||||
}
|
||||
|
||||
// Okay to skip binder because what we are inspecting doesn't involve bound regions
|
||||
// Okay to skip binder because what we are inspecting doesn't involve bound regions.
|
||||
let self_ty = *obligation.self_ty().skip_binder();
|
||||
match self_ty.kind {
|
||||
ty::Infer(ty::TyVar(_)) => {
|
||||
debug!("assemble_fn_pointer_candidates: ambiguous self-type");
|
||||
candidates.ambiguous = true; // could wind up being a fn() type
|
||||
candidates.ambiguous = true; // Could wind up being a fn() type.
|
||||
}
|
||||
// provide an impl, but only for suitable `fn` pointers
|
||||
// Provide an impl, but only for suitable `fn` pointers.
|
||||
ty::FnDef(..) | ty::FnPtr(_) => {
|
||||
if let ty::FnSig {
|
||||
unsafety: hir::Unsafety::Normal,
|
||||
|
@ -2092,7 +2089,7 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
|
|||
Ok(())
|
||||
}
|
||||
|
||||
/// Search for impls that might apply to `obligation`.
|
||||
/// Searches for impls that might apply to `obligation`.
|
||||
fn assemble_candidates_from_impls(
|
||||
&mut self,
|
||||
obligation: &TraitObligation<'tcx>,
|
||||
|
@ -2160,7 +2157,7 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
|
|||
// this path.
|
||||
}
|
||||
ty::Infer(ty::TyVar(_)) => {
|
||||
// the auto impl might apply, we don't know
|
||||
// The auto impl might apply; we don't know.
|
||||
candidates.ambiguous = true;
|
||||
}
|
||||
ty::Generator(_, _, movability)
|
||||
|
@ -2188,7 +2185,7 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
|
|||
Ok(())
|
||||
}
|
||||
|
||||
/// Search for impls that might apply to `obligation`.
|
||||
/// Searches for impls that might apply to `obligation`.
|
||||
fn assemble_candidates_from_object_ty(
|
||||
&mut self,
|
||||
obligation: &TraitObligation<'tcx>,
|
||||
|
@ -2226,7 +2223,7 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
|
|||
return;
|
||||
}
|
||||
} else {
|
||||
// Only auto-trait bounds exist.
|
||||
// Only auto trait bounds exist.
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
@ -2267,7 +2264,7 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
|
|||
})
|
||||
}
|
||||
|
||||
/// Search for unsizing that might apply to `obligation`.
|
||||
/// Searches for unsizing that might apply to `obligation`.
|
||||
fn assemble_candidates_for_unsizing(
|
||||
&mut self,
|
||||
obligation: &TraitObligation<'tcx>,
|
||||
|
@ -2311,7 +2308,7 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
|
|||
(&ty::Dynamic(ref data_a, ..), &ty::Dynamic(ref data_b, ..)) => {
|
||||
// Upcasts permit two things:
|
||||
//
|
||||
// 1. Dropping builtin bounds, e.g., `Foo+Send` to `Foo`
|
||||
// 1. Dropping auto traits, e.g., `Foo + Send` to `Foo`
|
||||
// 2. Tightening the region bound, e.g., `Foo + 'a` to `Foo + 'b` if `'a: 'b`
|
||||
//
|
||||
// Note that neither of these changes requires any
|
||||
|
@ -2326,11 +2323,11 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
|
|||
.all(|b| data_a.auto_traits().any(|a| a == b))
|
||||
}
|
||||
|
||||
// T -> Trait.
|
||||
// `T` -> `Trait`
|
||||
(_, &ty::Dynamic(..)) => true,
|
||||
|
||||
// Ambiguous handling is below T -> Trait, because inference
|
||||
// variables can still implement Unsize<Trait> and nested
|
||||
// Ambiguous handling is below `T` -> `Trait`, because inference
|
||||
// variables can still implement `Unsize<Trait>` and nested
|
||||
// obligations will have the final say (likely deferred).
|
||||
(&ty::Infer(ty::TyVar(_)), _) | (_, &ty::Infer(ty::TyVar(_))) => {
|
||||
debug!("assemble_candidates_for_unsizing: ambiguous");
|
||||
|
@ -2338,15 +2335,15 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
|
|||
false
|
||||
}
|
||||
|
||||
// [T; n] -> [T].
|
||||
// `[T; n]` -> `[T]`
|
||||
(&ty::Array(..), &ty::Slice(_)) => true,
|
||||
|
||||
// Struct<T> -> Struct<U>.
|
||||
// `Struct<T>` -> `Struct<U>`
|
||||
(&ty::Adt(def_id_a, _), &ty::Adt(def_id_b, _)) if def_id_a.is_struct() => {
|
||||
def_id_a == def_id_b
|
||||
}
|
||||
|
||||
// (.., T) -> (.., U).
|
||||
// `(.., T)` -> `(.., U)`
|
||||
(&ty::Tuple(tys_a), &ty::Tuple(tys_b)) => tys_a.len() == tys_b.len(),
|
||||
|
||||
_ => false,
|
||||
|
@ -2404,7 +2401,7 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
|
|||
|cand: &ty::PolyTraitRef<'_>| cand.is_global() && !cand.has_late_bound_regions();
|
||||
|
||||
match other.candidate {
|
||||
// Prefer BuiltinCandidate { has_nested: false } to anything else.
|
||||
// Prefer `BuiltinCandidate { has_nested: false }` to anything else.
|
||||
// This is a fix for #53123 and prevents winnowing from accidentally extending the
|
||||
// lifetime of a variable.
|
||||
BuiltinCandidate { has_nested: false } => true,
|
||||
|
@ -2415,7 +2412,7 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
|
|||
when there are other valid candidates"
|
||||
);
|
||||
}
|
||||
// Prefer BuiltinCandidate { has_nested: false } to anything else.
|
||||
// Prefer `BuiltinCandidate { has_nested: false }` to anything else.
|
||||
// This is a fix for #53123 and prevents winnowing from accidentally extending the
|
||||
// lifetime of a variable.
|
||||
BuiltinCandidate { has_nested: false } => false,
|
||||
|
@ -2446,7 +2443,7 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
|
|||
when there are other valid candidates"
|
||||
);
|
||||
}
|
||||
// Prefer BuiltinCandidate { has_nested: false } to anything else.
|
||||
// Prefer `BuiltinCandidate { has_nested: false }` to anything else.
|
||||
// This is a fix for #53123 and prevents winnowing from accidentally extending the
|
||||
// lifetime of a variable.
|
||||
BuiltinCandidate { has_nested: false } => false,
|
||||
|
@ -2468,7 +2465,7 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
|
|||
ImplCandidate(other_def) => {
|
||||
// See if we can toss out `victim` based on specialization.
|
||||
// This requires us to know *for sure* that the `other` impl applies
|
||||
// i.e., EvaluatedToOk:
|
||||
// i.e., `EvaluatedToOk`.
|
||||
if other.evaluation.must_apply_modulo_regions() {
|
||||
match victim.candidate {
|
||||
ImplCandidate(victim_def) => {
|
||||
|
@ -2496,7 +2493,7 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
|
|||
match victim.candidate {
|
||||
ParamCandidate(ref cand) => {
|
||||
// Prefer these to a global where-clause bound
|
||||
// (see issue #50825)
|
||||
// (see issue #50825).
|
||||
is_global(cand) && other.evaluation.must_apply_modulo_regions()
|
||||
}
|
||||
_ => false,
|
||||
|
@ -2754,7 +2751,7 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
|
|||
types.skip_binder().to_vec()
|
||||
}
|
||||
|
||||
// for `PhantomData<T>`, we pass `T`
|
||||
// For `PhantomData<T>`, we pass `T`.
|
||||
ty::Adt(def, substs) if def.is_phantom_data() => substs.types().collect(),
|
||||
|
||||
ty::Adt(def, substs) => def.all_fields().map(|f| f.ty(self.tcx(), substs)).collect(),
|
||||
|
@ -2894,11 +2891,9 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
|
|||
}
|
||||
|
||||
BuiltinObjectCandidate => {
|
||||
// This indicates something like `(Trait+Send) :
|
||||
// Send`. In this case, we know that this holds
|
||||
// because that's what the object type is telling us,
|
||||
// and there's really no additional obligations to
|
||||
// prove and no types in particular to unify etc.
|
||||
// This indicates something like `Trait + Send: Send`. In this case, we know that
|
||||
// this holds because that's what the object type is telling us, and there's really
|
||||
// no additional obligations to prove and no types in particular to unify, etc.
|
||||
Ok(VtableParam(Vec::new()))
|
||||
}
|
||||
|
||||
|
@ -3152,7 +3147,7 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
|
|||
// We want to find the first supertrait in the list of
|
||||
// supertraits that we can unify with, and do that
|
||||
// unification. We know that there is exactly one in the list
|
||||
// where we can unify because otherwise select would have
|
||||
// where we can unify, because otherwise select would have
|
||||
// reported an ambiguity. (When we do find a match, also
|
||||
// record it for later.)
|
||||
let nonmatching = util::supertraits(tcx, poly_trait_ref).take_while(
|
||||
|
@ -3166,7 +3161,7 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
|
|||
},
|
||||
);
|
||||
|
||||
// Additionally, for each of the nonmatching predicates that
|
||||
// Additionally, for each of the non-matching predicates that
|
||||
// we pass over, we sum up the set of number of vtable
|
||||
// entries, so that we can compute the offset for the selected
|
||||
// trait.
|
||||
|
@ -3354,7 +3349,7 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
|
|||
trait_ref,
|
||||
)?);
|
||||
|
||||
// FIXME: chalk
|
||||
// FIXME: Chalk
|
||||
|
||||
if !self.tcx().sess.opts.debugging_opts.chalk {
|
||||
obligations.push(Obligation::new(
|
||||
|
@ -3421,7 +3416,7 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
|
|||
) -> Result<VtableBuiltinData<PredicateObligation<'tcx>>, SelectionError<'tcx>> {
|
||||
let tcx = self.tcx();
|
||||
|
||||
// assemble_candidates_for_unsizing should ensure there are no late bound
|
||||
// `assemble_candidates_for_unsizing` should ensure there are no late-bound
|
||||
// regions here. See the comment there for more details.
|
||||
let source = self.infcx
|
||||
.shallow_resolve(obligation.self_ty().no_bound_vars().unwrap());
|
||||
|
@ -3442,7 +3437,7 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
|
|||
match (&source.kind, &target.kind) {
|
||||
// Trait+Kx+'a -> Trait+Ky+'b (upcasts).
|
||||
(&ty::Dynamic(ref data_a, r_a), &ty::Dynamic(ref data_b, r_b)) => {
|
||||
// See assemble_candidates_for_unsizing for more info.
|
||||
// See `assemble_candidates_for_unsizing` for more info.
|
||||
let existential_predicates = data_a.map_bound(|data_a| {
|
||||
let iter =
|
||||
data_a.principal().map(|x| ty::ExistentialPredicate::Trait(x))
|
||||
|
@ -3463,20 +3458,19 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
|
|||
// Require that the traits involved in this upcast are **equal**;
|
||||
// only the **lifetime bound** is changed.
|
||||
//
|
||||
// FIXME: This condition is arguably too strong -- it
|
||||
// would suffice for the source trait to be a
|
||||
// *subtype* of the target trait. In particular
|
||||
// changing from something like `for<'a, 'b> Foo<'a,
|
||||
// 'b>` to `for<'a> Foo<'a, 'a>` should be
|
||||
// FIXME: This condition is arguably too strong -- it would
|
||||
// suffice for the source trait to be a *subtype* of the target
|
||||
// trait. In particular, changing from something like
|
||||
// `for<'a, 'b> Foo<'a, 'b>` to `for<'a> Foo<'a, 'a>` should be
|
||||
// permitted. And, indeed, in the in commit
|
||||
// 904a0bde93f0348f69914ee90b1f8b6e4e0d7cbc, this
|
||||
// condition was loosened. However, when the leak check was added
|
||||
// back, using subtype here actually guies the coercion code in
|
||||
// such a way that it accepts `old-lub-glb-object.rs`. This is probably
|
||||
// a good thing, but I've modified this to `.eq` because I want
|
||||
// to continue rejecting that test (as we have done for quite some time)
|
||||
// before we are firmly comfortable with what our behavior
|
||||
// should be there. -nikomatsakis
|
||||
// condition was loosened. However, when the leak check was
|
||||
// added back, using subtype here actually guides the coercion
|
||||
// code in such a way that it accepts `old-lub-glb-object.rs`.
|
||||
// This is probably a good thing, but I've modified this to `.eq`
|
||||
// because I want to continue rejecting that test (as we have
|
||||
// done for quite some time) before we are firmly comfortable
|
||||
// with what our behavior should be there. -nikomatsakis
|
||||
let InferOk { obligations, .. } = self.infcx
|
||||
.at(&obligation.cause, obligation.param_env)
|
||||
.eq(target, source_trait) // FIXME -- see below
|
||||
|
@ -3498,7 +3492,7 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
));
}

// T -> Trait.
// `T` -> `Trait`
(_, &ty::Dynamic(ref data, r)) => {
let mut object_dids = data.auto_traits()
.chain(data.principal_def_id());

@ -3522,24 +3516,26 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
};

// Create obligations:
// - Casting T to Trait
// - Casting `T` to `Trait`
// - For all the various builtin bounds attached to the object cast. (In other
// words, if the object type is Foo+Send, this would create an obligation for the
// Send check.)
// words, if the object type is `Foo + Send`, this would create an obligation for
// the `Send` check.)
// - Projection predicates
nested.extend(
data.iter()
.map(|d| predicate_to_obligation(d.with_self_ty(tcx, source))),
.map(|predicate|
predicate_to_obligation(predicate.with_self_ty(tcx, source))
),
);

// We can only make objects from sized types.
let tr = ty::TraitRef {
def_id: tcx.require_lang_item(lang_items::SizedTraitLangItem, None),
substs: tcx.mk_substs_trait(source, &[]),
};
let tr = ty::TraitRef::new(
tcx.require_lang_item(lang_items::SizedTraitLangItem, None),
tcx.mk_substs_trait(source, &[]),
);
nested.push(predicate_to_obligation(tr.to_predicate()));

// If the type is `Foo+'a`, ensures that the type
// If the type is `Foo + 'a`, ensure that the type
// being cast to `Foo + 'a` outlives `'a`:
let outlives = ty::OutlivesPredicate(source, r);
nested.push(predicate_to_obligation(
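// Illustrative sketch, not part of this diff: the `T -> Trait` arm above is
// what allows a sized value to be coerced to a trait object, given that `T`
// implements the principal and auto traits, is `Sized`, and outlives the
// object lifetime.
use std::fmt::Display;

fn describe(x: Box<dyn Display + Send>) -> String {
    format!("{}", x)
}

fn main() {
    // `Box<u64>` unsizes to `Box<dyn Display + Send>`:
    // `u64` is `Display + Send + Sized + 'static`.
    println!("{}", describe(Box::new(7u64)));
}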
@ -3547,7 +3543,7 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
));
}

// [T; n] -> [T].
// `[T; n]` -> `[T]`
(&ty::Array(a, _), &ty::Slice(b)) => {
let InferOk { obligations, .. } = self.infcx
.at(&obligation.cause, obligation.param_env)

@ -3556,10 +3552,10 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
nested.extend(obligations);
}

// Struct<T> -> Struct<U>.
// `Struct<T>` -> `Struct<U>`
(&ty::Adt(def, substs_a), &ty::Adt(_, substs_b)) => {
let fields = def.all_fields()
.map(|f| tcx.type_of(f.did))
.map(|field| tcx.type_of(field.did))
.collect::<Vec<_>>();

// The last field of the structure has to exist and contain type parameters.

@ -3598,7 +3594,7 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
}
}

// Extract Field<T> and Field<U> from Struct<T> and Struct<U>.
// Extract `Field<T>` and `Field<U>` from `Struct<T>` and `Struct<U>`.
let inner_source = field.subst(tcx, substs_a);
let inner_target = field.subst(tcx, substs_b);

@ -3618,7 +3614,7 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
.map_err(|_| Unimplemented)?;
nested.extend(obligations);

// Construct the nested Field<T>: Unsize<Field<U>> predicate.
// Construct the nested `Field<T>: Unsize<Field<U>>` predicate.
nested.push(tcx.predicate_for_trait_def(
obligation.param_env,
obligation.cause.clone(),
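// Illustrative sketch, not part of this diff, of the `[T; n] -> [T]` and
// `Struct<T> -> Struct<U>` cases handled above, as seen from user code.
use std::rc::Rc;

fn main() {
    // `[T; n] -> [T]`: an array reference unsizes to a slice reference.
    let a: [i32; 3] = [1, 2, 3];
    let s: &[i32] = &a;

    // `Struct<T> -> Struct<U>`: a smart-pointer struct unsizes through its last field.
    let r: Rc<[i32]> = Rc::new([1, 2, 3]);
    assert_eq!(s.len(), r.len());
}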
@ -3629,7 +3625,7 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
));
}

// (.., T) -> (.., U).
// `(.., T)` -> `(.., U)`
(&ty::Tuple(tys_a), &ty::Tuple(tys_b)) => {
assert_eq!(tys_a.len(), tys_b.len());

@ -3652,7 +3648,7 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
.map_err(|_| Unimplemented)?;
nested.extend(obligations);

// Construct the nested T: Unsize<U> predicate.
// Construct the nested `T: Unsize<U>` predicate.
nested.push(tcx.predicate_for_trait_def(
obligation.param_env,
obligation.cause.clone(),

@ -3969,7 +3965,7 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
//
// This code is hot enough that it's worth avoiding the allocation
// required for the FxHashSet when possible. Special-casing lengths 0,
// 1 and 2 covers roughly 75--80% of the cases.
// 1 and 2 covers roughly 75-80% of the cases.
if predicates.len() <= 1 {
// No possibility of duplicates.
} else if predicates.len() == 2 {
@ -80,7 +80,7 @@ impl<T: AsRef<ty::Predicate<'tcx>>> Extend<T> for PredicateSet<'tcx> {
///////////////////////////////////////////////////////////////////////////

/// "Elaboration" is the process of identifying all the predicates that
/// are implied by a source predicate. Currently this basically means
/// are implied by a source predicate. Currently, this basically means
/// walking the "supertraits" and other similar assumptions. For example,
/// if we know that `T: Ord`, the elaborator would deduce that `T: PartialOrd`
/// holds as well. Similarly, if we have `trait Foo: 'static`, and we know that
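// Illustrative sketch, not part of this diff: what the elaborated supertrait
// predicate buys at the use site -- a `T: Ord` bound implies `T: PartialOrd`,
// so `<` is usable without writing that bound explicitly.
fn smaller<T: Ord>(a: T, b: T) -> T {
    // `<` resolves through the implied `PartialOrd` bound.
    if a < b { a } else { b }
}

fn main() {
    assert_eq!(smaller(3, 7), 3);
}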
@ -1,14 +1,15 @@
use crate::hir;
use crate::hir::def_id::DefId;
use crate::ty::{self, BoundRegion, Region, Ty, TyCtxt};
use std::borrow::Cow;
use std::fmt;

use errors::{Applicability, DiagnosticBuilder};
use rustc_target::spec::abi;
use syntax::ast;
use syntax::errors::pluralize;
use errors::{Applicability, DiagnosticBuilder};
use syntax_pos::Span;

use crate::hir;
use std::borrow::Cow;
use std::fmt;

#[derive(Clone, Copy, Debug, PartialEq, Eq, TypeFoldable)]
pub struct ExpectedFound<T> {
@ -36,10 +36,10 @@ pub enum InstanceDef<'tcx> {
ReifyShim(DefId),

/// `<fn() as FnTrait>::call_*`
/// `DefId` is `FnTrait::call_*`
/// `DefId` is `FnTrait::call_*`.
FnPtrShim(DefId, Ty<'tcx>),

/// `<Trait as Trait>::fn`
/// `<dyn Trait as Trait>::fn`
Virtual(DefId, usize),

/// `<[mut closure] as FnOnce>::call_once`

@ -115,7 +115,7 @@ impl<'tcx> Instance<'tcx> {
pub fn fn_sig(&self, tcx: TyCtxt<'tcx>) -> ty::PolyFnSig<'tcx> {
let mut fn_sig = self.fn_sig_noadjust(tcx);
if let InstanceDef::VtableShim(..) = self.def {
// Modify fn(self, ...) to fn(self: *mut Self, ...)
// Modify `fn(self, ...)` to `fn(self: *mut Self, ...)`.
fn_sig = fn_sig.map_bound(|mut fn_sig| {
let mut inputs_and_output = fn_sig.inputs_and_output.to_vec();
inputs_and_output[0] = tcx.mk_mut_ptr(inputs_and_output[0]);
@ -2103,8 +2103,8 @@ where
ty::RawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
assert!(i < this.fields.count());

// Reuse the fat *T type as its own thin pointer data field.
// This provides information about e.g., DST struct pointees
// Reuse the fat `*T` type as its own thin pointer data field.
// This provides information about, e.g., DST struct pointees
// (which may have no non-DST form), and will work as long
// as the `Abi` or `FieldPlacement` is checked by users.
if i == 0 {
@ -1923,17 +1923,17 @@ pub struct FieldDef {
///
/// These are all interned (by `intern_adt_def`) into the `adt_defs` table.
///
/// The initialism *"Adt"* stands for an [*algebraic data type (ADT)*][adt].
/// The initialism *ADT* stands for an [*algebraic data type (ADT)*][adt].
/// This is slightly wrong because `union`s are not ADTs.
/// Moreover, Rust only allows recursive data types through indirection.
///
/// [adt]: https://en.wikipedia.org/wiki/Algebraic_data_type
pub struct AdtDef {
/// `DefId` of the struct, enum or union item.
/// The `DefId` of the struct, enum or union item.
pub did: DefId,
/// Variants of the ADT. If this is a struct or union, then there will be a single variant.
pub variants: IndexVec<self::layout::VariantIdx, VariantDef>,
/// Flags of the ADT (e.g. is this a struct? is this non-exhaustive?)
/// Flags of the ADT (e.g., is this a struct? is this non-exhaustive?).
flags: AdtFlags,
/// Repr options provided by the user.
pub repr: ReprOptions,

@ -1954,7 +1954,7 @@ impl Ord for AdtDef {
}

impl PartialEq for AdtDef {
// AdtDef are always interned and this is part of TyS equality
// `AdtDef`s are always interned, and this is part of `TyS` equality.
#[inline]
fn eq(&self, other: &Self) -> bool { ptr::eq(self, other) }
}
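// Illustrative sketch, not part of this diff (the names are hypothetical):
// the interning pattern that the pointer-based `PartialEq` impl above relies
// on -- interned values are unique per content, so pointer equality is a
// valid and cheap stand-in for structural equality.
use std::collections::HashSet;
use std::ptr;

struct Interner {
    // Leaked strings stand in for an arena; fine for a short-lived example.
    set: HashSet<&'static str>,
}

impl Interner {
    fn intern(&mut self, s: &str) -> &'static str {
        if let Some(&existing) = self.set.get(s) {
            return existing;
        }
        let leaked: &'static str = Box::leak(s.to_owned().into_boxed_str());
        self.set.insert(leaked);
        leaked
    }
}

fn main() {
    let mut interner = Interner { set: HashSet::new() };
    let a = interner.intern("foo");
    let b = interner.intern("foo");
    // Same interned pointer, so `ptr::eq` agrees with string equality.
    assert!(ptr::eq(a, b));
}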
@ -1976,7 +1976,6 @@ impl<'tcx> rustc_serialize::UseSpecializedEncodable for &'tcx AdtDef {
|
|||
|
||||
impl<'tcx> rustc_serialize::UseSpecializedDecodable for &'tcx AdtDef {}
|
||||
|
||||
|
||||
impl<'a> HashStable<StableHashingContext<'a>> for AdtDef {
|
||||
fn hash_stable(&self, hcx: &mut StableHashingContext<'a>, hasher: &mut StableHasher) {
|
||||
thread_local! {
|
||||
|
|
|
@ -281,7 +281,7 @@ impl<'tcx> Relate<'tcx> for ty::TraitRef<'tcx> {
|
|||
a: &ty::TraitRef<'tcx>,
|
||||
b: &ty::TraitRef<'tcx>,
|
||||
) -> RelateResult<'tcx, ty::TraitRef<'tcx>> {
|
||||
// Different traits cannot be related
|
||||
// Different traits cannot be related.
|
||||
if a.def_id != b.def_id {
|
||||
Err(TypeError::Traits(expected_found(relation, &a.def_id, &b.def_id)))
|
||||
} else {
|
||||
|
@ -297,7 +297,7 @@ impl<'tcx> Relate<'tcx> for ty::ExistentialTraitRef<'tcx> {
|
|||
a: &ty::ExistentialTraitRef<'tcx>,
|
||||
b: &ty::ExistentialTraitRef<'tcx>,
|
||||
) -> RelateResult<'tcx, ty::ExistentialTraitRef<'tcx>> {
|
||||
// Different traits cannot be related
|
||||
// Different traits cannot be related.
|
||||
if a.def_id != b.def_id {
|
||||
Err(TypeError::Traits(expected_found(relation, &a.def_id, &b.def_id)))
|
||||
} else {
|
||||
|
|
|
@ -4,12 +4,13 @@
|
|||
|
||||
use crate::hir::def::Namespace;
|
||||
use crate::mir::ProjectionKind;
|
||||
use crate::mir::interpret;
|
||||
use crate::ty::{self, Lift, Ty, TyCtxt, InferConst};
|
||||
use crate::ty::fold::{TypeFoldable, TypeFolder, TypeVisitor};
|
||||
use crate::ty::print::{FmtPrinter, Printer};
|
||||
|
||||
use rustc_index::vec::{IndexVec, Idx};
|
||||
use smallvec::SmallVec;
|
||||
use crate::mir::interpret;
|
||||
|
||||
use std::fmt;
|
||||
use std::rc::Rc;
|
||||
|
|
|
@ -2,14 +2,14 @@
|
|||
|
||||
#![allow(rustc::usage_of_ty_tykind)]
|
||||
|
||||
use self::InferTy::*;
|
||||
use self::TyKind::*;
|
||||
|
||||
use crate::hir;
|
||||
use crate::hir::def_id::DefId;
|
||||
use crate::infer::canonical::Canonical;
|
||||
use crate::mir::interpret::ConstValue;
|
||||
use crate::middle::region;
|
||||
use polonius_engine::Atom;
|
||||
use rustc_index::vec::Idx;
|
||||
use rustc_macros::HashStable;
|
||||
use crate::ty::subst::{InternalSubsts, Subst, SubstsRef, GenericArg, GenericArgKind};
|
||||
use crate::ty::{self, AdtDef, Discr, DefIdTree, TypeFlags, Ty, TyCtxt, TypeFoldable};
|
||||
use crate::ty::{List, TyS, ParamEnvAnd, ParamEnv};
|
||||
|
@ -17,27 +17,30 @@ use crate::ty::layout::VariantIdx;
|
|||
use crate::util::captures::Captures;
|
||||
use crate::mir::interpret::{Scalar, GlobalId};
|
||||
|
||||
use polonius_engine::Atom;
|
||||
use rustc_index::vec::Idx;
|
||||
use rustc_macros::HashStable;
|
||||
use rustc_target::spec::abi;
|
||||
use smallvec::SmallVec;
|
||||
use std::borrow::Cow;
|
||||
use std::cmp::Ordering;
|
||||
use std::marker::PhantomData;
|
||||
use std::ops::Range;
|
||||
use rustc_target::spec::abi;
|
||||
use syntax::ast::{self, Ident};
|
||||
use syntax::symbol::{kw, Symbol};
|
||||
|
||||
use self::InferTy::*;
|
||||
use self::TyKind::*;
|
||||
|
||||
#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Debug, RustcEncodable, RustcDecodable)]
|
||||
#[derive(HashStable, TypeFoldable, Lift)]
|
||||
#[derive(
|
||||
Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Debug, RustcEncodable, RustcDecodable,
|
||||
HashStable, TypeFoldable, Lift,
|
||||
)]
|
||||
pub struct TypeAndMut<'tcx> {
|
||||
pub ty: Ty<'tcx>,
|
||||
pub mutbl: hir::Mutability,
|
||||
}
|
||||
|
||||
#[derive(Clone, PartialEq, PartialOrd, Eq, Ord, Hash,
|
||||
RustcEncodable, RustcDecodable, Copy, HashStable)]
|
||||
#[derive(
|
||||
Clone, PartialEq, PartialOrd, Eq, Ord, Hash, RustcEncodable, RustcDecodable, Copy, HashStable,
|
||||
)]
|
||||
/// A "free" region `fr` can be interpreted as "some region
|
||||
/// at least as big as the scope `fr.scope`".
|
||||
pub struct FreeRegion {
|
||||
|
@ -45,8 +48,9 @@ pub struct FreeRegion {
|
|||
pub bound_region: BoundRegion,
|
||||
}
|
||||
|
||||
#[derive(Clone, PartialEq, PartialOrd, Eq, Ord, Hash,
|
||||
RustcEncodable, RustcDecodable, Copy, HashStable)]
|
||||
#[derive(
|
||||
Clone, PartialEq, PartialOrd, Eq, Ord, Hash, RustcEncodable, RustcDecodable, Copy, HashStable,
|
||||
)]
|
||||
pub enum BoundRegion {
|
||||
/// An anonymous region parameter for a given fn (&T)
|
||||
BrAnon(u32),
|
||||
|
@ -471,18 +475,18 @@ impl<'tcx> GeneratorSubsts<'tcx> {
|
|||
}
|
||||
|
||||
impl<'tcx> GeneratorSubsts<'tcx> {
|
||||
/// Generator have not been resumed yet
|
||||
/// Generator has not been resumed yet.
|
||||
pub const UNRESUMED: usize = 0;
|
||||
/// Generator has returned / is completed
|
||||
/// Generator has returned or is completed.
|
||||
pub const RETURNED: usize = 1;
|
||||
/// Generator has been poisoned
|
||||
/// Generator has been poisoned.
|
||||
pub const POISONED: usize = 2;
|
||||
|
||||
const UNRESUMED_NAME: &'static str = "Unresumed";
|
||||
const RETURNED_NAME: &'static str = "Returned";
|
||||
const POISONED_NAME: &'static str = "Panicked";
|
||||
|
||||
/// The valid variant indices of this Generator.
|
||||
/// The valid variant indices of this generator.
|
||||
#[inline]
|
||||
pub fn variant_range(&self, def_id: DefId, tcx: TyCtxt<'tcx>) -> Range<VariantIdx> {
|
||||
// FIXME requires optimized MIR
|
||||
|
@ -490,7 +494,7 @@ impl<'tcx> GeneratorSubsts<'tcx> {
|
|||
(VariantIdx::new(0)..VariantIdx::new(num_variants))
|
||||
}
|
||||
|
||||
/// The discriminant for the given variant. Panics if the variant_index is
|
||||
/// The discriminant for the given variant. Panics if the `variant_index` is
|
||||
/// out of range.
|
||||
#[inline]
|
||||
pub fn discriminant_for_variant(
|
||||
|
@ -505,7 +509,7 @@ impl<'tcx> GeneratorSubsts<'tcx> {
|
|||
Discr { val: variant_index.as_usize() as u128, ty: self.discr_ty(tcx) }
|
||||
}
|
||||
|
||||
/// The set of all discriminants for the Generator, enumerated with their
|
||||
/// The set of all discriminants for the generator, enumerated with their
|
||||
/// variant indices.
|
||||
#[inline]
|
||||
pub fn discriminants(
|
||||
|
@ -670,12 +674,12 @@ impl<'tcx> List<ExistentialPredicate<'tcx>> {
|
|||
pub fn principal(&self) -> Option<ExistentialTraitRef<'tcx>> {
|
||||
match self[0] {
|
||||
ExistentialPredicate::Trait(tr) => Some(tr),
|
||||
_ => None
|
||||
_ => None,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn principal_def_id(&self) -> Option<DefId> {
|
||||
self.principal().map(|d| d.def_id)
|
||||
self.principal().map(|trait_ref| trait_ref.def_id)
|
||||
}
|
||||
|
||||
#[inline]
|
||||
|
@ -684,7 +688,7 @@ impl<'tcx> List<ExistentialPredicate<'tcx>> {
|
|||
{
|
||||
self.iter().filter_map(|predicate| {
|
||||
match *predicate {
|
||||
ExistentialPredicate::Projection(p) => Some(p),
|
||||
ExistentialPredicate::Projection(projection) => Some(projection),
|
||||
_ => None,
|
||||
}
|
||||
})
|
||||
|
@ -694,8 +698,8 @@ impl<'tcx> List<ExistentialPredicate<'tcx>> {
|
|||
pub fn auto_traits<'a>(&'a self) -> impl Iterator<Item = DefId> + 'a {
|
||||
self.iter().filter_map(|predicate| {
|
||||
match *predicate {
|
||||
ExistentialPredicate::AutoTrait(d) => Some(d),
|
||||
_ => None
|
||||
ExistentialPredicate::AutoTrait(did) => Some(did),
|
||||
_ => None,
|
||||
}
|
||||
})
|
||||
}
|
||||
|
@ -722,7 +726,8 @@ impl<'tcx> Binder<&'tcx List<ExistentialPredicate<'tcx>>> {
|
|||
}
|
||||
|
||||
pub fn iter<'a>(&'a self)
|
||||
-> impl DoubleEndedIterator<Item = Binder<ExistentialPredicate<'tcx>>> + 'tcx {
|
||||
-> impl DoubleEndedIterator<Item = Binder<ExistentialPredicate<'tcx>>> + 'tcx
|
||||
{
|
||||
self.skip_binder().iter().cloned().map(Binder::bind)
|
||||
}
|
||||
}
|
||||
|
@ -751,7 +756,7 @@ pub struct TraitRef<'tcx> {
|
|||
|
||||
impl<'tcx> TraitRef<'tcx> {
|
||||
pub fn new(def_id: DefId, substs: SubstsRef<'tcx>) -> TraitRef<'tcx> {
|
||||
TraitRef { def_id: def_id, substs: substs }
|
||||
TraitRef { def_id, substs }
|
||||
}
|
||||
|
||||
/// Returns a `TraitRef` of the form `P0: Foo<P1..Pn>` where `Pi`
|
||||
|
@ -1296,7 +1301,7 @@ pub enum RegionKind {
|
|||
/// A region variable. Should not exist after typeck.
|
||||
ReVar(RegionVid),
|
||||
|
||||
/// A placeholder region - basically the higher-ranked version of ReFree.
|
||||
/// A placeholder region -- basically, the higher-ranked version of `ReFree`.
|
||||
/// Should not exist after typeck.
|
||||
RePlaceholder(ty::PlaceholderRegion),
|
||||
|
||||
|
@ -1807,14 +1812,14 @@ impl<'tcx> TyS<'tcx> {
|
|||
match self.kind {
|
||||
Array(ty, _) | Slice(ty) => ty,
|
||||
Str => tcx.mk_mach_uint(ast::UintTy::U8),
|
||||
_ => bug!("sequence_element_type called on non-sequence value: {}", self),
|
||||
_ => bug!("`sequence_element_type` called on non-sequence value: {}", self),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn simd_type(&self, tcx: TyCtxt<'tcx>) -> Ty<'tcx> {
|
||||
match self.kind {
|
||||
Adt(def, substs) => def.non_enum_variant().fields[0].ty(tcx, substs),
|
||||
_ => bug!("simd_type called on invalid type")
|
||||
_ => bug!("`simd_type` called on invalid type"),
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -1823,7 +1828,7 @@ impl<'tcx> TyS<'tcx> {
|
|||
// allow `#[repr(simd)] struct Simd<T, const N: usize>([T; N]);`.
|
||||
match self.kind {
|
||||
Adt(def, _) => def.non_enum_variant().fields.len() as u64,
|
||||
_ => bug!("simd_size called on invalid type")
|
||||
_ => bug!("`simd_size` called on invalid type"),
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -1833,7 +1838,7 @@ impl<'tcx> TyS<'tcx> {
|
|||
let variant = def.non_enum_variant();
|
||||
(variant.fields.len() as u64, variant.fields[0].ty(tcx, substs))
|
||||
}
|
||||
_ => bug!("simd_size_and_type called on invalid type")
|
||||
_ => bug!("`simd_size_and_type` called on invalid type"),
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -1894,7 +1899,7 @@ impl<'tcx> TyS<'tcx> {
|
|||
}
|
||||
}
|
||||
|
||||
/// panics if called on any type other than `Box<T>`
|
||||
/// Panics if called on any type other than `Box<T>`.
|
||||
pub fn boxed_ty(&self) -> Ty<'tcx> {
|
||||
match self.kind {
|
||||
Adt(def, substs) if def.is_box() => substs.type_at(0),
|
||||
|
@ -2114,7 +2119,8 @@ impl<'tcx> TyS<'tcx> {
|
|||
}
|
||||
|
||||
/// If the type contains variants, returns the valid range of variant indices.
|
||||
/// FIXME This requires the optimized MIR in the case of generators.
|
||||
//
|
||||
// FIXME: This requires the optimized MIR in the case of generators.
|
||||
#[inline]
|
||||
pub fn variant_range(&self, tcx: TyCtxt<'tcx>) -> Option<Range<VariantIdx>> {
|
||||
match self.kind {
|
||||
|
@ -2127,7 +2133,8 @@ impl<'tcx> TyS<'tcx> {
|
|||
|
||||
/// If the type contains variants, returns the variant for `variant_index`.
|
||||
/// Panics if `variant_index` is out of range.
|
||||
/// FIXME This requires the optimized MIR in the case of generators.
|
||||
//
|
||||
// FIXME: This requires the optimized MIR in the case of generators.
|
||||
#[inline]
|
||||
pub fn discriminant_for_variant(
|
||||
&self,
|
||||
|
@ -2142,7 +2149,7 @@ impl<'tcx> TyS<'tcx> {
|
|||
}
|
||||
}
|
||||
|
||||
/// Push onto `out` the regions directly referenced from this type (but not
|
||||
/// Pushes onto `out` the regions directly referenced from this type (but not
|
||||
/// types reachable from this type via `walk_tys`). This ignores late-bound
|
||||
/// regions binders.
|
||||
pub fn push_regions(&self, out: &mut SmallVec<[ty::Region<'tcx>; 4]>) {
|
||||
|
@ -2255,7 +2262,7 @@ impl<'tcx> TyS<'tcx> {
|
|||
ty::Infer(ty::FreshTy(_)) |
|
||||
ty::Infer(ty::FreshIntTy(_)) |
|
||||
ty::Infer(ty::FreshFloatTy(_)) =>
|
||||
bug!("is_trivially_sized applied to unexpected type: {:?}", self),
|
||||
bug!("`is_trivially_sized` applied to unexpected type: {:?}", self),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
@ -333,14 +333,14 @@ impl<'tcx> TyCtxt<'tcx> {
|
|||
ty
|
||||
}
|
||||
|
||||
/// Same as applying struct_tail on `source` and `target`, but only
|
||||
/// Same as applying `struct_tail` on `source` and `target`, but only
|
||||
/// keeps going as long as the two types are instances of the same
|
||||
/// structure definitions.
|
||||
/// For `(Foo<Foo<T>>, Foo<dyn Trait>)`, the result will be `(Foo<T>, Trait)`,
|
||||
/// whereas struct_tail produces `T`, and `Trait`, respectively.
|
||||
///
|
||||
/// Should only be called if the types have no inference variables and do
|
||||
/// not need their lifetimes preserved (e.g. as part of codegen); otherwise
|
||||
/// not need their lifetimes preserved (e.g., as part of codegen); otherwise,
|
||||
/// normalization attempt may cause compiler bugs.
|
||||
pub fn struct_lockstep_tails_erasing_lifetimes(self,
|
||||
source: Ty<'tcx>,
|
||||
|
@ -353,7 +353,7 @@ impl<'tcx> TyCtxt<'tcx> {
|
|||
source, target, |ty| tcx.normalize_erasing_regions(param_env, ty))
|
||||
}
|
||||
|
||||
/// Same as applying struct_tail on `source` and `target`, but only
|
||||
/// Same as applying `struct_tail` on `source` and `target`, but only
|
||||
/// keeps going as long as the two types are instances of the same
|
||||
/// structure definitions.
|
||||
/// For `(Foo<Foo<T>>, Foo<dyn Trait>)`, the result will be `(Foo<T>, Trait)`,
|
||||
|
|
|
@ -1,16 +1,15 @@
|
|||
use crate::llvm::{self, AttributePlace};
|
||||
use crate::builder::Builder;
|
||||
use crate::context::CodegenCx;
|
||||
use crate::llvm::{self, AttributePlace};
|
||||
use crate::type_::Type;
|
||||
use crate::type_of::LayoutLlvmExt;
|
||||
use crate::value::Value;
|
||||
use crate::type_of::{LayoutLlvmExt};
|
||||
|
||||
use rustc_codegen_ssa::MemFlags;
|
||||
use rustc_codegen_ssa::mir::place::PlaceRef;
|
||||
use rustc_codegen_ssa::mir::operand::OperandValue;
|
||||
use rustc_target::abi::call::ArgAbi;
|
||||
|
||||
use rustc_codegen_ssa::traits::*;
|
||||
|
||||
use rustc_target::abi::call::ArgAbi;
|
||||
use rustc_target::abi::{HasDataLayout, LayoutOf};
|
||||
use rustc::ty::{Ty};
|
||||
use rustc::ty::layout::{self};
|
||||
|
@ -202,7 +201,7 @@ impl ArgAbiExt<'ll, 'tcx> for ArgAbi<'tcx, Ty<'tcx>> {
|
|||
if self.is_sized_indirect() {
|
||||
OperandValue::Ref(val, None, self.layout.align.abi).store(bx, dst)
|
||||
} else if self.is_unsized_indirect() {
|
||||
bug!("unsized ArgAbi must be handled through store_fn_arg");
|
||||
bug!("unsized `ArgAbi` must be handled through `store_fn_arg`");
|
||||
} else if let PassMode::Cast(cast) = self.mode {
|
||||
// FIXME(eddyb): Figure out when the simpler Store is safe, clang
|
||||
// uses it for i16 -> {i8, i8}, but not for i24 -> {i8, i8, i8}.
|
||||
|
|
|
@ -140,7 +140,7 @@
|
|||
//! In order for link-time optimization to work properly, LLVM needs a unique
|
||||
//! type identifier that tells it across compilation units which types are the
|
||||
//! same as others. This type identifier is created by
|
||||
//! TypeMap::get_unique_type_id_of_type() using the following algorithm:
|
||||
//! `TypeMap::get_unique_type_id_of_type()` using the following algorithm:
|
||||
//!
|
||||
//! (1) Primitive types have their name as ID
|
||||
//! (2) Structs, enums and traits have a multipart identifier
|
||||
|
|
|
@ -7,16 +7,16 @@ use super::utils::{debug_context, DIB, span_start,
|
|||
use super::namespace::mangled_name_of_instance;
|
||||
use super::type_names::compute_debuginfo_type_name;
|
||||
use super::CrateDebugContext;
|
||||
use crate::abi;
|
||||
use crate::value::Value;
|
||||
use rustc_codegen_ssa::traits::*;
|
||||
|
||||
use crate::abi;
|
||||
use crate::common::CodegenCx;
|
||||
use crate::llvm;
|
||||
use crate::llvm::debuginfo::{DIArray, DIType, DIFile, DIScope, DIDescriptor,
|
||||
DICompositeType, DILexicalBlock, DIFlags, DebugEmissionKind};
|
||||
use crate::llvm_util;
|
||||
use crate::value::Value;
|
||||
|
||||
use crate::common::CodegenCx;
|
||||
use rustc_codegen_ssa::traits::*;
|
||||
use rustc_data_structures::stable_hasher::{HashStable, StableHasher};
|
||||
use rustc::hir::CodegenFnAttrFlags;
|
||||
use rustc::hir::def::CtorKind;
|
||||
|
@ -36,6 +36,9 @@ use rustc::util::nodemap::FxHashMap;
|
|||
use rustc_fs_util::path_to_c_string;
|
||||
use rustc_data_structures::small_c_str::SmallCStr;
|
||||
use rustc_target::abi::HasDataLayout;
|
||||
use syntax::ast;
|
||||
use syntax::symbol::{Interner, Symbol};
|
||||
use syntax_pos::{self, Span, FileName};
|
||||
|
||||
use libc::{c_uint, c_longlong};
|
||||
use std::collections::hash_map::Entry;
|
||||
|
@ -45,9 +48,6 @@ use std::hash::{Hash, Hasher};
|
|||
use std::iter;
|
||||
use std::ptr;
|
||||
use std::path::{Path, PathBuf};
|
||||
use syntax::ast;
|
||||
use syntax::symbol::{Interner, Symbol};
|
||||
use syntax_pos::{self, Span, FileName};
|
||||
|
||||
impl PartialEq for llvm::Metadata {
|
||||
fn eq(&self, other: &Self) -> bool {
|
||||
|
@ -70,7 +70,7 @@ impl fmt::Debug for llvm::Metadata {
|
|||
}
|
||||
|
||||
// From DWARF 5.
|
||||
// See http://www.dwarfstd.org/ShowIssue.php?issue=140129.1
|
||||
// See http://www.dwarfstd.org/ShowIssue.php?issue=140129.1.
|
||||
const DW_LANG_RUST: c_uint = 0x1c;
|
||||
#[allow(non_upper_case_globals)]
|
||||
const DW_ATE_boolean: c_uint = 0x02;
|
||||
|
@ -91,70 +91,70 @@ pub const NO_SCOPE_METADATA: Option<&DIScope> = None;
|
|||
#[derive(Copy, Debug, Hash, Eq, PartialEq, Clone)]
|
||||
pub struct UniqueTypeId(ast::Name);
|
||||
|
||||
// The TypeMap is where the CrateDebugContext holds the type metadata nodes
|
||||
// created so far. The metadata nodes are indexed by UniqueTypeId, and, for
|
||||
// faster lookup, also by Ty. The TypeMap is responsible for creating
|
||||
// UniqueTypeIds.
|
||||
/// The `TypeMap` is where the `CrateDebugContext` holds the type metadata nodes
|
||||
/// created so far. The metadata nodes are indexed by `UniqueTypeId`, and, for
|
||||
/// faster lookup, also by `Ty`. The `TypeMap` is responsible for creating
|
||||
/// `UniqueTypeId`s.
|
||||
#[derive(Default)]
|
||||
pub struct TypeMap<'ll, 'tcx> {
|
||||
// The UniqueTypeIds created so far
|
||||
/// The `UniqueTypeId`s created so far.
|
||||
unique_id_interner: Interner,
|
||||
// A map from UniqueTypeId to debuginfo metadata for that type. This is a 1:1 mapping.
|
||||
/// A map from `UniqueTypeId` to debuginfo metadata for that type. This is a 1:1 mapping.
|
||||
unique_id_to_metadata: FxHashMap<UniqueTypeId, &'ll DIType>,
|
||||
// A map from types to debuginfo metadata. This is a N:1 mapping.
|
||||
/// A map from types to debuginfo metadata. This is an N:1 mapping.
|
||||
type_to_metadata: FxHashMap<Ty<'tcx>, &'ll DIType>,
|
||||
// A map from types to UniqueTypeId. This is a N:1 mapping.
|
||||
/// A map from types to `UniqueTypeId`. This is an N:1 mapping.
|
||||
type_to_unique_id: FxHashMap<Ty<'tcx>, UniqueTypeId>
|
||||
}
|
||||
|
||||
impl TypeMap<'ll, 'tcx> {
|
||||
// Adds a Ty to metadata mapping to the TypeMap. The method will fail if
|
||||
// the mapping already exists.
|
||||
/// Adds a Ty to metadata mapping to the TypeMap. The method will fail if
|
||||
/// the mapping already exists.
|
||||
fn register_type_with_metadata(
|
||||
&mut self,
|
||||
type_: Ty<'tcx>,
|
||||
metadata: &'ll DIType,
|
||||
) {
|
||||
if self.type_to_metadata.insert(type_, metadata).is_some() {
|
||||
bug!("Type metadata for Ty '{}' is already in the TypeMap!", type_);
|
||||
bug!("type metadata for `Ty` '{}' is already in the `TypeMap`!", type_);
|
||||
}
|
||||
}
|
||||
|
||||
// Removes a Ty to metadata mapping
|
||||
// This is useful when computing the metadata for a potentially
|
||||
// recursive type (e.g. a function ptr of the form:
|
||||
//
|
||||
// fn foo() -> impl Copy { foo }
|
||||
//
|
||||
// This kind of type cannot be properly represented
|
||||
// via LLVM debuginfo. As a workaround,
|
||||
// we register a temporary Ty to metadata mapping
|
||||
// for the function before we compute its actual metadata.
|
||||
// If the metadata computation ends up recursing back to the
|
||||
// original function, it will use the temporary mapping
|
||||
// for the inner self-reference, preventing us from
|
||||
// recursing forever.
|
||||
//
|
||||
// This function is used to remove the temporary metadata
|
||||
// mapping after we've computed the actual metadata
|
||||
/// Removes a `Ty`-to-metadata mapping.
|
||||
/// This is useful when computing the metadata for a potentially
|
||||
/// recursive type (e.g., a function pointer of the form:
|
||||
///
|
||||
/// fn foo() -> impl Copy { foo }
|
||||
///
|
||||
/// This kind of type cannot be properly represented
|
||||
/// via LLVM debuginfo. As a workaround,
|
||||
/// we register a temporary Ty to metadata mapping
|
||||
/// for the function before we compute its actual metadata.
|
||||
/// If the metadata computation ends up recursing back to the
|
||||
/// original function, it will use the temporary mapping
|
||||
/// for the inner self-reference, preventing us from
|
||||
/// recursing forever.
|
||||
///
|
||||
/// This function is used to remove the temporary metadata
|
||||
/// mapping after we've computed the actual metadata.
|
||||
fn remove_type(
|
||||
&mut self,
|
||||
type_: Ty<'tcx>,
|
||||
) {
|
||||
if self.type_to_metadata.remove(type_).is_none() {
|
||||
bug!("Type metadata Ty '{}' is not in the TypeMap!", type_);
|
||||
bug!("type metadata `Ty` '{}' is not in the `TypeMap`!", type_);
|
||||
}
|
||||
}
|
||||
|
||||
// Adds a UniqueTypeId to metadata mapping to the TypeMap. The method will
|
||||
// fail if the mapping already exists.
|
||||
/// Adds a `UniqueTypeId` to metadata mapping to the `TypeMap`. The method will
|
||||
/// fail if the mapping already exists.
|
||||
fn register_unique_id_with_metadata(
|
||||
&mut self,
|
||||
unique_type_id: UniqueTypeId,
|
||||
metadata: &'ll DIType,
|
||||
) {
|
||||
if self.unique_id_to_metadata.insert(unique_type_id, metadata).is_some() {
|
||||
bug!("Type metadata for unique id '{}' is already in the TypeMap!",
|
||||
bug!("type metadata for unique ID '{}' is already in the `TypeMap`!",
|
||||
self.get_unique_type_id_as_string(unique_type_id));
|
||||
}
|
||||
}
|
||||
|
@ -167,23 +167,23 @@ impl TypeMap<'ll, 'tcx> {
|
|||
self.unique_id_to_metadata.get(&unique_type_id).cloned()
|
||||
}
|
||||
|
||||
// Get the string representation of a UniqueTypeId. This method will fail if
|
||||
// the id is unknown.
|
||||
/// Gets the string representation of a `UniqueTypeId`. This method will fail if
|
||||
/// the ID is unknown.
|
||||
fn get_unique_type_id_as_string(&self, unique_type_id: UniqueTypeId) -> &str {
|
||||
let UniqueTypeId(interner_key) = unique_type_id;
|
||||
self.unique_id_interner.get(interner_key)
|
||||
}
|
||||
|
||||
// Get the UniqueTypeId for the given type. If the UniqueTypeId for the given
|
||||
// type has been requested before, this is just a table lookup. Otherwise an
|
||||
// ID will be generated and stored for later lookup.
|
||||
/// Gets the `UniqueTypeId` for the given type. If the `UniqueTypeId` for the given
|
||||
/// type has been requested before, this is just a table lookup. Otherwise, an
|
||||
/// ID will be generated and stored for later lookup.
|
||||
fn get_unique_type_id_of_type<'a>(&mut self, cx: &CodegenCx<'a, 'tcx>,
|
||||
type_: Ty<'tcx>) -> UniqueTypeId {
|
||||
// Let's see if we already have something in the cache
|
||||
// Let's see if we already have something in the cache.
|
||||
if let Some(unique_type_id) = self.type_to_unique_id.get(&type_).cloned() {
|
||||
return unique_type_id;
|
||||
}
|
||||
// if not, generate one
|
||||
// If not, generate one.
|
||||
|
||||
// The hasher we are using to generate the UniqueTypeId. We want
|
||||
// something that provides more than the 64 bits of the DefaultHasher.
|
||||
|
@ -203,9 +203,9 @@ impl TypeMap<'ll, 'tcx> {
|
|||
return UniqueTypeId(key);
|
||||
}
|
||||
|
||||
// Get the UniqueTypeId for an enum variant. Enum variants are not really
|
||||
// types of their own, so they need special handling. We still need a
|
||||
// UniqueTypeId for them, since to debuginfo they *are* real types.
|
||||
/// Gets the `UniqueTypeId` for an enum variant. Enum variants are not really
|
||||
/// types of their own, so they need special handling. We still need a
|
||||
/// `UniqueTypeId` for them, since to debuginfo they *are* real types.
|
||||
fn get_unique_type_id_of_enum_variant<'a>(&mut self,
|
||||
cx: &CodegenCx<'a, 'tcx>,
|
||||
enum_type: Ty<'tcx>,
|
||||
|
@ -219,9 +219,9 @@ impl TypeMap<'ll, 'tcx> {
|
|||
UniqueTypeId(interner_key)
|
||||
}
|
||||
|
||||
// Get the unique type id string for an enum variant part.
|
||||
// Variant parts are not types and shouldn't really have their own id,
|
||||
// but it makes set_members_of_composite_type() simpler.
|
||||
/// Gets the unique type ID string for an enum variant part.
|
||||
/// Variant parts are not types and shouldn't really have their own ID,
|
||||
/// but it makes `set_members_of_composite_type()` simpler.
|
||||
fn get_unique_type_id_str_of_enum_variant_part(&mut self, enum_type_id: UniqueTypeId) -> &str {
|
||||
let variant_part_type_id = format!("{}_variant_part",
|
||||
self.get_unique_type_id_as_string(enum_type_id));
|
||||
|
@ -230,11 +230,11 @@ impl TypeMap<'ll, 'tcx> {
|
|||
}
|
||||
}
|
||||
|
||||
// A description of some recursive type. It can either be already finished (as
|
||||
// with FinalMetadata) or it is not yet finished, but contains all information
|
||||
// needed to generate the missing parts of the description. See the
|
||||
// documentation section on Recursive Types at the top of this file for more
|
||||
// information.
|
||||
/// A description of some recursive type. It can either be already finished (as
|
||||
/// with `FinalMetadata`) or it is not yet finished, but contains all information
|
||||
/// needed to generate the missing parts of the description. See the
|
||||
/// documentation section on Recursive Types at the top of this file for more
|
||||
/// information.
|
||||
enum RecursiveTypeDescription<'ll, 'tcx> {
|
||||
UnfinishedMetadata {
|
||||
unfinished_type: Ty<'tcx>,
|
||||
|
@ -255,7 +255,7 @@ fn create_and_register_recursive_type_forward_declaration(
|
|||
member_description_factory: MemberDescriptionFactory<'ll, 'tcx>,
|
||||
) -> RecursiveTypeDescription<'ll, 'tcx> {
|
||||
|
||||
// Insert the stub into the TypeMap in order to allow for recursive references
|
||||
// Insert the stub into the `TypeMap` in order to allow for recursive references.
|
||||
let mut type_map = debug_context(cx).type_map.borrow_mut();
|
||||
type_map.register_unique_id_with_metadata(unique_type_id, metadata_stub);
|
||||
type_map.register_type_with_metadata(unfinished_type, metadata_stub);
|
||||
|
@ -270,9 +270,9 @@ fn create_and_register_recursive_type_forward_declaration(
|
|||
}
|
||||
|
||||
impl RecursiveTypeDescription<'ll, 'tcx> {
|
||||
// Finishes up the description of the type in question (mostly by providing
|
||||
// descriptions of the fields of the given type) and returns the final type
|
||||
// metadata.
|
||||
/// Finishes up the description of the type in question (mostly by providing
|
||||
/// descriptions of the fields of the given type) and returns the final type
|
||||
/// metadata.
|
||||
fn finalize(&self, cx: &CodegenCx<'ll, 'tcx>) -> MetadataCreationResult<'ll> {
|
||||
match *self {
|
||||
FinalMetadata(metadata) => MetadataCreationResult::new(metadata, false),
|
||||
|
@ -287,7 +287,7 @@ impl RecursiveTypeDescription<'ll, 'tcx> {
|
|||
// the TypeMap so that recursive references are possible. This
|
||||
// will always be the case if the RecursiveTypeDescription has
|
||||
// been properly created through the
|
||||
// create_and_register_recursive_type_forward_declaration()
|
||||
// `create_and_register_recursive_type_forward_declaration()`
|
||||
// function.
|
||||
{
|
||||
let type_map = debug_context(cx).type_map.borrow();
|
||||
|
@ -314,8 +314,8 @@ impl RecursiveTypeDescription<'ll, 'tcx> {
|
|||
}
|
||||
}
|
||||
|
||||
// Returns from the enclosing function if the type metadata with the given
|
||||
// unique id can be found in the type map
|
||||
/// Returns from the enclosing function if the type metadata with the given
|
||||
/// unique ID can be found in the type map.
|
||||
macro_rules! return_if_metadata_created_in_meantime {
|
||||
($cx: expr, $unique_type_id: expr) => (
|
||||
if let Some(metadata) = debug_context($cx).type_map
|
||||
|
@ -527,19 +527,19 @@ pub fn type_metadata(
|
|||
t: Ty<'tcx>,
|
||||
usage_site_span: Span,
|
||||
) -> &'ll DIType {
|
||||
// Get the unique type id of this type.
|
||||
// Get the unique type ID of this type.
|
||||
let unique_type_id = {
|
||||
let mut type_map = debug_context(cx).type_map.borrow_mut();
|
||||
// First, try to find the type in TypeMap. If we have seen it before, we
|
||||
// First, try to find the type in `TypeMap`. If we have seen it before, we
|
||||
// can exit early here.
|
||||
match type_map.find_metadata_for_type(t) {
|
||||
Some(metadata) => {
|
||||
return metadata;
|
||||
},
|
||||
None => {
|
||||
// The Ty is not in the TypeMap but maybe we have already seen
|
||||
// The Ty is not in the `TypeMap` but maybe we have already seen
|
||||
// an equivalent type (e.g., only differing in region arguments).
|
||||
// In order to find out, generate the unique type id and look
|
||||
// In order to find out, generate the unique type ID and look
|
||||
// that up.
|
||||
let unique_type_id = type_map.get_unique_type_id_of_type(cx, t);
|
||||
match type_map.find_metadata_for_unique_id(unique_type_id) {
|
||||
|
@ -647,15 +647,15 @@ pub fn type_metadata(
|
|||
//
|
||||
// fn foo() -> impl Copy { foo }
|
||||
//
|
||||
// See TypeMap::remove_type for more detals
|
||||
// about the workaround
|
||||
// See `TypeMap::remove_type` for more detals
|
||||
// about the workaround.
|
||||
|
||||
let temp_type = {
|
||||
unsafe {
|
||||
// The choice of type here is pretty arbitrary -
|
||||
// anything reading the debuginfo for a recursive
|
||||
// type is going to see *somthing* weird - the only
|
||||
// question is what exactly it will see
|
||||
// question is what exactly it will see.
|
||||
let (size, align) = cx.size_and_align_of(t);
|
||||
llvm::LLVMRustDIBuilderCreateBasicType(
|
||||
DIB(cx),
|
||||
|
@ -677,7 +677,7 @@ pub fn type_metadata(
|
|||
type_map.borrow_mut().remove_type(t);
|
||||
|
||||
|
||||
// This is actually a function pointer, so wrap it in pointer DI
|
||||
// This is actually a function pointer, so wrap it in pointer DI.
|
||||
MetadataCreationResult::new(pointer_type_metadata(cx, t, fn_metadata), false)
|
||||
|
||||
}
|
||||
|
@ -743,14 +743,14 @@ pub fn type_metadata(
|
|||
let mut type_map = debug_context(cx).type_map.borrow_mut();
|
||||
|
||||
if already_stored_in_typemap {
|
||||
// Also make sure that we already have a TypeMap entry for the unique type id.
|
||||
// Also make sure that we already have a `TypeMap` entry for the unique type ID.
|
||||
let metadata_for_uid = match type_map.find_metadata_for_unique_id(unique_type_id) {
|
||||
Some(metadata) => metadata,
|
||||
None => {
|
||||
span_bug!(usage_site_span,
|
||||
"Expected type metadata for unique \
|
||||
type id '{}' to already be in \
|
||||
the debuginfo::TypeMap but it \
|
||||
"expected type metadata for unique \
|
||||
type ID '{}' to already be in \
|
||||
the `debuginfo::TypeMap` but it \
|
||||
was not. (Ty = {})",
|
||||
type_map.get_unique_type_id_as_string(unique_type_id),
|
||||
t);
|
||||
|
@ -761,9 +761,9 @@ pub fn type_metadata(
|
|||
Some(metadata) => {
|
||||
if metadata != metadata_for_uid {
|
||||
span_bug!(usage_site_span,
|
||||
"Mismatch between Ty and \
|
||||
UniqueTypeId maps in \
|
||||
debuginfo::TypeMap. \
|
||||
"mismatch between `Ty` and \
|
||||
`UniqueTypeId` maps in \
|
||||
`debuginfo::TypeMap`. \
|
||||
UniqueTypeId={}, Ty={}",
|
||||
type_map.get_unique_type_id_as_string(unique_type_id),
|
||||
t);
|
||||
|
@ -851,7 +851,7 @@ fn basic_type_metadata(cx: &CodegenCx<'ll, 'tcx>, t: Ty<'tcx>) -> &'ll DIType {
|
|||
ty::Float(float_ty) => {
|
||||
(float_ty.name_str(), DW_ATE_float)
|
||||
},
|
||||
_ => bug!("debuginfo::basic_type_metadata - t is invalid type")
|
||||
_ => bug!("debuginfo::basic_type_metadata - `t` is invalid type")
|
||||
};
|
||||
|
||||
let (size, align) = cx.size_and_align_of(t);
|
||||
|
@ -908,7 +908,7 @@ pub fn compile_unit_metadata(
|
|||
};
|
||||
|
||||
// The OSX linker has an idiosyncrasy where it will ignore some debuginfo
|
||||
// if multiple object files with the same DW_AT_name are linked together.
|
||||
// if multiple object files with the same `DW_AT_name` are linked together.
|
||||
// As a workaround we generate unique names for each object file. Those do
|
||||
// not correspond to an actual source file but that should be harmless.
|
||||
if tcx.sess.target.target.options.is_like_osx {
|
||||
|
@ -935,11 +935,9 @@ pub fn compile_unit_metadata(
|
|||
//
|
||||
// This should actually be
|
||||
//
|
||||
// ```
|
||||
// let kind = DebugEmissionKind::from_generic(tcx.sess.opts.debuginfo);
|
||||
// ```
|
||||
//
|
||||
// that is, we should set LLVM's emission kind to `LineTablesOnly` if
|
||||
// That is, we should set LLVM's emission kind to `LineTablesOnly` if
|
||||
// we are compiling with "limited" debuginfo. However, some of the
|
||||
// existing tools relied on slightly more debuginfo being generated than
|
||||
// would be the case with `LineTablesOnly`, and we did not want to break
|
||||
|
@ -1029,8 +1027,8 @@ impl MetadataCreationResult<'ll> {
|
|||
}
|
||||
}
|
||||
|
||||
// Description of a type member, which can either be a regular field (as in
|
||||
// structs or tuples) or an enum variant.
|
||||
/// Description of a type member, which can either be a regular field (as in
|
||||
/// structs or tuples) or an enum variant.
|
||||
#[derive(Debug)]
|
||||
struct MemberDescription<'ll> {
|
||||
name: String,
|
||||
|
@ -1067,10 +1065,10 @@ impl<'ll> MemberDescription<'ll> {
|
|||
}
|
||||
}
|
||||
|
||||
// A factory for MemberDescriptions. It produces a list of member descriptions
|
||||
// for some record-like type. MemberDescriptionFactories are used to defer the
|
||||
// creation of type member descriptions in order to break cycles arising from
|
||||
// recursive type definitions.
|
||||
/// A factory for `MemberDescription`s. It produces a list of member descriptions
|
||||
/// for some record-like type. `MemberDescriptionFactory`s are used to defer the
|
||||
/// creation of type member descriptions in order to break cycles arising from
|
||||
/// recursive type definitions.
|
||||
enum MemberDescriptionFactory<'ll, 'tcx> {
|
||||
StructMDF(StructMemberDescriptionFactory<'tcx>),
|
||||
TupleMDF(TupleMemberDescriptionFactory<'tcx>),
|
||||
|
@ -1106,7 +1104,7 @@ impl MemberDescriptionFactory<'ll, 'tcx> {
|
|||
// Structs
|
||||
//=-----------------------------------------------------------------------------
|
||||
|
||||
// Creates MemberDescriptions for the fields of a struct
|
||||
/// Creates `MemberDescription`s for the fields of a struct.
|
||||
struct StructMemberDescriptionFactory<'tcx> {
|
||||
ty: Ty<'tcx>,
|
||||
variant: &'tcx ty::VariantDef,
|
||||
|
@ -1177,7 +1175,7 @@ fn prepare_struct_metadata(
|
|||
// Tuples
|
||||
//=-----------------------------------------------------------------------------
|
||||
|
||||
// Creates MemberDescriptions for the fields of a tuple
|
||||
/// Creates `MemberDescription`s for the fields of a tuple.
|
||||
struct TupleMemberDescriptionFactory<'tcx> {
|
||||
ty: Ty<'tcx>,
|
||||
component_types: Vec<Ty<'tcx>>,
|
||||
|
@ -1300,14 +1298,14 @@ fn prepare_union_metadata(
|
|||
// Enums
|
||||
//=-----------------------------------------------------------------------------
|
||||
|
||||
// DWARF variant support is only available starting in LLVM 8.
|
||||
// Although the earlier enum debug info output did not work properly
|
||||
// in all situations, it is better for the time being to continue to
|
||||
// sometimes emit the old style rather than emit something completely
|
||||
// useless when rust is compiled against LLVM 6 or older. LLVM 7
|
||||
// contains an early version of the DWARF variant support, and will
|
||||
// crash when handling the new debug info format. This function
|
||||
// decides which representation will be emitted.
|
||||
/// DWARF variant support is only available starting in LLVM 8.
|
||||
/// Although the earlier enum debug info output did not work properly
|
||||
/// in all situations, it is better for the time being to continue to
|
||||
/// sometimes emit the old style rather than emit something completely
|
||||
/// useless when rust is compiled against LLVM 6 or older. LLVM 7
|
||||
/// contains an early version of the DWARF variant support, and will
|
||||
/// crash when handling the new debug info format. This function
|
||||
/// decides which representation will be emitted.
|
||||
fn use_enum_fallback(cx: &CodegenCx<'_, '_>) -> bool {
|
||||
// On MSVC we have to use the fallback mode, because LLVM doesn't
|
||||
// lower variant parts to PDB.
|
||||
|
@ -1318,11 +1316,11 @@ fn use_enum_fallback(cx: &CodegenCx<'_, '_>) -> bool {
|
|||
|| llvm_util::get_major_version() < 8;
|
||||
}
|
||||
|
||||
// Describes the members of an enum value: An enum is described as a union of
|
||||
// structs in DWARF. This MemberDescriptionFactory provides the description for
|
||||
// the members of this union; so for every variant of the given enum, this
|
||||
// factory will produce one MemberDescription (all with no name and a fixed
|
||||
// offset of zero bytes).
|
||||
/// Describes the members of an enum value; an enum is described as a union of
|
||||
/// structs in DWARF. This `MemberDescriptionFactory` provides the description for
|
||||
/// the members of this union; so for every variant of the given enum, this
|
||||
/// factory will produce one `MemberDescription` (all with no name and a fixed
|
||||
/// offset of zero bytes).
|
||||
struct EnumMemberDescriptionFactory<'ll, 'tcx> {
|
||||
enum_type: Ty<'tcx>,
|
||||
layout: TyLayout<'tcx>,
|
||||
|
@ -1456,7 +1454,7 @@ impl EnumMemberDescriptionFactory<'ll, 'tcx> {
|
|||
} => {
|
||||
if fallback {
|
||||
let variant = self.layout.for_variant(cx, dataful_variant);
|
||||
// Create a description of the non-null variant
|
||||
// Create a description of the non-null variant.
|
||||
let (variant_type_metadata, member_description_factory) =
|
||||
describe_enum_variant(cx,
|
||||
variant,
|
||||
|
@ -1566,9 +1564,9 @@ impl EnumMemberDescriptionFactory<'ll, 'tcx> {
|
|||
}
|
||||
}
|
||||
|
||||
// Creates MemberDescriptions for the fields of a single enum variant.
|
||||
// Creates `MemberDescription`s for the fields of a single enum variant.
|
||||
struct VariantMemberDescriptionFactory<'ll, 'tcx> {
|
||||
// Cloned from the layout::Struct describing the variant.
|
||||
/// Cloned from the `layout::Struct` describing the variant.
|
||||
offsets: Vec<layout::Size>,
|
||||
args: Vec<(String, Ty<'tcx>)>,
|
||||
discriminant_type_metadata: Option<&'ll DIType>,
|
||||
|
@ -1652,10 +1650,10 @@ impl<'tcx> VariantInfo<'tcx> {
|
|||
}
|
||||
}
|
||||
|
||||
// Returns a tuple of (1) type_metadata_stub of the variant, (2) a
|
||||
// MemberDescriptionFactory for producing the descriptions of the
|
||||
// fields of the variant. This is a rudimentary version of a full
|
||||
// RecursiveTypeDescription.
|
||||
/// Returns a tuple of (1) `type_metadata_stub` of the variant, (2) a
|
||||
/// `MemberDescriptionFactory` for producing the descriptions of the
|
||||
/// fields of the variant. This is a rudimentary version of a full
|
||||
/// `RecursiveTypeDescription`.
|
||||
fn describe_enum_variant(
|
||||
cx: &CodegenCx<'ll, 'tcx>,
|
||||
layout: layout::TyLayout<'tcx>,
|
||||
|
@ -2088,8 +2086,7 @@ fn set_members_of_composite_type(cx: &CodegenCx<'ll, 'tcx>,
|
|||
}
|
||||
}
|
||||
|
||||
// Compute the type parameters for a type, if any, for the given
|
||||
// metadata.
|
||||
/// Computes the type parameters for a type, if any, for the given metadata.
|
||||
fn compute_type_parameters(cx: &CodegenCx<'ll, 'tcx>, ty: Ty<'tcx>) -> Option<&'ll DIArray> {
|
||||
if let ty::Adt(def, substs) = ty.kind {
|
||||
if !substs.types().next().is_none() {
|
||||
|
@ -2134,9 +2131,9 @@ fn compute_type_parameters(cx: &CodegenCx<'ll, 'tcx>, ty: Ty<'tcx>) -> Option<&'
|
|||
}
|
||||
}
|
||||
|
||||
// A convenience wrapper around LLVMRustDIBuilderCreateStructType(). Does not do
|
||||
// any caching, does not add any fields to the struct. This can be done later
|
||||
// with set_members_of_composite_type().
|
||||
/// A convenience wrapper around `LLVMRustDIBuilderCreateStructType()`. Does not do
|
||||
/// any caching, does not add any fields to the struct. This can be done later
|
||||
/// with `set_members_of_composite_type()`.
|
||||
fn create_struct_stub(
|
||||
cx: &CodegenCx<'ll, 'tcx>,
|
||||
struct_type: Ty<'tcx>,
|
||||
|
@ -2151,9 +2148,9 @@ fn create_struct_stub(
|
|||
debug_context(cx).type_map.borrow().get_unique_type_id_as_string(unique_type_id)
|
||||
);
|
||||
let metadata_stub = unsafe {
|
||||
// LLVMRustDIBuilderCreateStructType() wants an empty array. A null
|
||||
// `LLVMRustDIBuilderCreateStructType()` wants an empty array. A null
|
||||
// pointer will lead to hard to trace and debug LLVM assertions
|
||||
// later on in llvm/lib/IR/Value.cpp.
|
||||
// later on in `llvm/lib/IR/Value.cpp`.
|
||||
let empty_array = create_DIArray(DIB(cx), &[]);
|
||||
|
||||
llvm::LLVMRustDIBuilderCreateStructType(
|
||||
|
@ -2189,9 +2186,9 @@ fn create_union_stub(
|
|||
debug_context(cx).type_map.borrow().get_unique_type_id_as_string(unique_type_id)
|
||||
);
|
||||
let metadata_stub = unsafe {
|
||||
// LLVMRustDIBuilderCreateUnionType() wants an empty array. A null
|
||||
// `LLVMRustDIBuilderCreateUnionType()` wants an empty array. A null
|
||||
// pointer will lead to hard to trace and debug LLVM assertions
|
||||
// later on in llvm/lib/IR/Value.cpp.
|
||||
// later on in `llvm/lib/IR/Value.cpp`.
|
||||
let empty_array = create_DIArray(DIB(cx), &[]);
|
||||
|
||||
llvm::LLVMRustDIBuilderCreateUnionType(
|
||||
|
@ -2231,8 +2228,8 @@ pub fn create_global_var_metadata(
|
|||
}
|
||||
|
||||
let no_mangle = attrs.flags.contains(CodegenFnAttrFlags::NO_MANGLE);
|
||||
// We may want to remove the namespace scope if we're in an extern block, see:
|
||||
// https://github.com/rust-lang/rust/pull/46457#issuecomment-351750952
|
||||
// We may want to remove the namespace scope if we're in an extern block (see
|
||||
// https://github.com/rust-lang/rust/pull/46457#issuecomment-351750952).
|
||||
let var_scope = get_namespace_for_item(cx, def_id);
|
||||
let span = tcx.def_span(def_id);
|
||||
|
||||
|
@ -2287,9 +2284,9 @@ pub fn create_vtable_metadata(cx: &CodegenCx<'ll, 'tcx>, ty: Ty<'tcx>, vtable: &
|
|||
let type_metadata = type_metadata(cx, ty, syntax_pos::DUMMY_SP);
|
||||
|
||||
unsafe {
|
||||
// LLVMRustDIBuilderCreateStructType() wants an empty array. A null
|
||||
// `LLVMRustDIBuilderCreateStructType()` wants an empty array. A null
|
||||
// pointer will lead to hard to trace and debug LLVM assertions
|
||||
// later on in llvm/lib/IR/Value.cpp.
|
||||
// later on in `llvm/lib/IR/Value.cpp`.
|
||||
let empty_array = create_DIArray(DIB(cx), &[]);
|
||||
|
||||
let name = const_cstr!("vtable");
|
||||
|
@ -2327,7 +2324,7 @@ pub fn create_vtable_metadata(cx: &CodegenCx<'ll, 'tcx>, ty: Ty<'tcx>, vtable: &
|
|||
}
|
||||
}
|
||||
|
||||
// Creates an "extension" of an existing DIScope into another file.
|
||||
/// Creates an "extension" of an existing `DIScope` into another file.
|
||||
pub fn extend_scope_to_file(
|
||||
cx: &CodegenCx<'ll, '_>,
|
||||
scope_metadata: &'ll DIScope,
|
||||
|
|
|
@ -152,7 +152,7 @@ impl IntrinsicCallMethods<'tcx> for Builder<'a, 'll, 'tcx> {
|
|||
match scalar.value {
|
||||
Primitive::Int(..) => {
|
||||
if self.cx().size_of(ret_ty).bytes() < 4 {
|
||||
// va_arg should not be called on a integer type
|
||||
// `va_arg` should not be called on a integer type
|
||||
// less than 4 bytes in length. If it is, promote
|
||||
// the integer to a `i32` and truncate the result
|
||||
// back to the smaller type.
|
||||
|
|
|
@ -1,21 +1,32 @@
|
|||
//! Codegen the completed AST to the LLVM IR.
|
||||
//!
|
||||
//! Some functions here, such as codegen_block and codegen_expr, return a value --
|
||||
//! the result of the codegen to LLVM -- while others, such as codegen_fn
|
||||
//! and mono_item, are called only for the side effect of adding a
|
||||
//! Some functions here, such as `codegen_block` and `codegen_expr`, return a value --
|
||||
//! the result of the codegen to LLVM -- while others, such as `codegen_fn`
|
||||
//! and `mono_item`, are called only for the side effect of adding a
|
||||
//! particular definition to the LLVM IR output we're producing.
|
||||
//!
|
||||
//! Hopefully useful general knowledge about codegen:
|
||||
//!
|
||||
//! * There's no way to find out the `Ty` type of a Value. Doing so
|
||||
//! * There's no way to find out the `Ty` type of a `Value`. Doing so
|
||||
//! would be "trying to get the eggs out of an omelette" (credit:
|
||||
//! pcwalton). You can, instead, find out its `llvm::Type` by calling `val_ty`,
|
||||
//! but one `llvm::Type` corresponds to many `Ty`s; for instance, `tup(int, int,
|
||||
//! int)` and `rec(x=int, y=int, z=int)` will have the same `llvm::Type`.
|
||||
|
||||
use crate::{ModuleCodegen, ModuleKind, CachedModuleCodegen};
use crate::{CachedModuleCodegen, CrateInfo, MemFlags, ModuleCodegen, ModuleKind};
use crate::back::write::{
OngoingCodegen, start_async_codegen, submit_pre_lto_module_to_llvm,
submit_post_lto_module_to_llvm,
};
use crate::common::{RealPredicate, TypeKind, IntPredicate};
use crate::meth;
use crate::mir;
use crate::mir::operand::OperandValue;
use crate::mir::place::PlaceRef;
use crate::traits::*;

use rustc::dep_graph::cgu_reuse_tracker::CguReuse;
use rustc::hir;
use rustc::hir::def_id::{DefId, LOCAL_CRATE};
use rustc::middle::cstore::EncodedMetadata;
use rustc::middle::lang_items::StartFnLangItem;

@ -23,6 +34,7 @@ use rustc::middle::weak_lang_items;
use rustc::mir::mono::{CodegenUnitNameBuilder, CodegenUnit, MonoItem};
use rustc::ty::{self, Ty, TyCtxt, Instance};
use rustc::ty::layout::{self, Align, TyLayout, LayoutOf, VariantIdx, HasTyCtxt};
use rustc::ty::layout::{FAT_PTR_ADDR, FAT_PTR_EXTRA};
use rustc::ty::query::Providers;
use rustc::middle::cstore::{self, LinkagePreference};
use rustc::util::common::{time, print_time_passes_entry, set_time_depth, time_depth};

@ -31,25 +43,12 @@ use rustc::session::Session;
use rustc::util::nodemap::FxHashMap;
use rustc_index::vec::Idx;
use rustc_codegen_utils::{symbol_names_test, check_for_rustc_errors_attr};
use rustc::ty::layout::{FAT_PTR_ADDR, FAT_PTR_EXTRA};
use crate::mir::place::PlaceRef;
use crate::back::write::{OngoingCodegen, start_async_codegen, submit_pre_lto_module_to_llvm,
submit_post_lto_module_to_llvm};
use crate::{MemFlags, CrateInfo};
use crate::common::{RealPredicate, TypeKind, IntPredicate};
use crate::meth;
use crate::mir;

use crate::traits::*;
use syntax::attr;
use syntax_pos::Span;

use std::cmp;
use std::ops::{Deref, DerefMut};
use std::time::{Instant, Duration};
use syntax_pos::Span;
use syntax::attr;
use rustc::hir;

use crate::mir::operand::OperandValue;

pub fn bin_op_to_icmp_predicate(op: hir::BinOpKind,
signed: bool)

@ -116,9 +115,8 @@ pub fn compare_simd_types<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
/// Retrieves the information we are losing (making dynamic) in an unsizing
/// adjustment.
///
/// The `old_info` argument is a bit funny. It is intended for use
/// in an upcast, where the new vtable for an object will be derived
/// from the old one.
/// The `old_info` argument is a bit odd. It is intended for use in an upcast,
/// where the new vtable for an object will be derived from the old one.
pub fn unsized_info<'tcx, Cx: CodegenMethods<'tcx>>(
cx: &Cx,
source: Ty<'tcx>,

@ -140,16 +138,19 @@ pub fn unsized_info<'tcx, Cx: CodegenMethods<'tcx>>(
(_, &ty::Dynamic(ref data, ..)) => {
let vtable_ptr = cx.layout_of(cx.tcx().mk_mut_ptr(target))
.field(cx, FAT_PTR_EXTRA);
cx.const_ptrcast(meth::get_vtable(cx, source, data.principal()),
cx.backend_type(vtable_ptr))
cx.const_ptrcast(
meth::get_vtable(cx, source, data.principal()),
cx.backend_type(vtable_ptr),
)
}
_ => bug!("unsized_info: invalid unsizing {:?} -> {:?}",
source,
target),
_ => bug!(
"unsized_info: invalid unsizing {:?} -> {:?}",
source, target
),
}
}

/// Coerce `src` to `dst_ty`. `src_ty` must be a thin pointer.
/// Coerces `src` to `dst_ty`. `src_ty` must be a thin pointer.
pub fn unsize_thin_ptr<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
bx: &mut Bx,
src: Bx::Value,

@ -199,8 +200,8 @@ pub fn unsize_thin_ptr<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
}
}

/// Coerce `src`, which is a reference to a value of type `src_ty`,
/// to a value of type `dst_ty` and store the result in `dst`
/// Coerces `src`, which is a reference to a value of type `src_ty`,
/// to a value of type `dst_ty`, and stores the result in `dst`.
pub fn coerce_unsized_into<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
bx: &mut Bx,
src: PlaceRef<'tcx, Bx::Value>,

@ -250,9 +251,11 @@ pub fn coerce_unsized_into<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
}
}
}
_ => bug!("coerce_unsized_into: invalid coercion {:?} -> {:?}",
_ => bug!(
"coerce_unsized_into: invalid coercion {:?} -> {:?}",
src_ty,
dst_ty),
dst_ty,
),
}
}
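The `unsized_info` / `coerce_unsized_into` hunks above generate code for unsizing coercions that turn a thin pointer into a fat pointer carrying the "information we are losing". A hedged, user-level sketch of the coercions involved (names are illustrative):

```
use std::fmt::Debug;

// The thin pointer is paired with the erased information:
// a vtable pointer for trait objects, a length for slices.
fn main() {
    let value: i64 = 42;
    let thin: &i64 = &value;
    let fat: &dyn Debug = thin; // unsizing coercion: adds a vtable pointer

    let array = [1, 2, 3];
    let slice: &[i32] = &array; // unsizing coercion: adds a length

    println!("{:?} / {:?}", fat, slice);
}
```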
@ -1,8 +1,7 @@
use rustc_target::abi::call::FnAbi;

use crate::traits::*;

use rustc::ty::{self, Ty, Instance};
use rustc_target::abi::call::FnAbi;

#[derive(Copy, Clone, Debug)]
pub struct VirtualIndex(u64);

@ -20,7 +19,7 @@ impl<'a, 'tcx> VirtualIndex {
self,
bx: &mut Bx,
llvtable: Bx::Value,
fn_abi: &FnAbi<'tcx, Ty<'tcx>>
fn_abi: &FnAbi<'tcx, Ty<'tcx>>,
) -> Bx::Value {
// Load the data pointer from the object.
debug!("get_fn({:?}, {:?})", llvtable, self);

@ -33,7 +32,7 @@ impl<'a, 'tcx> VirtualIndex {
let gep = bx.inbounds_gep(llvtable, &[bx.const_usize(self.0)]);
let ptr = bx.load(gep, ptr_align);
bx.nonnull_metadata(ptr);
// Vtable loads are invariant
// Vtable loads are invariant.
bx.set_invariant_load(ptr);
ptr
}

@ -41,7 +40,7 @@ impl<'a, 'tcx> VirtualIndex {
pub fn get_usize<Bx: BuilderMethods<'a, 'tcx>>(
self,
bx: &mut Bx,
llvtable: Bx::Value
llvtable: Bx::Value,
) -> Bx::Value {
// Load the data pointer from the object.
debug!("get_int({:?}, {:?})", llvtable, self);

@ -50,7 +49,7 @@ impl<'a, 'tcx> VirtualIndex {
let usize_align = bx.tcx().data_layout.pointer_align.abi;
let gep = bx.inbounds_gep(llvtable, &[bx.const_usize(self.0)]);
let ptr = bx.load(gep, usize_align);
// Vtable loads are invariant
// Vtable loads are invariant.
bx.set_invariant_load(ptr);
ptr
}

@ -78,7 +77,7 @@ pub fn get_vtable<'tcx, Cx: CodegenMethods<'tcx>>(
return val;
}

// Not in the cache. Build it.
// Not in the cache; build it.
let nullptr = cx.const_null(cx.type_i8p());

let methods_root;

@ -105,7 +104,7 @@ pub fn get_vtable<'tcx, Cx: CodegenMethods<'tcx>>(
let layout = cx.layout_of(ty);
// /////////////////////////////////////////////////////////////////////////////////////////////
// If you touch this code, be sure to also make the corresponding changes to
// `get_vtable` in rust_mir/interpret/traits.rs
// `get_vtable` in `rust_mir/interpret/traits.rs`.
// /////////////////////////////////////////////////////////////////////////////////////////////
let components: Vec<_> = [
cx.get_fn_addr(Instance::resolve_drop_in_place(cx.tcx(), ty)),
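`VirtualIndex` above loads individual slots out of a trait object's vtable, and `get_vtable` builds that vtable starting with the drop-in-place pointer followed by size and align, then the methods. A user-level sketch of the dispatch this supports (trait and struct names are illustrative; the slot arithmetic itself is internal to the compiler):

```
// Calling `speak` through `&dyn Animal` goes through the vtable laid out as
// [drop_in_place, size, align, speak, ...], with methods after the header slots.
trait Animal {
    fn speak(&self) -> String;
}

struct Dog;

impl Animal for Dog {
    fn speak(&self) -> String {
        "woof".to_string()
    }
}

fn main() {
    let dog = Dog;
    let animal: &dyn Animal = &dog; // fat pointer: (data ptr, vtable ptr)
    assert_eq!(animal.speak(), "woof"); // dispatched through a vtable slot
}
```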
@ -1,19 +1,18 @@
use super::{FunctionCx, LocalRef};
use super::place::PlaceRef;

use crate::MemFlags;
use crate::base;
use crate::glue;
use crate::traits::*;

use rustc::mir::interpret::{ConstValue, ErrorHandled, Pointer, Scalar};
use rustc::mir;
use rustc::ty;
use rustc::ty::layout::{self, Align, LayoutOf, TyLayout, Size};

use crate::base;
use crate::MemFlags;
use crate::glue;

use crate::traits::*;

use std::fmt;

use super::{FunctionCx, LocalRef};
use super::place::PlaceRef;

/// The representation of a Rust value. The enum variant is in fact
/// uniquely determined by the value's type, but is kept as a
/// safety check.

@ -343,6 +342,7 @@ impl<'a, 'tcx, V: CodegenObject> OperandValue<V> {
}
}
}

pub fn store_unsized<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
self,
bx: &mut Bx,
@ -1,28 +1,28 @@
use super::{FunctionCx, LocalRef};
use super::operand::OperandValue;

use crate::MemFlags;
use crate::common::IntPredicate;
use crate::glue;
use crate::traits::*;

use rustc::ty::{self, Instance, Ty};
use rustc::ty::layout::{self, Align, TyLayout, LayoutOf, VariantIdx, HasTyCtxt};
use rustc::mir;
use rustc::mir::tcx::PlaceTy;
use crate::MemFlags;
use crate::common::IntPredicate;
use crate::glue;

use crate::traits::*;

use super::{FunctionCx, LocalRef};
use super::operand::OperandValue;

#[derive(Copy, Clone, Debug)]
pub struct PlaceRef<'tcx, V> {
/// Pointer to the contents of the place.
/// A pointer to the contents of the place.
pub llval: V,

/// This place's extra data if it is unsized, or null.
/// This place's extra data if it is unsized, or `None` if null.
pub llextra: Option<V>,

/// Monomorphized type of this place, including variant information.
/// The monomorphized type of this place, including variant information.
pub layout: TyLayout<'tcx>,

/// What alignment we know for this place.
/// The alignment we know for this place.
pub align: Align,
}

@ -107,7 +107,6 @@ impl<'a, 'tcx, V: CodegenObject> PlaceRef<'tcx, V> {
bug!("unexpected layout `{:#?}` in PlaceRef::len", self.layout)
}
}

}

impl<'a, 'tcx, V: CodegenObject> PlaceRef<'tcx, V> {
@ -1,22 +1,22 @@
use super::{FunctionCx, LocalRef};
use super::operand::{OperandRef, OperandValue};
use super::place::PlaceRef;

use crate::base;
use crate::MemFlags;
use crate::common::{self, RealPredicate, IntPredicate};
use crate::traits::*;

use rustc::ty::{self, Ty, adjustment::{PointerCast}, Instance};
use rustc::ty::cast::{CastTy, IntTy};
use rustc::ty::layout::{self, LayoutOf, HasTyCtxt};
use rustc::mir;
use rustc::middle::lang_items::ExchangeMallocFnLangItem;
use rustc_apfloat::{ieee, Float, Status, Round};
use std::{u128, i128};
use syntax::symbol::sym;
use syntax::source_map::{DUMMY_SP, Span};

use crate::base;
use crate::MemFlags;
use crate::common::{self, RealPredicate, IntPredicate};

use crate::traits::*;

use super::{FunctionCx, LocalRef};
use super::operand::{OperandRef, OperandValue};
use super::place::PlaceRef;
use std::{u128, i128};

impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
pub fn codegen_rvalue(

@ -31,8 +31,8 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
match *rvalue {
mir::Rvalue::Use(ref operand) => {
let cg_operand = self.codegen_operand(&mut bx, operand);
// FIXME: consider not copying constants through stack. (fixable by codegenning
// constants into OperandValue::Ref, why don’t we do that yet if we don’t?)
// FIXME: consider not copying constants through stack. (Fixable by codegen'ing
// constants into `OperandValue::Ref`; why don’t we do that yet if we don’t?)
cg_operand.val.store(&mut bx, dest);
bx
}

@ -41,7 +41,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
// The destination necessarily contains a fat pointer, so if
// it's a scalar pair, it's a fat pointer or newtype thereof.
if bx.cx().is_backend_scalar_pair(dest.layout) {
// into-coerce of a thin pointer to a fat pointer - just
// Into-coerce of a thin pointer to a fat pointer -- just
// use the operand path.
let (mut bx, temp) = self.codegen_rvalue_operand(bx, rvalue);
temp.val.store(&mut bx, dest);

@ -56,10 +56,10 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
match operand.val {
OperandValue::Pair(..) |
OperandValue::Immediate(_) => {
// unsize from an immediate structure. We don't
// Unsize from an immediate structure. We don't
// really need a temporary alloca here, but
// avoiding it would require us to have
// `coerce_unsized_into` use extractvalue to
// `coerce_unsized_into` use `extractvalue` to
// index into the struct, and this case isn't
// important enough for it.
debug!("codegen_rvalue: creating ugly alloca");

@ -74,7 +74,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
base::coerce_unsized_into(&mut bx, source, dest);
}
OperandValue::Ref(_, Some(_), _) => {
bug!("unsized coercion on an unsized rvalue")
bug!("unsized coercion on an unsized rvalue");
}
}
bx

@ -160,7 +160,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
bx
}

_ => bug!("unsized assignment other than Rvalue::Use"),
_ => bug!("unsized assignment other than `Rvalue::Use`"),
}
}

@ -220,17 +220,16 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
}
}
mir::CastKind::Pointer(PointerCast::UnsafeFnPointer) => {
// this is a no-op at the LLVM level
// This is a no-op at the LLVM level.
operand.val
}
mir::CastKind::Pointer(PointerCast::Unsize) => {
assert!(bx.cx().is_backend_scalar_pair(cast));
match operand.val {
OperandValue::Pair(lldata, llextra) => {
// unsize from a fat pointer - this is a
// unsize from a fat pointer -- this is a
// "trait-object-to-supertrait" coercion, for
// example,
// &'a fmt::Debug+Send => &'a fmt::Debug,
// example, `&'a fmt::Debug + Send => &'a fmt::Debug`.

// HACK(eddyb) have to bitcast pointers
// until LLVM removes pointee types.

@ -245,13 +244,13 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
OperandValue::Pair(lldata, llextra)
}
OperandValue::Ref(..) => {
bug!("by-ref operand {:?} in codegen_rvalue_operand",
bug!("by-ref operand {:?} in `codegen_rvalue_operand`",
operand);
}
}
}
mir::CastKind::Pointer(PointerCast::MutToConstPointer)
| mir::CastKind::Misc if bx.cx().is_backend_scalar_pair(operand.layout) => {
mir::CastKind::Pointer(PointerCast::MutToConstPointer) |
mir::CastKind::Misc if bx.cx().is_backend_scalar_pair(operand.layout) => {
if let OperandValue::Pair(data_ptr, meta) = operand.val {
if bx.cx().is_backend_scalar_pair(cast) {
let data_cast = bx.pointercast(data_ptr,

@ -265,12 +264,12 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
OperandValue::Immediate(llval)
}
} else {
bug!("Unexpected non-Pair operand")
bug!("unexpected non-pair operand");
}
}
mir::CastKind::Pointer(PointerCast::MutToConstPointer)
| mir::CastKind::Pointer(PointerCast::ArrayToPointer)
| mir::CastKind::Misc => {
mir::CastKind::Pointer(PointerCast::MutToConstPointer) |
mir::CastKind::Pointer(PointerCast::ArrayToPointer) |
mir::CastKind::Misc => {
assert!(bx.cx().is_backend_immediate(cast));
let ll_t_out = bx.cx().immediate_backend_type(cast);
if operand.layout.abi.is_uninhabited() {
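The `PointerCast::Unsize` arm above handles the "trait-object-to-supertrait" coercion mentioned in the comment, where both the data and vtable parts of an existing fat pointer are reused. A hedged, user-level illustration (names are made up); one such coercion that compiles on stable Rust is dropping an auto trait like `Send` from the object type:

```
use std::fmt::Debug;

// The static type drops the `Send` bound, but the fat pointer's
// data and vtable components are carried over unchanged.
fn strip_send<'a>(x: &'a (dyn Debug + Send)) -> &'a dyn Debug {
    x // trait-object-to-"supertrait" coercion
}

fn main() {
    let value = 7u32;
    let with_send: &(dyn Debug + Send) = &value;
    let without_send = strip_send(with_send);
    println!("{:?}", without_send);
}
```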
@ -1,17 +1,18 @@
use rustc::ty::layout::{HasTyCtxt, LayoutOf, TyLayout};
use rustc::ty::Ty;

use super::write::WriteBackendMethods;
use super::CodegenObject;

use rustc::ty::layout::{HasTyCtxt, LayoutOf, TyLayout};
use rustc::ty::Ty;
use rustc::middle::cstore::EncodedMetadata;
use rustc::session::{Session, config};
use rustc::ty::TyCtxt;
use rustc_codegen_utils::codegen_backend::CodegenBackend;
use std::sync::Arc;
use std::sync::mpsc;
use syntax::expand::allocator::AllocatorKind;
use syntax_pos::symbol::Symbol;

use std::sync::Arc;
use std::sync::mpsc;

pub trait BackendTypes {
type Value: CodegenObject;
type Function: CodegenObject;

@ -4,14 +4,17 @@ use super::debuginfo::DebugInfoBuilderMethods;
use super::intrinsic::IntrinsicCallMethods;
use super::type_::ArgAbiMethods;
use super::{HasCodegen, StaticBuilderMethods};

use crate::common::{AtomicOrdering, AtomicRmwBinOp, IntPredicate, RealPredicate,
SynchronizationScope};
use crate::mir::operand::OperandRef;
use crate::mir::place::PlaceRef;
use crate::MemFlags;

use rustc::ty::Ty;
use rustc::ty::layout::{Align, Size, HasParamEnv};
use rustc_target::spec::{HasTargetSpec};
use rustc_target::spec::HasTargetSpec;

use std::ops::Range;
use std::iter::TrustedLen;

@ -41,9 +41,9 @@ pub use self::type_::{
ArgAbiMethods, BaseTypeMethods, DerivedTypeMethods, LayoutTypeMethods, TypeMethods,
};
pub use self::write::{ModuleBufferMethods, ThinBufferMethods, WriteBackendMethods};
use rustc::ty::layout::{HasParamEnv, HasTyCtxt};
use rustc_target::spec::{HasTargetSpec};

use rustc::ty::layout::{HasParamEnv, HasTyCtxt};
use rustc_target::spec::HasTargetSpec;

use std::fmt;

@ -224,7 +224,7 @@ impl Diagnostic {
}));
msg.push((format!("`{}", found_extra), Style::NoStyle));

// For now, just attach these as notes
// For now, just attach these as notes.
self.highlighted_note(msg);
self
}
@ -1,6 +1,7 @@
use crate::hair::{self, *};
use crate::hair::cx::Cx;
use crate::hair::cx::to_ref::ToRef;

use rustc::middle::region;
use rustc::hir;
use rustc::ty;

@ -136,7 +136,7 @@ impl<Tag> Operand<Tag> {

#[derive(Copy, Clone, Debug, PartialEq)]
pub struct OpTy<'tcx, Tag=()> {
op: Operand<Tag>, // Keep this private, it helps enforce invariants
op: Operand<Tag>, // Keep this private; it helps enforce invariants.
pub layout: TyLayout<'tcx>,
}

@ -203,7 +203,7 @@ pub(super) fn from_known_layout<'tcx>(
if cfg!(debug_assertions) {
let layout2 = compute()?;
assert_eq!(layout.details, layout2.details,
"Mismatch in layout of supposedly equal-layout types {:?} and {:?}",
"mismatch in layout of supposedly equal-layout types {:?} and {:?}",
layout.ty, layout2.ty);
}
Ok(layout)

@ -48,7 +48,7 @@ pub enum Place<Tag=(), Id=AllocId> {

#[derive(Copy, Clone, Debug)]
pub struct PlaceTy<'tcx, Tag=()> {
place: Place<Tag>, // Keep this private, it helps enforce invariants
place: Place<Tag>, // Keep this private; it helps enforce invariants.
pub layout: TyLayout<'tcx>,
}
@ -1,14 +1,14 @@
use super::{InterpCx, Machine, MemoryKind, FnVal};

use rustc::ty::{self, Ty, Instance, TypeFoldable};
use rustc::ty::layout::{Size, Align, LayoutOf, HasDataLayout};
use rustc::mir::interpret::{Scalar, Pointer, InterpResult, PointerArithmetic,};

use super::{InterpCx, Machine, MemoryKind, FnVal};

impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
/// Creates a dynamic vtable for the given type and vtable origin. This is used only for
/// objects.
///
/// The `trait_ref` encodes the erased self type. Hence if we are
/// The `trait_ref` encodes the erased self type. Hence, if we are
/// making an object `Foo<Trait>` from a value of type `Foo<T>`, then
/// `trait_ref` would map `T: Trait`.
pub fn get_vtable(

@ -51,7 +51,7 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
let ptr_align = self.tcx.data_layout.pointer_align.abi;
// /////////////////////////////////////////////////////////////////////////////////////////
// If you touch this code, be sure to also make the corresponding changes to
// `get_vtable` in rust_codegen_llvm/meth.rs
// `get_vtable` in `rust_codegen_llvm/meth.rs`.
// /////////////////////////////////////////////////////////////////////////////////////////
let vtable = self.memory.allocate(
ptr_size * (3 + methods.len() as u64),

@ -97,16 +97,16 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
Ok(vtable)
}

/// Resolve the function at the specified slot in the provided
/// Resolves the function at the specified slot in the provided
/// vtable. An index of '0' corresponds to the first method
/// declared in the trait of the provided vtable
/// declared in the trait of the provided vtable.
pub fn get_vtable_slot(
&self,
vtable: Scalar<M::PointerTag>,
idx: usize
) -> InterpResult<'tcx, FnVal<'tcx, M::ExtraFnVal>> {
let ptr_size = self.pointer_size();
// Skip over the 'drop_ptr', 'size', and 'align' fields
// Skip over the 'drop_ptr', 'size', and 'align' fields.
let vtable_slot = vtable.ptr_offset(ptr_size * (idx as u64 + 3), self)?;
let vtable_slot = self.memory.check_ptr_access(
vtable_slot,

@ -118,12 +118,12 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
Ok(self.memory.get_fn(fn_ptr)?)
}

/// Returns the drop fn instance as well as the actual dynamic type
/// Returns the drop fn instance as well as the actual dynamic type.
pub fn read_drop_type_from_vtable(
&self,
vtable: Scalar<M::PointerTag>,
) -> InterpResult<'tcx, (ty::Instance<'tcx>, Ty<'tcx>)> {
// we don't care about the pointee type, we just want a pointer
// We don't care about the pointee type; we just want a pointer.
let vtable = self.memory.check_ptr_access(
vtable,
self.tcx.data_layout.pointer_size,

@ -149,7 +149,7 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
vtable: Scalar<M::PointerTag>,
) -> InterpResult<'tcx, (Size, Align)> {
let pointer_size = self.pointer_size();
// We check for size = 3*ptr_size, that covers the drop fn (unused here),
// We check for `size = 3 * ptr_size`, which covers the drop fn (unused here),
// the size, and the align (which we read below).
let vtable = self.memory.check_ptr_access(
vtable,
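Both `get_vtable` implementations above allocate `ptr_size * (3 + methods.len())` bytes: three header slots (the drop-in-place function, the size, and the align) followed by one slot per method, and `get_vtable_slot` offsets by `idx + 3` accordingly. A small standalone sketch of that arithmetic, assuming an 8-byte pointer size purely for illustration:

```
// `PTR_SIZE` is an assumption (a 64-bit target), not something read from the compiler.
const PTR_SIZE: u64 = 8;

/// Total allocation: drop_in_place + size + align, then one slot per method.
fn vtable_bytes(num_methods: u64) -> u64 {
    PTR_SIZE * (3 + num_methods)
}

/// Byte offset of method `idx`, mirroring `ptr_size * (idx + 3)` in `get_vtable_slot`.
fn method_slot_offset(idx: u64) -> u64 {
    PTR_SIZE * (idx + 3)
}

fn main() {
    assert_eq!(vtable_bytes(2), 40); // 3 header slots + 2 methods, 8 bytes each
    assert_eq!(method_slot_offset(0), 24); // first method sits right after the header
}
```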
@ -1,5 +1,5 @@
//! Mono Item Collection
//! ===========================
//! ====================
//!
//! This module is responsible for discovering all items that will contribute to
//! to code generation of the crate. The important part here is that it not only

@ -174,9 +174,10 @@
//! this is not implemented however: a mono item will be produced
//! regardless of whether it is actually needed or not.

use crate::monomorphize;

use rustc::hir::{self, CodegenFnAttrFlags};
use rustc::hir::itemlikevisit::ItemLikeVisitor;

use rustc::hir::def_id::{DefId, LOCAL_CRATE};
use rustc::mir::interpret::{AllocId, ConstValue};
use rustc::middle::lang_items::{ExchangeMallocFnLangItem, StartFnLangItem};

@ -189,8 +190,6 @@ use rustc::mir::{self, Location, PlaceBase, Static, StaticKind};
use rustc::mir::visit::Visitor as MirVisitor;
use rustc::mir::mono::{MonoItem, InstantiationMode};
use rustc::mir::interpret::{Scalar, GlobalId, GlobalAlloc, ErrorHandled};

use crate::monomorphize;
use rustc::util::nodemap::{FxHashSet, FxHashMap, DefIdMap};
use rustc::util::common::time;

@ -530,7 +529,6 @@ struct MirNeighborCollector<'a, 'tcx> {
}

impl<'a, 'tcx> MirVisitor<'tcx> for MirNeighborCollector<'a, 'tcx> {

fn visit_rvalue(&mut self, rvalue: &mir::Rvalue<'tcx>, location: Location) {
debug!("visiting rvalue {:?}", *rvalue);

@ -698,7 +696,7 @@ impl<'a, 'tcx> MirVisitor<'tcx> for MirNeighborCollector<'a, 'tcx> {
}
}
PlaceBase::Local(_) => {
// Locals have no relevance for collector
// Locals have no relevance for collector.
}
}
}

@ -752,7 +750,7 @@ fn visit_instance_use<'tcx>(
ty::InstanceDef::ReifyShim(..) |
ty::InstanceDef::Virtual(..) |
ty::InstanceDef::DropGlue(_, None) => {
// don't need to emit shim if we are calling directly.
// Don't need to emit shim if we are calling directly.
if !is_direct_call {
output.push(create_fn_mono_item(instance));
}

@ -769,8 +767,8 @@ fn visit_instance_use<'tcx>(
}
}

// Returns true if we should codegen an instance in the local crate.
// Returns false if we can just link to the upstream crate and therefore don't
// Returns `true` if we should codegen an instance in the local crate.
// Returns `false` if we can just link to the upstream crate and therefore don't
// need a mono item.
fn should_monomorphize_locally<'tcx>(tcx: TyCtxt<'tcx>, instance: &Instance<'tcx>) -> bool {
let def_id = match instance.def {

@ -786,24 +784,24 @@ fn should_monomorphize_locally<'tcx>(tcx: TyCtxt<'tcx>, instance: &Instance<'tcx
};

if tcx.is_foreign_item(def_id) {
// We can always link to foreign items
// We can always link to foreign items.
return false;
}

if def_id.is_local() {
// local items cannot be referred to locally without monomorphizing them locally
// Local items cannot be referred to locally without monomorphizing them locally.
return true;
}

if tcx.is_reachable_non_generic(def_id) ||
is_available_upstream_generic(tcx, def_id, instance.substs) {
// We can link to the item in question, no instance needed
// in this crate
// in this crate.
return false;
}

if !tcx.is_mir_available(def_id) {
bug!("Cannot create local mono-item for {:?}", def_id)
bug!("cannot create local mono-item for {:?}", def_id)
}
return true;

@ -823,7 +821,7 @@ fn should_monomorphize_locally<'tcx>(tcx: TyCtxt<'tcx>, instance: &Instance<'tcx

// If this instance has non-erasable parameters, it cannot be a shared
// monomorphization. Non-generic instances are already handled above
// by `is_reachable_non_generic()`
// by `is_reachable_non_generic()`.
if substs.non_erasable_generics().next().is_none() {
return false
}

@ -836,7 +834,7 @@ fn should_monomorphize_locally<'tcx>(tcx: TyCtxt<'tcx>, instance: &Instance<'tcx
}
}

/// For given pair of source and target type that occur in an unsizing coercion,
/// For a given pair of source and target type that occur in an unsizing coercion,
/// this function finds the pair of types that determines the vtable linking
/// them.
///

@ -930,10 +928,9 @@ fn find_vtable_types_for_unsizing<'tcx>(
source_fields.len() == target_fields.len());

find_vtable_types_for_unsizing(tcx,
source_fields[coerce_index].ty(tcx,
source_substs),
target_fields[coerce_index].ty(tcx,
target_substs))
source_fields[coerce_index].ty(tcx, source_substs),
target_fields[coerce_index].ty(tcx, target_substs)
)
}
_ => bug!("find_vtable_types_for_unsizing: invalid coercion {:?} -> {:?}",
source_ty,

@ -975,7 +972,7 @@ fn create_mono_items_for_vtable_methods<'tcx>(
output.extend(methods);
}

// Also add the destructor
// Also add the destructor.
visit_drop_use(tcx, impl_ty, false, output);
}
}

@ -1002,7 +999,7 @@ impl ItemLikeVisitor<'v> for RootCollector<'_, 'v> {
hir::ItemKind::TraitAlias(..) |
hir::ItemKind::OpaqueTy(..) |
hir::ItemKind::Mod(..) => {
// Nothing to do, just keep recursing...
// Nothing to do, just keep recursing.
}

hir::ItemKind::Impl(..) => {

@ -1075,7 +1072,7 @@ impl ItemLikeVisitor<'v> for RootCollector<'_, 'v> {
let def_id = self.tcx.hir().local_def_id(ii.hir_id);
self.push_if_root(def_id);
}
_ => { /* Nothing to do here */ }
_ => { /* nothing to do here */ }
}
}
}

@ -1095,7 +1092,7 @@ impl RootCollector<'_, 'v> {
}
}

/// If `def_id` represents a root, then push it onto the list of
/// If `def_id` represents a root, pushes it onto the list of
/// outputs. (Note that all roots must be monomorphic.)
fn push_if_root(&mut self, def_id: DefId) {
if self.is_root(def_id) {

@ -1217,7 +1214,7 @@ fn create_mono_items_for_default_impls<'tcx>(
}
}

/// Scan the miri alloc in order to find function calls, closures, and drop-glue
/// Scans the miri alloc in order to find function calls, closures, and drop-glue.
fn collect_miri<'tcx>(tcx: TyCtxt<'tcx>, alloc_id: AllocId, output: &mut Vec<MonoItem<'tcx>>) {
let alloc_kind = tcx.alloc_map.lock().get(alloc_id);
match alloc_kind {

@ -1244,7 +1241,7 @@ fn collect_miri<'tcx>(tcx: TyCtxt<'tcx>, alloc_id: AllocId, output: &mut Vec<Mon
}
}

/// Scan the MIR in order to find function calls, closures, and drop-glue
/// Scans the MIR in order to find function calls, closures, and drop-glue.
fn collect_neighbours<'tcx>(
tcx: TyCtxt<'tcx>,
instance: Instance<'tcx>,
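`find_vtable_types_for_unsizing` above recurses into wrapper fields to find the innermost (source, target) pair that actually determines the vtable. A hedged, user-level illustration of that kind of layered coercion (trait and struct names are made up): the coercion is written on the outer smart pointers, but the vtable that ends up in the fat pointer belongs to the innermost pair, `(Dog, dyn Animal)`.

```
use std::cell::RefCell;
use std::rc::Rc;

trait Animal {
    fn name(&self) -> &'static str;
}

struct Dog;

impl Animal for Dog {
    fn name(&self) -> &'static str {
        "dog"
    }
}

fn main() {
    let concrete: Rc<RefCell<Dog>> = Rc::new(RefCell::new(Dog));
    // Unsizing coercion through two wrapper layers; the vtable is for Dog as dyn Animal.
    let object: Rc<RefCell<dyn Animal>> = concrete;
    assert_eq!(object.borrow().name(), "dog");
}
```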
@ -22,7 +22,7 @@ pub fn custom_coerce_unsize_info<'tcx>(
tcx.coerce_unsized_info(impl_def_id).custom_kind.unwrap()
}
vtable => {
bug!("invalid CoerceUnsized vtable: {:?}", vtable);
bug!("invalid `CoerceUnsized` vtable: {:?}", vtable);
}
}
}

@ -3,6 +3,7 @@
// substitutions.

use crate::check::FnCtxt;

use rustc::hir;
use rustc::hir::def_id::{DefId, DefIndex};
use rustc::hir::intravisit::{self, NestedVisitorMap, Visitor};

@ -12,10 +13,11 @@ use rustc::ty::fold::{TypeFoldable, TypeFolder};
use rustc::ty::{self, Ty, TyCtxt};
use rustc::util::nodemap::DefIdSet;
use rustc_data_structures::sync::Lrc;
use std::mem;
use syntax::symbol::sym;
use syntax_pos::Span;

use std::mem;

///////////////////////////////////////////////////////////////////////////
// Entry point

@ -481,8 +483,10 @@ impl<'cx, 'tcx> WritebackCx<'cx, 'tcx> {
if let ty::Opaque(defin_ty_def_id, _substs) = definition_ty.kind {
if let hir::OpaqueTyOrigin::TypeAlias = opaque_defn.origin {
if def_id == defin_ty_def_id {
debug!("Skipping adding concrete definition for opaque type {:?} {:?}",
opaque_defn, defin_ty_def_id);
debug!(
"skipping adding concrete definition for opaque type {:?} {:?}",
opaque_defn, defin_ty_def_id
);
skip_add = true;
}
}

@ -507,7 +511,7 @@ impl<'cx, 'tcx> WritebackCx<'cx, 'tcx> {
if old.concrete_type != definition_ty || old.substs != opaque_defn.substs {
span_bug!(
span,
"visit_opaque_types tried to write different types for the same \
"`visit_opaque_types` tried to write different types for the same \
opaque type: {:?}, {:?}, {:?}, {:?}",
def_id,
definition_ty,

@ -1,6 +1,5 @@
// Check that we can manually implement an object
// unsafe trait for its trait object
//
// Check that we can manually implement an object-unsafe trait for its trait object.

// run-pass

#![feature(object_safe_for_dispatch)]

@ -46,7 +45,7 @@ fn main() {

let mut res = String::new();

// Directly call static
// Directly call static.
res.push(Struct::stat()); // "A"
res.push(<dyn Bad>::stat()); // "AC"

@ -55,15 +54,13 @@ fn main() {
// These look similar enough...
let bad = unsafe { std::mem::transmute::<&dyn Good, &dyn Bad>(good) };

// Call virtual
// Call virtual.
res.push(s.virt()); // "ACB"
res.push(bad.virt()); // "ACBD"

// Indirectly call static
// Indirectly call static.
res.push(s.indirect()); // "ACBDA"
res.push(bad.indirect()); // "ACBDAC"

if &res != "ACBDAC" {
panic!();
}
assert_eq!(&res, "ACBDAC");
}

@ -1,5 +1,5 @@
// run-pass
// Check that trait-objects without a principal codegen properly.
// Check that trait objects without a principal codegen properly.

use std::sync::atomic::{AtomicUsize, Ordering};
use std::mem;