Auto merge of #107738 - matthiaskrgr:rollup-o18lzi8, r=matthiaskrgr

Rollup of 9 pull requests

Successful merges:

 - #106477 (Refine error spans for "The trait bound `T: Trait` is not satisfied" when passing literal structs/tuples)
 - #107596 (Add nicer output to PGO build timer)
 - #107692 (Sort Generator `print-type-sizes` according to their yield points)
 - #107714 (Clarify wording on f64::round() and f32::round())
 - #107720 (end entry paragraph with a period (.))
 - #107724 (remove unused rustc_* imports)
 - #107725 (Turn MarkdownWithToc into a struct with named fields)
 - #107731 (interpret: move discriminant reading and writing to separate file)
 - #107735 (Add mailmap for commits made by xes@meta.com)

Failed merges:

r? `@ghost`
`@rustbot` modify labels: rollup
bors 2023-02-06 20:20:53 +00:00
commit e1eaa2d5d4
46 changed files with 1536 additions and 425 deletions


@ -166,6 +166,7 @@ Eduard-Mihai Burtescu <edy.burt@gmail.com>
Eduardo Bautista <me@eduardobautista.com> <=>
Eduardo Bautista <me@eduardobautista.com> <mail@eduardobautista.com>
Eduardo Broto <ebroto@tutanota.com>
Edward Shen <code@eddie.sh> <xes@meta.com>
Elliott Slaughter <elliottslaughter@gmail.com> <eslaughter@mozilla.com>
Elly Fong-Jones <elly@leptoquark.net>
Eric Holk <eric.holk@gmail.com> <eholk@cs.indiana.edu>


@ -3689,7 +3689,6 @@ dependencies = [
name = "rustc_ast_lowering"
version = "0.0.0"
dependencies = [
"rustc_arena",
"rustc_ast",
"rustc_ast_pretty",
"rustc_data_structures",
@ -3698,7 +3697,6 @@ dependencies = [
"rustc_index",
"rustc_macros",
"rustc_middle",
"rustc_query_system",
"rustc_session",
"rustc_span",
"rustc_target",
@ -3731,7 +3729,6 @@ name = "rustc_ast_pretty"
version = "0.0.0"
dependencies = [
"rustc_ast",
"rustc_parse_format",
"rustc_span",
]
@ -3838,7 +3835,6 @@ dependencies = [
"rustc_metadata",
"rustc_middle",
"rustc_query_system",
"rustc_serialize",
"rustc_session",
"rustc_span",
"rustc_symbol_mangling",
@ -3864,7 +3860,6 @@ dependencies = [
"rustc_arena",
"rustc_ast",
"rustc_attr",
"rustc_const_eval",
"rustc_data_structures",
"rustc_errors",
"rustc_fs_util",
@ -3905,7 +3900,6 @@ dependencies = [
"rustc_macros",
"rustc_middle",
"rustc_mir_dataflow",
"rustc_query_system",
"rustc_session",
"rustc_span",
"rustc_target",
@ -4100,15 +4094,12 @@ dependencies = [
"rustc_data_structures",
"rustc_errors",
"rustc_feature",
"rustc_graphviz",
"rustc_hir",
"rustc_hir_pretty",
"rustc_index",
"rustc_infer",
"rustc_lint",
"rustc_macros",
"rustc_middle",
"rustc_serialize",
"rustc_session",
"rustc_span",
"rustc_target",
@ -4195,7 +4186,6 @@ dependencies = [
"rustc_macros",
"rustc_middle",
"rustc_serialize",
"rustc_session",
"rustc_span",
"rustc_target",
"smallvec",
@ -4238,7 +4228,6 @@ dependencies = [
"rustc_privacy",
"rustc_query_impl",
"rustc_resolve",
"rustc_serialize",
"rustc_session",
"rustc_span",
"rustc_symbol_mangling",
@ -4404,7 +4393,6 @@ dependencies = [
"rustc_apfloat",
"rustc_arena",
"rustc_ast",
"rustc_attr",
"rustc_data_structures",
"rustc_errors",
"rustc_hir",
@ -4436,7 +4424,6 @@ dependencies = [
"rustc_macros",
"rustc_middle",
"rustc_serialize",
"rustc_session",
"rustc_span",
"rustc_target",
"smallvec",
@ -4567,7 +4554,6 @@ dependencies = [
"rustc_middle",
"rustc_session",
"rustc_span",
"rustc_trait_selection",
"tracing",
]
@ -4588,7 +4574,6 @@ dependencies = [
"rustc_serialize",
"rustc_session",
"rustc_span",
"rustc_target",
"thin-vec",
"tracing",
]
@ -4789,7 +4774,6 @@ dependencies = [
"rustc_hir",
"rustc_index",
"rustc_infer",
"rustc_lint_defs",
"rustc_macros",
"rustc_middle",
"rustc_parse_format",
@ -4811,7 +4795,6 @@ dependencies = [
"chalk-ir",
"chalk-solve",
"rustc_ast",
"rustc_attr",
"rustc_data_structures",
"rustc_hir",
"rustc_index",


@ -7,7 +7,6 @@ edition = "2021"
doctest = false
[dependencies]
rustc_arena = { path = "../rustc_arena" }
rustc_ast = { path = "../rustc_ast" }
rustc_ast_pretty = { path = "../rustc_ast_pretty" }
rustc_data_structures = { path = "../rustc_data_structures" }
@ -16,7 +15,6 @@ rustc_hir = { path = "../rustc_hir" }
rustc_index = { path = "../rustc_index" }
rustc_middle = { path = "../rustc_middle" }
rustc_macros = { path = "../rustc_macros" }
rustc_query_system = { path = "../rustc_query_system" }
rustc_session = { path = "../rustc_session" }
rustc_span = { path = "../rustc_span" }
rustc_target = { path = "../rustc_target" }


@ -7,5 +7,4 @@ edition = "2021"
[dependencies]
rustc_ast = { path = "../rustc_ast" }
rustc_parse_format = { path = "../rustc_parse_format" }
rustc_span = { path = "../rustc_span" }


@ -30,7 +30,6 @@ rustc_macros = { path = "../rustc_macros" }
rustc_metadata = { path = "../rustc_metadata" }
rustc_query_system = { path = "../rustc_query_system" }
rustc_session = { path = "../rustc_session" }
rustc_serialize = { path = "../rustc_serialize" }
rustc_symbol_mangling = { path = "../rustc_symbol_mangling" }
rustc_target = { path = "../rustc_target" }
smallvec = { version = "1.8.1", features = ["union", "may_dangle"] }


@ -41,7 +41,6 @@ rustc_metadata = { path = "../rustc_metadata" }
rustc_query_system = { path = "../rustc_query_system" }
rustc_target = { path = "../rustc_target" }
rustc_session = { path = "../rustc_session" }
rustc_const_eval = { path = "../rustc_const_eval" }
[dependencies.object]
version = "0.30.1"


@ -19,7 +19,6 @@ rustc_infer = { path = "../rustc_infer" }
rustc_macros = { path = "../rustc_macros" }
rustc_middle = { path = "../rustc_middle" }
rustc_mir_dataflow = { path = "../rustc_mir_dataflow" }
rustc_query_system = { path = "../rustc_query_system" }
rustc_session = { path = "../rustc_session" }
rustc_target = { path = "../rustc_target" }
rustc_trait_selection = { path = "../rustc_trait_selection" }


@ -0,0 +1,238 @@
//! Functions for reading and writing discriminants of multi-variant layouts (enums and generators).
use rustc_middle::ty::layout::{LayoutOf, PrimitiveExt};
use rustc_middle::{mir, ty};
use rustc_target::abi::{self, TagEncoding};
use rustc_target::abi::{VariantIdx, Variants};
use super::{ImmTy, InterpCx, InterpResult, Machine, OpTy, PlaceTy, Scalar};
impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
/// Writes the discriminant of the given variant.
#[instrument(skip(self), level = "trace")]
pub fn write_discriminant(
&mut self,
variant_index: VariantIdx,
dest: &PlaceTy<'tcx, M::Provenance>,
) -> InterpResult<'tcx> {
// Layout computation excludes uninhabited variants from consideration
// therefore there's no way to represent those variants in the given layout.
// Essentially, uninhabited variants do not have a tag that corresponds to their
// discriminant, so we cannot do anything here.
// When evaluating we will always error before even getting here, but ConstProp 'executes'
// dead code, so we cannot ICE here.
if dest.layout.for_variant(self, variant_index).abi.is_uninhabited() {
throw_ub!(UninhabitedEnumVariantWritten)
}
match dest.layout.variants {
abi::Variants::Single { index } => {
assert_eq!(index, variant_index);
}
abi::Variants::Multiple {
tag_encoding: TagEncoding::Direct,
tag: tag_layout,
tag_field,
..
} => {
// No need to validate the discriminant here because the
// `TyAndLayout::for_variant()` call earlier already checks the variant is valid.
let discr_val =
dest.layout.ty.discriminant_for_variant(*self.tcx, variant_index).unwrap().val;
// raw discriminants for enums are isize or bigger during
// their computation, but the in-memory tag is the smallest possible
// representation
let size = tag_layout.size(self);
let tag_val = size.truncate(discr_val);
let tag_dest = self.place_field(dest, tag_field)?;
self.write_scalar(Scalar::from_uint(tag_val, size), &tag_dest)?;
}
abi::Variants::Multiple {
tag_encoding:
TagEncoding::Niche { untagged_variant, ref niche_variants, niche_start },
tag: tag_layout,
tag_field,
..
} => {
// No need to validate the discriminant here because the
// `TyAndLayout::for_variant()` call earlier already checks the variant is valid.
if variant_index != untagged_variant {
let variants_start = niche_variants.start().as_u32();
let variant_index_relative = variant_index
.as_u32()
.checked_sub(variants_start)
.expect("overflow computing relative variant idx");
// We need to use machine arithmetic when taking into account `niche_start`:
// tag_val = variant_index_relative + niche_start_val
let tag_layout = self.layout_of(tag_layout.primitive().to_int_ty(*self.tcx))?;
let niche_start_val = ImmTy::from_uint(niche_start, tag_layout);
let variant_index_relative_val =
ImmTy::from_uint(variant_index_relative, tag_layout);
let tag_val = self.binary_op(
mir::BinOp::Add,
&variant_index_relative_val,
&niche_start_val,
)?;
// Write result.
let niche_dest = self.place_field(dest, tag_field)?;
self.write_immediate(*tag_val, &niche_dest)?;
}
}
}
Ok(())
}
/// Read discriminant, return the runtime value as well as the variant index.
/// Can also legally be called on non-enums (e.g. through the discriminant_value intrinsic)!
#[instrument(skip(self), level = "trace")]
pub fn read_discriminant(
&self,
op: &OpTy<'tcx, M::Provenance>,
) -> InterpResult<'tcx, (Scalar<M::Provenance>, VariantIdx)> {
trace!("read_discriminant_value {:#?}", op.layout);
// Get type and layout of the discriminant.
let discr_layout = self.layout_of(op.layout.ty.discriminant_ty(*self.tcx))?;
trace!("discriminant type: {:?}", discr_layout.ty);
// We use "discriminant" to refer to the value associated with a particular enum variant.
// This is not to be confused with its "variant index", which is just determining its position in the
// declared list of variants -- they can differ with explicitly assigned discriminants.
// We use "tag" to refer to how the discriminant is encoded in memory, which can be either
// straight-forward (`TagEncoding::Direct`) or with a niche (`TagEncoding::Niche`).
let (tag_scalar_layout, tag_encoding, tag_field) = match op.layout.variants {
Variants::Single { index } => {
let discr = match op.layout.ty.discriminant_for_variant(*self.tcx, index) {
Some(discr) => {
// This type actually has discriminants.
assert_eq!(discr.ty, discr_layout.ty);
Scalar::from_uint(discr.val, discr_layout.size)
}
None => {
// On a type without actual discriminants, variant is 0.
assert_eq!(index.as_u32(), 0);
Scalar::from_uint(index.as_u32(), discr_layout.size)
}
};
return Ok((discr, index));
}
Variants::Multiple { tag, ref tag_encoding, tag_field, .. } => {
(tag, tag_encoding, tag_field)
}
};
// There are *three* layouts that come into play here:
// - The discriminant has a type for typechecking. This is `discr_layout`, and is used for
// the `Scalar` we return.
// - The tag (encoded discriminant) has layout `tag_layout`. This is always an integer type,
// and used to interpret the value we read from the tag field.
// For the return value, a cast to `discr_layout` is performed.
// - The field storing the tag has a layout, which is very similar to `tag_layout` but
// may be a pointer. This is `tag_val.layout`; we just use it for sanity checks.
// Get layout for tag.
let tag_layout = self.layout_of(tag_scalar_layout.primitive().to_int_ty(*self.tcx))?;
// Read tag and sanity-check `tag_layout`.
let tag_val = self.read_immediate(&self.operand_field(op, tag_field)?)?;
assert_eq!(tag_layout.size, tag_val.layout.size);
assert_eq!(tag_layout.abi.is_signed(), tag_val.layout.abi.is_signed());
trace!("tag value: {}", tag_val);
// Figure out which discriminant and variant this corresponds to.
Ok(match *tag_encoding {
TagEncoding::Direct => {
let scalar = tag_val.to_scalar();
// Generate a specific error if `tag_val` is not an integer.
// (`tag_bits` itself is only used for error messages below.)
let tag_bits = scalar
.try_to_int()
.map_err(|dbg_val| err_ub!(InvalidTag(dbg_val)))?
.assert_bits(tag_layout.size);
// Cast bits from tag layout to discriminant layout.
// After the checks we did above, this cannot fail, as
// discriminants are int-like.
let discr_val =
self.cast_from_int_like(scalar, tag_val.layout, discr_layout.ty).unwrap();
let discr_bits = discr_val.assert_bits(discr_layout.size);
// Convert discriminant to variant index, and catch invalid discriminants.
let index = match *op.layout.ty.kind() {
ty::Adt(adt, _) => {
adt.discriminants(*self.tcx).find(|(_, var)| var.val == discr_bits)
}
ty::Generator(def_id, substs, _) => {
let substs = substs.as_generator();
substs
.discriminants(def_id, *self.tcx)
.find(|(_, var)| var.val == discr_bits)
}
_ => span_bug!(self.cur_span(), "tagged layout for non-adt non-generator"),
}
.ok_or_else(|| err_ub!(InvalidTag(Scalar::from_uint(tag_bits, tag_layout.size))))?;
// Return the cast value, and the index.
(discr_val, index.0)
}
TagEncoding::Niche { untagged_variant, ref niche_variants, niche_start } => {
let tag_val = tag_val.to_scalar();
// Compute the variant this niche value/"tag" corresponds to. With niche layout,
// discriminant (encoded in niche/tag) and variant index are the same.
let variants_start = niche_variants.start().as_u32();
let variants_end = niche_variants.end().as_u32();
let variant = match tag_val.try_to_int() {
Err(dbg_val) => {
// So this is a pointer then, and casting to an int failed.
// Can only happen during CTFE.
// The niche must be just 0, and the ptr not null, then we know this is
// okay. Everything else, we conservatively reject.
let ptr_valid = niche_start == 0
&& variants_start == variants_end
&& !self.scalar_may_be_null(tag_val)?;
if !ptr_valid {
throw_ub!(InvalidTag(dbg_val))
}
untagged_variant
}
Ok(tag_bits) => {
let tag_bits = tag_bits.assert_bits(tag_layout.size);
// We need to use machine arithmetic to get the relative variant idx:
// variant_index_relative = tag_val - niche_start_val
let tag_val = ImmTy::from_uint(tag_bits, tag_layout);
let niche_start_val = ImmTy::from_uint(niche_start, tag_layout);
let variant_index_relative_val =
self.binary_op(mir::BinOp::Sub, &tag_val, &niche_start_val)?;
let variant_index_relative =
variant_index_relative_val.to_scalar().assert_bits(tag_val.layout.size);
// Check if this is in the range that indicates an actual discriminant.
if variant_index_relative <= u128::from(variants_end - variants_start) {
let variant_index_relative = u32::try_from(variant_index_relative)
.expect("we checked that this fits into a u32");
// Then computing the absolute variant idx should not overflow any more.
let variant_index = variants_start
.checked_add(variant_index_relative)
.expect("overflow computing absolute variant idx");
let variants_len = op
.layout
.ty
.ty_adt_def()
.expect("tagged layout for non adt")
.variants()
.len();
assert!(usize::try_from(variant_index).unwrap() < variants_len);
VariantIdx::from_u32(variant_index)
} else {
untagged_variant
}
}
};
// Compute the size of the scalar we need to return.
// No need to cast, because the variant index directly serves as discriminant and is
// encoded in the tag.
(Scalar::from_uint(variant.as_u32(), discr_layout.size), variant)
}
})
}
}
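
As a side note for readers of this new file (a minimal sketch, not part of the commit): with `TagEncoding::Direct` the discriminant is stored in a dedicated tag field, whereas with `TagEncoding::Niche` it is folded into otherwise-invalid values of an existing field, which is why the niche paths above need the `niche_start` arithmetic. The standard library makes both layouts observable:

use std::mem::size_of;

// Niche tag: `None` reuses the null value of the reference, so `Option<&u8>`
// is pointer-sized (a documented guarantee for `Option<&T>`).
// Direct tag: `u64` has no niche, so `Option<u64>` needs a separate tag field
// and is strictly larger than `u64`.
fn main() {
    assert_eq!(size_of::<Option<&u8>>(), size_of::<&u8>());
    assert!(size_of::<Option<u64>>() > size_of::<u64>());
}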


@ -1,6 +1,7 @@
//! An interpreter for MIR used in CTFE and by miri
mod cast;
mod discriminant;
mod eval_context;
mod intern;
mod intrinsics;


@ -4,13 +4,12 @@
use either::{Either, Left, Right};
use rustc_hir::def::Namespace;
use rustc_middle::ty::layout::{LayoutOf, PrimitiveExt, TyAndLayout};
use rustc_middle::ty::layout::{LayoutOf, TyAndLayout};
use rustc_middle::ty::print::{FmtPrinter, PrettyPrinter};
use rustc_middle::ty::{ConstInt, Ty, ValTree};
use rustc_middle::{mir, ty};
use rustc_span::Span;
use rustc_target::abi::{self, Abi, Align, HasDataLayout, Size, TagEncoding};
use rustc_target::abi::{VariantIdx, Variants};
use rustc_target::abi::{self, Abi, Align, HasDataLayout, Size};
use super::{
alloc_range, from_known_layout, mir_assign_valid_types, AllocId, ConstValue, Frame, GlobalId,
@ -657,154 +656,6 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
};
Ok(OpTy { op, layout, align: Some(layout.align.abi) })
}
/// Read discriminant, return the runtime value as well as the variant index.
/// Can also legally be called on non-enums (e.g. through the discriminant_value intrinsic)!
pub fn read_discriminant(
&self,
op: &OpTy<'tcx, M::Provenance>,
) -> InterpResult<'tcx, (Scalar<M::Provenance>, VariantIdx)> {
trace!("read_discriminant_value {:#?}", op.layout);
// Get type and layout of the discriminant.
let discr_layout = self.layout_of(op.layout.ty.discriminant_ty(*self.tcx))?;
trace!("discriminant type: {:?}", discr_layout.ty);
// We use "discriminant" to refer to the value associated with a particular enum variant.
// This is not to be confused with its "variant index", which is just determining its position in the
// declared list of variants -- they can differ with explicitly assigned discriminants.
// We use "tag" to refer to how the discriminant is encoded in memory, which can be either
// straight-forward (`TagEncoding::Direct`) or with a niche (`TagEncoding::Niche`).
let (tag_scalar_layout, tag_encoding, tag_field) = match op.layout.variants {
Variants::Single { index } => {
let discr = match op.layout.ty.discriminant_for_variant(*self.tcx, index) {
Some(discr) => {
// This type actually has discriminants.
assert_eq!(discr.ty, discr_layout.ty);
Scalar::from_uint(discr.val, discr_layout.size)
}
None => {
// On a type without actual discriminants, variant is 0.
assert_eq!(index.as_u32(), 0);
Scalar::from_uint(index.as_u32(), discr_layout.size)
}
};
return Ok((discr, index));
}
Variants::Multiple { tag, ref tag_encoding, tag_field, .. } => {
(tag, tag_encoding, tag_field)
}
};
// There are *three* layouts that come into play here:
// - The discriminant has a type for typechecking. This is `discr_layout`, and is used for
// the `Scalar` we return.
// - The tag (encoded discriminant) has layout `tag_layout`. This is always an integer type,
// and used to interpret the value we read from the tag field.
// For the return value, a cast to `discr_layout` is performed.
// - The field storing the tag has a layout, which is very similar to `tag_layout` but
// may be a pointer. This is `tag_val.layout`; we just use it for sanity checks.
// Get layout for tag.
let tag_layout = self.layout_of(tag_scalar_layout.primitive().to_int_ty(*self.tcx))?;
// Read tag and sanity-check `tag_layout`.
let tag_val = self.read_immediate(&self.operand_field(op, tag_field)?)?;
assert_eq!(tag_layout.size, tag_val.layout.size);
assert_eq!(tag_layout.abi.is_signed(), tag_val.layout.abi.is_signed());
trace!("tag value: {}", tag_val);
// Figure out which discriminant and variant this corresponds to.
Ok(match *tag_encoding {
TagEncoding::Direct => {
let scalar = tag_val.to_scalar();
// Generate a specific error if `tag_val` is not an integer.
// (`tag_bits` itself is only used for error messages below.)
let tag_bits = scalar
.try_to_int()
.map_err(|dbg_val| err_ub!(InvalidTag(dbg_val)))?
.assert_bits(tag_layout.size);
// Cast bits from tag layout to discriminant layout.
// After the checks we did above, this cannot fail, as
// discriminants are int-like.
let discr_val =
self.cast_from_int_like(scalar, tag_val.layout, discr_layout.ty).unwrap();
let discr_bits = discr_val.assert_bits(discr_layout.size);
// Convert discriminant to variant index, and catch invalid discriminants.
let index = match *op.layout.ty.kind() {
ty::Adt(adt, _) => {
adt.discriminants(*self.tcx).find(|(_, var)| var.val == discr_bits)
}
ty::Generator(def_id, substs, _) => {
let substs = substs.as_generator();
substs
.discriminants(def_id, *self.tcx)
.find(|(_, var)| var.val == discr_bits)
}
_ => span_bug!(self.cur_span(), "tagged layout for non-adt non-generator"),
}
.ok_or_else(|| err_ub!(InvalidTag(Scalar::from_uint(tag_bits, tag_layout.size))))?;
// Return the cast value, and the index.
(discr_val, index.0)
}
TagEncoding::Niche { untagged_variant, ref niche_variants, niche_start } => {
let tag_val = tag_val.to_scalar();
// Compute the variant this niche value/"tag" corresponds to. With niche layout,
// discriminant (encoded in niche/tag) and variant index are the same.
let variants_start = niche_variants.start().as_u32();
let variants_end = niche_variants.end().as_u32();
let variant = match tag_val.try_to_int() {
Err(dbg_val) => {
// So this is a pointer then, and casting to an int failed.
// Can only happen during CTFE.
// The niche must be just 0, and the ptr not null, then we know this is
// okay. Everything else, we conservatively reject.
let ptr_valid = niche_start == 0
&& variants_start == variants_end
&& !self.scalar_may_be_null(tag_val)?;
if !ptr_valid {
throw_ub!(InvalidTag(dbg_val))
}
untagged_variant
}
Ok(tag_bits) => {
let tag_bits = tag_bits.assert_bits(tag_layout.size);
// We need to use machine arithmetic to get the relative variant idx:
// variant_index_relative = tag_val - niche_start_val
let tag_val = ImmTy::from_uint(tag_bits, tag_layout);
let niche_start_val = ImmTy::from_uint(niche_start, tag_layout);
let variant_index_relative_val =
self.binary_op(mir::BinOp::Sub, &tag_val, &niche_start_val)?;
let variant_index_relative =
variant_index_relative_val.to_scalar().assert_bits(tag_val.layout.size);
// Check if this is in the range that indicates an actual discriminant.
if variant_index_relative <= u128::from(variants_end - variants_start) {
let variant_index_relative = u32::try_from(variant_index_relative)
.expect("we checked that this fits into a u32");
// Then computing the absolute variant idx should not overflow any more.
let variant_index = variants_start
.checked_add(variant_index_relative)
.expect("overflow computing absolute variant idx");
let variants_len = op
.layout
.ty
.ty_adt_def()
.expect("tagged layout for non adt")
.variants()
.len();
assert!(usize::try_from(variant_index).unwrap() < variants_len);
VariantIdx::from_u32(variant_index)
} else {
untagged_variant
}
}
};
// Compute the size of the scalar we need to return.
// No need to cast, because the variant index directly serves as discriminant and is
// encoded in the tag.
(Scalar::from_uint(variant.as_u32(), discr_layout.size), variant)
}
})
}
}
// Some nodes are used a lot. Make sure they don't unintentionally get bigger.


@ -7,8 +7,8 @@ use either::{Either, Left, Right};
use rustc_ast::Mutability;
use rustc_middle::mir;
use rustc_middle::ty;
use rustc_middle::ty::layout::{LayoutOf, PrimitiveExt, TyAndLayout};
use rustc_target::abi::{self, Abi, Align, HasDataLayout, Size, TagEncoding, VariantIdx};
use rustc_middle::ty::layout::{LayoutOf, TyAndLayout};
use rustc_target::abi::{self, Abi, Align, HasDataLayout, Size, VariantIdx};
use super::{
alloc_range, mir_assign_valid_types, AllocId, AllocRef, AllocRefMut, CheckInAllocMsg,
@ -767,87 +767,8 @@ where
MPlaceTy { mplace, layout, align: layout.align.abi }
}
/// Writes the discriminant of the given variant.
#[instrument(skip(self), level = "debug")]
pub fn write_discriminant(
&mut self,
variant_index: VariantIdx,
dest: &PlaceTy<'tcx, M::Provenance>,
) -> InterpResult<'tcx> {
// Layout computation excludes uninhabited variants from consideration
// therefore there's no way to represent those variants in the given layout.
// Essentially, uninhabited variants do not have a tag that corresponds to their
// discriminant, so we cannot do anything here.
// When evaluating we will always error before even getting here, but ConstProp 'executes'
// dead code, so we cannot ICE here.
if dest.layout.for_variant(self, variant_index).abi.is_uninhabited() {
throw_ub!(UninhabitedEnumVariantWritten)
}
match dest.layout.variants {
abi::Variants::Single { index } => {
assert_eq!(index, variant_index);
}
abi::Variants::Multiple {
tag_encoding: TagEncoding::Direct,
tag: tag_layout,
tag_field,
..
} => {
// No need to validate that the discriminant here because the
// `TyAndLayout::for_variant()` call earlier already checks the variant is valid.
let discr_val =
dest.layout.ty.discriminant_for_variant(*self.tcx, variant_index).unwrap().val;
// raw discriminants for enums are isize or bigger during
// their computation, but the in-memory tag is the smallest possible
// representation
let size = tag_layout.size(self);
let tag_val = size.truncate(discr_val);
let tag_dest = self.place_field(dest, tag_field)?;
self.write_scalar(Scalar::from_uint(tag_val, size), &tag_dest)?;
}
abi::Variants::Multiple {
tag_encoding:
TagEncoding::Niche { untagged_variant, ref niche_variants, niche_start },
tag: tag_layout,
tag_field,
..
} => {
// No need to validate that the discriminant here because the
// `TyAndLayout::for_variant()` call earlier already checks the variant is valid.
if variant_index != untagged_variant {
let variants_start = niche_variants.start().as_u32();
let variant_index_relative = variant_index
.as_u32()
.checked_sub(variants_start)
.expect("overflow computing relative variant idx");
// We need to use machine arithmetic when taking into account `niche_start`:
// tag_val = variant_index_relative + niche_start_val
let tag_layout = self.layout_of(tag_layout.primitive().to_int_ty(*self.tcx))?;
let niche_start_val = ImmTy::from_uint(niche_start, tag_layout);
let variant_index_relative_val =
ImmTy::from_uint(variant_index_relative, tag_layout);
let tag_val = self.binary_op(
mir::BinOp::Add,
&variant_index_relative_val,
&niche_start_val,
)?;
// Write result.
let niche_dest = self.place_field(dest, tag_field)?;
self.write_immediate(*tag_val, &niche_dest)?;
}
}
}
Ok(())
}
/// Writes the discriminant of the given variant.
#[instrument(skip(self), level = "debug")]
/// Writes the aggregate to the destination.
#[instrument(skip(self), level = "trace")]
pub fn write_aggregate(
&mut self,
kind: &mir::AggregateKind<'tcx>,


@ -15,9 +15,7 @@ rustc_middle = { path = "../rustc_middle" }
rustc_attr = { path = "../rustc_attr" }
rustc_data_structures = { path = "../rustc_data_structures" }
rustc_errors = { path = "../rustc_errors" }
rustc_graphviz = { path = "../rustc_graphviz" }
rustc_hir = { path = "../rustc_hir" }
rustc_hir_pretty = { path = "../rustc_hir_pretty" }
rustc_target = { path = "../rustc_target" }
rustc_session = { path = "../rustc_session" }
smallvec = { version = "1.8.1", features = ["union", "may_dangle"] }
@ -27,6 +25,5 @@ rustc_index = { path = "../rustc_index" }
rustc_infer = { path = "../rustc_infer" }
rustc_trait_selection = { path = "../rustc_trait_selection" }
rustc_lint = { path = "../rustc_lint" }
rustc_serialize = { path = "../rustc_serialize" }
rustc_type_ir = { path = "../rustc_type_ir" }
rustc_feature = { path = "../rustc_feature" }


@ -0,0 +1,457 @@
use crate::FnCtxt;
use rustc_hir as hir;
use rustc_hir::def::Res;
use rustc_middle::ty::{self, DefIdTree, Ty};
use rustc_trait_selection::traits;
impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
/**
* Recursively searches for the most-specific blamable expression.
* For example, if you have a chain of constraints like:
* - want `Vec<i32>: Copy`
* - because `Option<Vec<i32>>: Copy` needs `Vec<i32>: Copy` because `impl <T: Copy> Copy for Option<T>`
* - because `(Option<Vec<i32>>, bool)` needs `Option<Vec<i32>>: Copy` because `impl <A: Copy, B: Copy> Copy for (A, B)`
* then if you pass in `(Some(vec![1, 2, 3]), false)`, this helper
* will find the expression `vec![1, 2, 3]` as the "most blameable" reason for this missing constraint.
*
* This function only updates the error span.
*/
pub fn blame_specific_expr_if_possible(
&self,
error: &mut traits::FulfillmentError<'tcx>,
expr: &'tcx hir::Expr<'tcx>,
) {
// Whether it succeeded or failed, it likely made some amount of progress.
// In the very worst case, it's just the same `expr` we originally passed in.
let expr = match self.blame_specific_expr_if_possible_for_obligation_cause_code(
&error.obligation.cause.code(),
expr,
) {
Ok(expr) => expr,
Err(expr) => expr,
};
// Either way, use this expression to update the error span.
// If it doesn't overlap the existing span at all, use the original span.
// FIXME: It would possibly be better to do this more continuously, at each level...
error.obligation.cause.span = expr
.span
.find_ancestor_in_same_ctxt(error.obligation.cause.span)
.unwrap_or(error.obligation.cause.span);
}
fn blame_specific_expr_if_possible_for_obligation_cause_code(
&self,
obligation_cause_code: &traits::ObligationCauseCode<'tcx>,
expr: &'tcx hir::Expr<'tcx>,
) -> Result<&'tcx hir::Expr<'tcx>, &'tcx hir::Expr<'tcx>> {
match obligation_cause_code {
traits::ObligationCauseCode::ExprBindingObligation(_, _, _, _) => {
// This is the "root"; we assume that the `expr` is already pointing here.
// Therefore, we return `Ok` so that this `expr` can be refined further.
Ok(expr)
}
traits::ObligationCauseCode::ImplDerivedObligation(impl_derived) => self
.blame_specific_expr_if_possible_for_derived_predicate_obligation(
impl_derived,
expr,
),
_ => {
// We don't recognize this kind of constraint, so we cannot refine the expression
// any further.
Err(expr)
}
}
}
/// We want to achieve the error span in the following example:
///
/// ```ignore (just for demonstration)
/// struct Burrito<Filling> {
/// filling: Filling,
/// }
/// impl <Filling: Delicious> Delicious for Burrito<Filling> {}
/// fn eat_delicious_food<Food: Delicious>(_food: Food) {}
///
/// fn will_type_error() {
/// eat_delicious_food(Burrito { filling: Kale });
/// } // ^--- The trait bound `Kale: Delicious`
/// // is not satisfied
/// ```
///
/// Without calling this function, the error span will cover the entire argument expression.
///
/// Before we do any of this logic, we recursively apply the same refinement to the parent
/// obligation. Hence we refine the `expr` "outwards-in" and bail at the first kind of expression/impl we don't recognize.
///
/// This function returns a `Result<&Expr, &Expr>` - either way, it returns the `Expr` whose span should be
/// reported as an error. If it is `Ok`, then the refinement succeeded. If it is `Err`, then it may be
/// only a partial success - but it cannot be refined even further.
fn blame_specific_expr_if_possible_for_derived_predicate_obligation(
&self,
obligation: &traits::ImplDerivedObligationCause<'tcx>,
expr: &'tcx hir::Expr<'tcx>,
) -> Result<&'tcx hir::Expr<'tcx>, &'tcx hir::Expr<'tcx>> {
// First, we attempt to refine the `expr` for our span using the parent obligation.
// If this cannot be done, then we are already stuck, so we stop early (hence the use
// of the `?` try operator here).
let expr = self.blame_specific_expr_if_possible_for_obligation_cause_code(
&*obligation.derived.parent_code,
expr,
)?;
// This is the "trait" (meaning, the predicate "proved" by this `impl`) which provides the `Self` type we care about.
// For the purposes of this function, we hope that it is a `struct` type, and that our current `expr` is a literal of
// that struct type.
let impl_trait_self_ref: Option<ty::TraitRef<'tcx>> =
self.tcx.impl_trait_ref(obligation.impl_def_id).map(|impl_def| impl_def.skip_binder());
let Some(impl_trait_self_ref) = impl_trait_self_ref else {
// It is possible that this is absent. In this case, we make no progress.
return Err(expr);
};
// We only really care about the `Self` type itself, which we extract from the ref.
let impl_self_ty: Ty<'tcx> = impl_trait_self_ref.self_ty();
let impl_predicates: ty::GenericPredicates<'tcx> =
self.tcx.predicates_of(obligation.impl_def_id);
let Some(impl_predicate_index) = obligation.impl_def_predicate_index else {
// We don't have the index, so we can only guess.
return Err(expr);
};
if impl_predicate_index >= impl_predicates.predicates.len() {
// This shouldn't happen, but since this is only a diagnostic improvement, avoid breaking things.
return Err(expr);
}
let relevant_broken_predicate: ty::PredicateKind<'tcx> =
impl_predicates.predicates[impl_predicate_index].0.kind().skip_binder();
match relevant_broken_predicate {
ty::PredicateKind::Clause(ty::Clause::Trait(broken_trait)) => {
// ...
self.blame_specific_part_of_expr_corresponding_to_generic_param(
broken_trait.trait_ref.self_ty().into(),
expr,
impl_self_ty.into(),
)
}
_ => Err(expr),
}
}
/// Drills into `expr` to arrive at the equivalent location of `param` in `in_ty`.
/// For example, given
/// - expr: `(Some(vec![1, 2, 3]), false)`
/// - param: `T`
/// - in_ty: `(Option<Vec<T>>, bool)`
/// we would drill until we arrive at `vec![1, 2, 3]`.
///
/// If successful, we return `Ok(refined_expr)`. If unsuccessful, we return `Err(partially_refined_expr)`,
/// which will go as far as possible. For example, given `(foo(), false)` instead, we would drill to
/// `foo()` and then return `Err("foo()")`.
///
/// This means that you can (and should) use the `?` try operator to chain multiple calls to this
/// function with different types, since you can only continue drilling the second time if you
/// succeeded the first time.
fn blame_specific_part_of_expr_corresponding_to_generic_param(
&self,
param: ty::GenericArg<'tcx>,
expr: &'tcx hir::Expr<'tcx>,
in_ty: ty::GenericArg<'tcx>,
) -> Result<&'tcx hir::Expr<'tcx>, &'tcx hir::Expr<'tcx>> {
if param == in_ty {
// The types match exactly, so we have drilled as far as we can.
return Ok(expr);
}
let ty::GenericArgKind::Type(in_ty) = in_ty.unpack() else {
return Err(expr);
};
if let (hir::ExprKind::Tup(expr_elements), ty::Tuple(in_ty_elements)) =
(&expr.kind, in_ty.kind())
{
if in_ty_elements.len() != expr_elements.len() {
return Err(expr);
}
// Find out which of `in_ty_elements` refer to `param`.
// FIXME: It may be better to take the first if there are multiple,
// just so that the error points to a smaller expression.
let Some((drill_expr, drill_ty)) = Self::is_iterator_singleton(expr_elements.iter().zip( in_ty_elements.iter()).filter(|(_expr_elem, in_ty_elem)| {
Self::find_param_in_ty((*in_ty_elem).into(), param)
})) else {
// The param is not mentioned, or it is mentioned in multiple indexes.
return Err(expr);
};
return self.blame_specific_part_of_expr_corresponding_to_generic_param(
param,
drill_expr,
drill_ty.into(),
);
}
if let (
hir::ExprKind::Struct(expr_struct_path, expr_struct_fields, _expr_struct_rest),
ty::Adt(in_ty_adt, in_ty_adt_generic_args),
) = (&expr.kind, in_ty.kind())
{
// First, confirm that this struct is the same one as in the types, and if so,
// find the right variant.
let Res::Def(expr_struct_def_kind, expr_struct_def_id) = self.typeck_results.borrow().qpath_res(expr_struct_path, expr.hir_id) else {
return Err(expr);
};
let variant_def_id = match expr_struct_def_kind {
hir::def::DefKind::Struct => {
if in_ty_adt.did() != expr_struct_def_id {
// FIXME: Deal with type aliases?
return Err(expr);
}
expr_struct_def_id
}
hir::def::DefKind::Variant => {
// If this is a variant, its parent is the type definition.
if in_ty_adt.did() != self.tcx.parent(expr_struct_def_id) {
// FIXME: Deal with type aliases?
return Err(expr);
}
expr_struct_def_id
}
_ => {
return Err(expr);
}
};
// We need to know which of the generic parameters mentions our target param.
// We expect that at least one of them does, since it is expected to be mentioned.
let Some((drill_generic_index, generic_argument_type)) =
Self::is_iterator_singleton(
in_ty_adt_generic_args.iter().enumerate().filter(
|(_index, in_ty_generic)| {
Self::find_param_in_ty(*in_ty_generic, param)
},
),
) else {
return Err(expr);
};
let struct_generic_parameters: &ty::Generics = self.tcx.generics_of(in_ty_adt.did());
if drill_generic_index >= struct_generic_parameters.params.len() {
return Err(expr);
}
let param_to_point_at_in_struct = self.tcx.mk_param_from_def(
struct_generic_parameters.param_at(drill_generic_index, self.tcx),
);
// We make 3 steps:
// Suppose we have a type like
// ```ignore (just for demonstration)
// struct ExampleStruct<T> {
// enabled: bool,
// item: Option<(usize, T, bool)>,
// }
//
// f(ExampleStruct {
// enabled: false,
// item: Some((0, Box::new(String::new()), true)),
// });
// ```
// Here, `f` is passed an `ExampleStruct<Box<String>>`, but it needs
// `String: Copy` to hold, which isn't true here.
//
// (1) First, we drill into `.item` and highlight that expression
// (2) Then we use the template type `Option<(usize, T, bool)>` to
// drill into the `T`, arriving at a `Box<String>` expression.
// (3) Then we keep going, drilling into this expression using our
// outer contextual information.
// (1) Find the (unique) field which mentions the type in our constraint:
let (field_expr, field_type) = self
.point_at_field_if_possible(
in_ty_adt.did(),
param_to_point_at_in_struct,
variant_def_id,
expr_struct_fields,
)
.ok_or(expr)?;
// (2) Continue drilling into the struct, ignoring the struct's
// generic argument types.
let expr = self.blame_specific_part_of_expr_corresponding_to_generic_param(
param_to_point_at_in_struct,
field_expr,
field_type.into(),
)?;
// (3) Continue drilling into the expression, having "passed
// through" the struct entirely.
return self.blame_specific_part_of_expr_corresponding_to_generic_param(
param,
expr,
generic_argument_type,
);
}
if let (
hir::ExprKind::Call(expr_callee, expr_args),
ty::Adt(in_ty_adt, in_ty_adt_generic_args),
) = (&expr.kind, in_ty.kind())
{
let hir::ExprKind::Path(expr_callee_path) = &expr_callee.kind else {
// FIXME: This case overlaps with another one worth handling,
// which should happen above since it applies to non-ADTs:
// we can drill down into regular generic functions.
return Err(expr);
};
// This is (possibly) a constructor call, like `Some(...)` or `MyStruct(a, b, c)`.
let Res::Def(expr_struct_def_kind, expr_ctor_def_id) = self.typeck_results.borrow().qpath_res(expr_callee_path, expr_callee.hir_id) else {
return Err(expr);
};
let variant_def_id = match expr_struct_def_kind {
hir::def::DefKind::Ctor(hir::def::CtorOf::Struct, hir::def::CtorKind::Fn) => {
if in_ty_adt.did() != self.tcx.parent(expr_ctor_def_id) {
// FIXME: Deal with type aliases?
return Err(expr);
}
self.tcx.parent(expr_ctor_def_id)
}
hir::def::DefKind::Ctor(hir::def::CtorOf::Variant, hir::def::CtorKind::Fn) => {
// If this is a variant, its parent is the type definition.
if in_ty_adt.did() != self.tcx.parent(expr_ctor_def_id) {
// FIXME: Deal with type aliases?
return Err(expr);
}
expr_ctor_def_id
}
_ => {
return Err(expr);
}
};
// We need to know which of the generic parameters mentions our target param.
// We expect that at least one of them does, since it is expected to be mentioned.
let Some((drill_generic_index, generic_argument_type)) =
Self::is_iterator_singleton(
in_ty_adt_generic_args.iter().enumerate().filter(
|(_index, in_ty_generic)| {
Self::find_param_in_ty(*in_ty_generic, param)
},
),
) else {
return Err(expr);
};
let struct_generic_parameters: &ty::Generics = self.tcx.generics_of(in_ty_adt.did());
if drill_generic_index >= struct_generic_parameters.params.len() {
return Err(expr);
}
let param_to_point_at_in_struct = self.tcx.mk_param_from_def(
struct_generic_parameters.param_at(drill_generic_index, self.tcx),
);
// We make 3 steps:
// Suppose we have a type like
// ```ignore (just for demonstration)
// struct ExampleStruct<T> {
// enabled: bool,
// item: Option<(usize, T, bool)>,
// }
//
// f(ExampleStruct {
// enabled: false,
// item: Some((0, Box::new(String::new()), true)),
// });
// ```
// Here, `f` is passed an `ExampleStruct<Box<String>>`, but it needs
// `String: Copy` to hold, which isn't true here.
//
// (1) First, we drill into `.item` and highlight that expression
// (2) Then we use the template type `Option<(usize, T, bool)>` to
// drill into the `T`, arriving at a `Box<String>` expression.
// (3) Then we keep going, drilling into this expression using our
// outer contextual information.
// (1) Find the (unique) field index which mentions the type in our constraint:
let Some((field_index, field_type)) = Self::is_iterator_singleton(
in_ty_adt
.variant_with_id(variant_def_id)
.fields
.iter()
.map(|field| field.ty(self.tcx, *in_ty_adt_generic_args))
.enumerate()
.filter(|(_index, field_type)| Self::find_param_in_ty((*field_type).into(), param))
) else {
return Err(expr);
};
if field_index >= expr_args.len() {
return Err(expr);
}
// (2) Continue drilling into the struct, ignoring the struct's
// generic argument types.
let expr = self.blame_specific_part_of_expr_corresponding_to_generic_param(
param_to_point_at_in_struct,
&expr_args[field_index],
field_type.into(),
)?;
// (3) Continue drilling into the expression, having "passed
// through" the struct entirely.
return self.blame_specific_part_of_expr_corresponding_to_generic_param(
param,
expr,
generic_argument_type,
);
}
// At this point, none of the basic patterns matched.
// One major possibility which remains is that we have a function call.
// In this case, it's often possible to dive deeper into the call to find something to blame,
// but this is not always possible.
Err(expr)
}
// FIXME: This can be made into a private, non-impl function later.
/// Traverses the given ty (either a `ty::Ty` or a `ty::GenericArg`) and searches for references
/// to the given `param_to_point_at`. Returns `true` if it finds any use of the param.
pub fn find_param_in_ty(
ty: ty::GenericArg<'tcx>,
param_to_point_at: ty::GenericArg<'tcx>,
) -> bool {
let mut walk = ty.walk();
while let Some(arg) = walk.next() {
if arg == param_to_point_at {
return true;
} else if let ty::GenericArgKind::Type(ty) = arg.unpack()
&& let ty::Alias(ty::Projection, ..) = ty.kind()
{
// This logic may seem a bit strange, but typically when
// we have a projection type in a function signature, the
// argument that's being passed into that signature is
// not actually constraining that projection's substs in
// a meaningful way. So we skip it, and see improvements
// in some UI tests.
walk.skip_current_subtree();
}
}
false
}
// FIXME: This can be made into a private, non-impl function later.
/// Returns `Some(iterator.next())` if it has exactly one item, and `None` otherwise.
pub fn is_iterator_singleton<T>(mut iterator: impl Iterator<Item = T>) -> Option<T> {
match (iterator.next(), iterator.next()) {
(_, Some(_)) => None,
(first, _) => first,
}
}
}
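
To make the effect of this new helper concrete, here is a hedged sketch of user code (the `requires_copy` function is made up for illustration, and the snippet intentionally fails to compile). It mirrors the `(Some(vec![1, 2, 3]), false)` chain from the doc comment above: with this change, the "trait bound `Vec<i32>: Copy` is not satisfied" error points at `vec![1, 2, 3]` rather than at the whole tuple argument.

fn requires_copy<T: Copy>(_t: T) {}

fn main() {
    // error[E0277]: the trait bound `Vec<i32>: Copy` is not satisfied
    // (the refined span highlights `vec![1, 2, 3]`, not the entire tuple)
    requires_copy((Some(vec![1, 2, 3]), false));
}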


@ -34,9 +34,10 @@ use rustc_trait_selection::traits::{self, ObligationCauseCode, SelectionContext}
use std::iter;
use std::mem;
use std::ops::ControlFlow;
use std::slice;
use std::ops::ControlFlow;
impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
pub(in super::super) fn check_casts(&mut self) {
// don't hold the borrow to deferred_cast_checks while checking to avoid borrow checker errors
@ -1843,7 +1844,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
.into_iter()
.flatten()
{
if self.point_at_arg_if_possible(
if self.blame_specific_arg_if_possible(
error,
def_id,
param,
@ -1873,7 +1874,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
.into_iter()
.flatten()
{
if self.point_at_arg_if_possible(
if self.blame_specific_arg_if_possible(
error,
def_id,
param,
@ -1898,16 +1899,24 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
for param in
[param_to_point_at, fallback_param_to_point_at, self_param_to_point_at]
{
if let Some(param) = param
&& self.point_at_field_if_possible(
error,
if let Some(param) = param {
let refined_expr = self.point_at_field_if_possible(
def_id,
param,
variant_def_id,
fields,
)
{
return true;
);
match refined_expr {
None => {}
Some((refined_expr, _)) => {
error.obligation.cause.span = refined_expr
.span
.find_ancestor_in_same_ctxt(error.obligation.cause.span)
.unwrap_or(refined_expr.span);
return true;
}
}
}
}
}
@ -1940,7 +1949,16 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
}
}
fn point_at_arg_if_possible(
/// - `blame_specific_*` means that the function will recursively traverse the expression,
/// looking for the most-specific-possible span to blame.
///
/// - `point_at_*` means that the function will only go "one level", pointing at the specific
/// expression mentioned.
///
/// `blame_specific_arg_if_possible` will find the most-specific expression anywhere inside
/// the provided function call expression, and mark it as responsible for the fulfillment
/// error.
fn blame_specific_arg_if_possible(
&self,
error: &mut traits::FulfillmentError<'tcx>,
def_id: DefId,
@ -1959,13 +1977,20 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
.inputs()
.iter()
.enumerate()
.filter(|(_, ty)| find_param_in_ty(**ty, param_to_point_at))
.filter(|(_, ty)| Self::find_param_in_ty((**ty).into(), param_to_point_at))
.collect();
// If there's one field that references the given generic, great!
if let [(idx, _)] = args_referencing_param.as_slice()
&& let Some(arg) = receiver
.map_or(args.get(*idx), |rcvr| if *idx == 0 { Some(rcvr) } else { args.get(*idx - 1) }) {
error.obligation.cause.span = arg.span.find_ancestor_in_same_ctxt(error.obligation.cause.span).unwrap_or(arg.span);
if let hir::Node::Expr(arg_expr) = self.tcx.hir().get(arg.hir_id) {
// This is more specific than pointing at the entire argument.
self.blame_specific_expr_if_possible(error, arg_expr)
}
error.obligation.cause.map_code(|parent_code| {
ObligationCauseCode::FunctionArgumentObligation {
arg_hir_id: arg.hir_id,
@ -1983,14 +2008,14 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
false
}
fn point_at_field_if_possible(
// FIXME: Make this private and move to mod adjust_fulfillment_errors
pub fn point_at_field_if_possible(
&self,
error: &mut traits::FulfillmentError<'tcx>,
def_id: DefId,
param_to_point_at: ty::GenericArg<'tcx>,
variant_def_id: DefId,
expr_fields: &[hir::ExprField<'tcx>],
) -> bool {
) -> Option<(&'tcx hir::Expr<'tcx>, Ty<'tcx>)> {
let def = self.tcx.adt_def(def_id);
let identity_substs = ty::InternalSubsts::identity_for_item(self.tcx, def_id);
@ -2000,7 +2025,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
.iter()
.filter(|field| {
let field_ty = field.ty(self.tcx, identity_substs);
find_param_in_ty(field_ty, param_to_point_at)
Self::find_param_in_ty(field_ty.into(), param_to_point_at)
})
.collect();
@ -2010,17 +2035,12 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
// same rules that check_expr_struct uses for macro hygiene.
if self.tcx.adjust_ident(expr_field.ident, variant_def_id) == field.ident(self.tcx)
{
error.obligation.cause.span = expr_field
.expr
.span
.find_ancestor_in_same_ctxt(error.obligation.cause.span)
.unwrap_or(expr_field.span);
return true;
return Some((expr_field.expr, self.tcx.type_of(field.did)));
}
}
}
false
None
}
fn point_at_path_if_possible(
@ -2240,23 +2260,3 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
}
}
}
fn find_param_in_ty<'tcx>(ty: Ty<'tcx>, param_to_point_at: ty::GenericArg<'tcx>) -> bool {
let mut walk = ty.walk();
while let Some(arg) = walk.next() {
if arg == param_to_point_at {
return true;
} else if let ty::GenericArgKind::Type(ty) = arg.unpack()
&& let ty::Alias(ty::Projection, ..) = ty.kind()
{
// This logic may seem a bit strange, but typically when
// we have a projection type in a function signature, the
// argument that's being passed into that signature is
// not actually constraining that projection's substs in
// a meaningful way. So we skip it, and see improvements
// in some UI tests.
walk.skip_current_subtree();
}
}
false
}


@ -1,4 +1,5 @@
mod _impl;
mod adjust_fulfillment_errors;
mod arg_matrix;
mod checks;
mod suggestions;


@ -1563,6 +1563,7 @@ impl<'a, 'tcx> ProbeContext<'a, 'tcx> {
traits::ImplDerivedObligationCause {
derived,
impl_def_id,
impl_def_predicate_index: None,
span,
},
))


@ -15,7 +15,6 @@ rustc_hir = { path = "../rustc_hir" }
rustc_index = { path = "../rustc_index" }
rustc_macros = { path = "../rustc_macros" }
rustc_serialize = { path = "../rustc_serialize" }
rustc_session = { path = "../rustc_session" }
rustc_span = { path = "../rustc_span" }
rustc_target = { path = "../rustc_target" }
smallvec = { version = "1.8.1", features = ["union", "may_dangle"] }


@ -145,30 +145,32 @@ impl<'tcx> Elaborator<'tcx> {
// Get predicates declared on the trait.
let predicates = tcx.super_predicates_of(data.def_id());
let obligations = predicates.predicates.iter().map(|&(mut pred, span)| {
// when parent predicate is non-const, elaborate it to non-const predicates.
if data.constness == ty::BoundConstness::NotConst {
pred = pred.without_const(tcx);
}
let obligations =
predicates.predicates.iter().enumerate().map(|(index, &(mut pred, span))| {
// when parent predicate is non-const, elaborate it to non-const predicates.
if data.constness == ty::BoundConstness::NotConst {
pred = pred.without_const(tcx);
}
let cause = obligation.cause.clone().derived_cause(
bound_predicate.rebind(data),
|derived| {
traits::ImplDerivedObligation(Box::new(
traits::ImplDerivedObligationCause {
derived,
impl_def_id: data.def_id(),
span,
},
))
},
);
predicate_obligation(
pred.subst_supertrait(tcx, &bound_predicate.rebind(data.trait_ref)),
obligation.param_env,
cause,
)
});
let cause = obligation.cause.clone().derived_cause(
bound_predicate.rebind(data),
|derived| {
traits::ImplDerivedObligation(Box::new(
traits::ImplDerivedObligationCause {
derived,
impl_def_id: data.def_id(),
impl_def_predicate_index: Some(index),
span,
},
))
},
);
predicate_obligation(
pred.subst_supertrait(tcx, &bound_predicate.rebind(data.trait_ref)),
obligation.param_env,
cause,
)
});
debug!(?data, ?obligations, "super_predicates");
// Only keep those bounds that we haven't already seen.


@ -20,7 +20,6 @@ rustc_macros = { path = "../rustc_macros" }
rustc_parse = { path = "../rustc_parse" }
rustc_session = { path = "../rustc_session" }
rustc_span = { path = "../rustc_span" }
rustc_serialize = { path = "../rustc_serialize" }
rustc_middle = { path = "../rustc_middle" }
rustc_ast_lowering = { path = "../rustc_ast_lowering" }
rustc_ast_passes = { path = "../rustc_ast_passes" }


@ -475,6 +475,8 @@ pub enum WellFormedLoc {
pub struct ImplDerivedObligationCause<'tcx> {
pub derived: DerivedObligationCause<'tcx>,
pub impl_def_id: DefId,
/// The index of the derived predicate in the parent impl's predicates.
pub impl_def_predicate_index: Option<usize>,
pub span: Span,
}
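
A hedged illustration of what the new index refers to (the types below are made up): if an impl's `predicates_of` list is `[A: Copy, B: Clone]` in declaration order and an obligation is derived from the second bound, the cause would carry `impl_def_predicate_index: Some(1)`, which is what lets the refined blame logic look up exactly that predicate on the impl.

// Illustrative only (assumes the enumeration order follows the declaration):
trait Pretty {}
struct Pair<A, B>(A, B);
impl<A: Copy, B: Clone> Pretty for Pair<A, B> {}
// A failure of `B: Clone` for some concrete `Pair<_, _>` use would record
// `impl_def_predicate_index: Some(1)` on the derived obligation cause.

fn main() {}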


@ -11,7 +11,6 @@ tracing = "0.1"
either = "1"
rustc_middle = { path = "../rustc_middle" }
rustc_apfloat = { path = "../rustc_apfloat" }
rustc_attr = { path = "../rustc_attr" }
rustc_data_structures = { path = "../rustc_data_structures" }
rustc_index = { path = "../rustc_index" }
rustc_errors = { path = "../rustc_errors" }


@ -19,6 +19,5 @@ rustc_index = { path = "../rustc_index" }
rustc_macros = { path = "../rustc_macros" }
rustc_middle = { path = "../rustc_middle" }
rustc_serialize = { path = "../rustc_serialize" }
rustc_session = { path = "../rustc_session" }
rustc_target = { path = "../rustc_target" }
rustc_span = { path = "../rustc_span" }


@ -13,6 +13,5 @@ rustc_macros = { path = "../rustc_macros" }
rustc_middle = { path = "../rustc_middle" }
rustc_session = { path = "../rustc_session" }
rustc_span = { path = "../rustc_span" }
rustc_trait_selection = { path = "../rustc_trait_selection" }
rustc_hir_analysis = { path = "../rustc_hir_analysis" }
tracing = "0.1"


@ -20,7 +20,6 @@ rustc-rayon-core = { version = "0.4.0", optional = true }
rustc_serialize = { path = "../rustc_serialize" }
rustc_session = { path = "../rustc_session" }
rustc_span = { path = "../rustc_span" }
rustc_target = { path = "../rustc_target" }
thin-vec = "0.2.9"
tracing = "0.1"


@ -84,7 +84,11 @@ impl CodeStats {
// Sort variants so the largest ones are shown first. A stable sort is
// used here so that source code order is preserved for all variants
// that have the same size.
variants.sort_by(|info1, info2| info2.size.cmp(&info1.size));
// Except for Generators, whose variants are already sorted according to
// their yield points in `variant_info_for_generator`.
if kind != DataTypeKind::Generator {
variants.sort_by(|info1, info2| info2.size.cmp(&info1.size));
}
let info = TypeSizeInfo {
kind,
type_description: type_desc.to_string(),


@ -16,7 +16,6 @@ rustc_errors = { path = "../rustc_errors" }
rustc_hir = { path = "../rustc_hir" }
rustc_index = { path = "../rustc_index" }
rustc_infer = { path = "../rustc_infer" }
rustc_lint_defs = { path = "../rustc_lint_defs" }
rustc_macros = { path = "../rustc_macros" }
rustc_query_system = { path = "../rustc_query_system" }
rustc_serialize = { path = "../rustc_serialize" }


@ -1190,6 +1190,7 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
ImplDerivedObligation(Box::new(ImplDerivedObligationCause {
derived,
impl_def_id,
impl_def_predicate_index: None,
span: obligation.cause.span,
}))
});


@ -2608,11 +2608,12 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
assert_eq!(predicates.parent, None);
let predicates = predicates.instantiate_own(tcx, substs);
let mut obligations = Vec::with_capacity(predicates.len());
for (predicate, span) in predicates {
for (index, (predicate, span)) in predicates.into_iter().enumerate() {
let cause = cause.clone().derived_cause(parent_trait_pred, |derived| {
ImplDerivedObligation(Box::new(ImplDerivedObligationCause {
derived,
impl_def_id: def_id,
impl_def_predicate_index: Some(index),
span,
}))
});


@ -5,7 +5,6 @@ edition = "2021"
[dependencies]
tracing = "0.1"
rustc_attr = { path = "../rustc_attr" }
rustc_middle = { path = "../rustc_middle" }
rustc_data_structures = { path = "../rustc_data_structures" }
rustc_hir = { path = "../rustc_hir" }


@ -970,7 +970,7 @@ fn variant_info_for_generator<'tcx>(
})
.collect();
let variant_infos: Vec<_> = generator
let mut variant_infos: Vec<_> = generator
.variant_fields
.iter_enumerated()
.map(|(variant_idx, variant_def)| {
@ -1033,6 +1033,15 @@ fn variant_info_for_generator<'tcx>(
}
})
.collect();
// The first three variants are hardcoded to be `UNRESUMED`, `RETURNED` and `POISONED`.
// We will move the `RETURNED` and `POISONED` elements to the end so we
// are left with a sorting order according to the generator's yield points:
// first `Unresumed`, then the `SuspendN` variants, followed by `Returned` and `Panicked` (POISONED).
let end_states = variant_infos.drain(1..=2);
let end_states: Vec<_> = end_states.collect();
variant_infos.extend(end_states);
(
variant_infos,
match tag_encoding {
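
For context on the ordering being established here, a hedged sketch (the async function and invocation are made up; `-Zprint-type-sizes` itself is a real nightly flag): an `async fn` lowers to a generator with one `Suspend` variant per `.await`, and after this change the report keeps the variants in execution order - `Unresumed`, `Suspend0`..`SuspendN`, then `Returned` and `Panicked` - instead of re-sorting them by size.

// Compile with, e.g.: rustc +nightly --edition 2021 -Zprint-type-sizes example.rs
use std::future::{ready, Ready};

async fn two_awaits(a: Ready<u32>, b: Ready<u32>) -> u32 {
    let x = a.await; // first yield point  -> Suspend0
    let y = b.await; // second yield point -> Suspend1
    x + y
}

fn main() {
    // The future is only created, never polled; this is just about the layout report.
    let _fut = two_awaits(ready(1), ready(2));
}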


@ -18,7 +18,7 @@ macro_rules! uint_impl {
pub const MIN: Self = 0;
/// The largest value that can be represented by this integer type
#[doc = concat!("(2<sup>", $BITS, "</sup> &minus; 1", $bound_condition, ")")]
#[doc = concat!("(2<sup>", $BITS, "</sup> &minus; 1", $bound_condition, ").")]
///
/// # Examples
///


@ -69,8 +69,8 @@ impl f32 {
unsafe { intrinsics::ceilf32(self) }
}
/// Returns the nearest integer to `self`. Round half-way cases away from
/// `0.0`.
/// Returns the nearest integer to `self`. If a value is half-way between two
/// integers, round away from `0.0`.
///
/// # Examples
///


@ -69,8 +69,8 @@ impl f64 {
unsafe { intrinsics::ceilf64(self) }
}
/// Returns the nearest integer to `self`. Round half-way cases away from
/// `0.0`.
/// Returns the nearest integer to `self`. If a value is half-way between two
/// integers, round away from `0.0`.
///
/// # Examples
///
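
A small self-contained check of the behavior the reworded sentence describes (not part of the diff): only exact half-way cases are affected by the away-from-zero rule; every other value rounds to the nearest integer.

fn main() {
    assert_eq!(2.5_f64.round(), 3.0);     // half-way: rounds away from 0.0
    assert_eq!((-2.5_f64).round(), -3.0);
    assert_eq!(2.4_f32.round(), 2.0);     // not half-way: nearest integer
    assert_eq!((-2.6_f32).round(), -3.0);
}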


@ -15,10 +15,9 @@ import sys
import time
import traceback
import urllib.request
from collections import OrderedDict
from io import StringIO
from pathlib import Path
from typing import Callable, Dict, Iterable, List, Optional, Union
from typing import Callable, Dict, Iterable, Iterator, List, Optional, Tuple, Union
PGO_HOST = os.environ["PGO_HOST"]
@ -204,48 +203,105 @@ class WindowsPipeline(Pipeline):
return False
def get_timestamp() -> float:
return time.time()
Duration = float
TimerSection = Union[Duration, "Timer"]
def iterate_sections(section: TimerSection, name: str, level: int = 0) -> Iterator[Tuple[int, str, Duration]]:
"""
Hierarchically iterate the sections of a timer, in a depth-first order.
"""
if isinstance(section, Duration):
yield (level, name, section)
elif isinstance(section, Timer):
yield (level, name, section.total_duration())
for (child_name, child_section) in section.sections:
yield from iterate_sections(child_section, child_name, level=level + 1)
else:
assert False
class Timer:
def __init__(self):
# We want this dictionary to be ordered by insertion.
# We use `OrderedDict` for compatibility with older Python versions.
self.stages = OrderedDict()
def __init__(self, parent_names: Tuple[str, ...] = ()):
self.sections: List[Tuple[str, TimerSection]] = []
self.section_active = False
self.parent_names = parent_names
@contextlib.contextmanager
def stage(self, name: str):
assert name not in self.stages
def section(self, name: str) -> "Timer":
assert not self.section_active
self.section_active = True
start = time.time()
start = get_timestamp()
exc = None
child_timer = Timer(parent_names=self.parent_names + (name, ))
full_name = " > ".join(child_timer.parent_names)
try:
LOGGER.info(f"Stage `{name}` starts")
yield
LOGGER.info(f"Section `{full_name}` starts")
yield child_timer
except BaseException as exception:
exc = exception
raise
finally:
end = time.time()
end = get_timestamp()
duration = end - start
self.stages[name] = duration
if exc is None:
LOGGER.info(f"Stage `{name}` ended: OK ({duration:.2f}s)")
if child_timer.has_children():
self.sections.append((name, child_timer))
else:
LOGGER.info(f"Stage `{name}` ended: FAIL ({duration:.2f}s)")
self.sections.append((name, duration))
if exc is None:
LOGGER.info(f"Section `{full_name}` ended: OK ({duration:.2f}s)")
else:
LOGGER.info(f"Section `{full_name}` ended: FAIL ({duration:.2f}s)")
self.section_active = False
def total_duration(self) -> Duration:
duration = 0
for (_, section) in self.sections:
if isinstance(section, Duration):
duration += section
else:
duration += section.total_duration()
return duration
def has_children(self) -> bool:
return len(self.sections) > 0
def print_stats(self):
total_duration = sum(self.stages.values())
rows = []
for (child_name, child_section) in self.sections:
for (level, name, duration) in iterate_sections(child_section, child_name, level=0):
label = f"{' ' * level}{name}:"
rows.append((label, duration))
# 57 is the width of the whole table
divider = "-" * 57
# Empty row
rows.append(("", ""))
total_duration_label = "Total duration:"
total_duration = self.total_duration()
rows.append((total_duration_label, humantime(total_duration)))
space_after_label = 2
max_label_length = max(16, max(len(label) for (label, _) in rows)) + space_after_label
table_width = max_label_length + 23
divider = "-" * table_width
with StringIO() as output:
print(divider, file=output)
for (name, duration) in self.stages.items():
pct = (duration / total_duration) * 100
name_str = f"{name}:"
print(f"{name_str:<34} {duration:>12.2f}s ({pct:>5.2f}%)", file=output)
total_duration_label = "Total duration:"
print(f"{total_duration_label:<34} {total_duration:>12.2f}s", file=output)
for (label, duration) in rows:
if isinstance(duration, Duration):
pct = (duration / total_duration) * 100
value = f"{duration:>12.2f}s ({pct:>5.2f}%)"
else:
value = f"{duration:>{len(total_duration_label) + 7}}"
print(f"{label:<{max_label_length}} {value}", file=output)
print(divider, file=output, end="")
LOGGER.info(f"Timer results\n{output.getvalue()}")
@ -265,6 +321,21 @@ def change_cwd(dir: Path):
os.chdir(cwd)
def humantime(time_s: float) -> str:
hours = time_s // 3600
time_s = time_s % 3600
minutes = time_s // 60
seconds = time_s % 60
result = ""
if hours > 0:
result += f"{int(hours)}h "
if minutes > 0:
result += f"{int(minutes)}m "
result += f"{round(seconds)}s"
return result
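A few quick checks of the formatting helper above (seconds are rounded, and zero hour/minute components are omitted):

assert humantime(3725.3) == "1h 2m 5s"
assert humantime(59.6) == "60s"   # rounding can push seconds up to 60
assert humantime(0.4) == "0s"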
def move_path(src: Path, dst: Path):
LOGGER.info(f"Moving `{src}` to `{dst}`")
shutil.move(src, dst)
@ -585,15 +656,16 @@ def execute_build_pipeline(timer: Timer, pipeline: Pipeline, final_build_args: L
pipeline.build_rustc_perf()
# Stage 1: Build rustc + PGO instrumented LLVM
with timer.stage("Build rustc (LLVM PGO)"):
build_rustc(pipeline, args=[
"--llvm-profile-generate"
], env=dict(
LLVM_PROFILE_DIR=str(pipeline.llvm_profile_dir_root() / "prof-%p")
))
with timer.section("Stage 1 (LLVM PGO)") as stage1:
with stage1.section("Build rustc and LLVM"):
build_rustc(pipeline, args=[
"--llvm-profile-generate"
], env=dict(
LLVM_PROFILE_DIR=str(pipeline.llvm_profile_dir_root() / "prof-%p")
))
with timer.stage("Gather profiles (LLVM PGO)"):
gather_llvm_profiles(pipeline)
with stage1.section("Gather profiles"):
gather_llvm_profiles(pipeline)
clear_llvm_files(pipeline)
final_build_args += [
@ -602,14 +674,15 @@ def execute_build_pipeline(timer: Timer, pipeline: Pipeline, final_build_args: L
]
# Stage 2: Build PGO instrumented rustc + LLVM
with timer.stage("Build rustc (rustc PGO)"):
build_rustc(pipeline, args=[
"--rust-profile-generate",
pipeline.rustc_profile_dir_root()
])
with timer.section("Stage 2 (rustc PGO)") as stage2:
with stage2.section("Build rustc and LLVM"):
build_rustc(pipeline, args=[
"--rust-profile-generate",
pipeline.rustc_profile_dir_root()
])
with timer.stage("Gather profiles (rustc PGO)"):
gather_rustc_profiles(pipeline)
with stage2.section("Gather profiles"):
gather_rustc_profiles(pipeline)
clear_llvm_files(pipeline)
final_build_args += [
@ -619,14 +692,15 @@ def execute_build_pipeline(timer: Timer, pipeline: Pipeline, final_build_args: L
# Stage 3: Build rustc + BOLT instrumented LLVM
if pipeline.supports_bolt():
with timer.stage("Build rustc (LLVM BOLT)"):
build_rustc(pipeline, args=[
"--llvm-profile-use",
pipeline.llvm_profile_merged_file(),
"--llvm-bolt-profile-generate",
])
with timer.stage("Gather profiles (LLVM BOLT)"):
gather_llvm_bolt_profiles(pipeline)
with timer.section("Stage 3 (LLVM BOLT)") as stage3:
with stage3.section("Build rustc and LLVM"):
build_rustc(pipeline, args=[
"--llvm-profile-use",
pipeline.llvm_profile_merged_file(),
"--llvm-bolt-profile-generate",
])
with stage3.section("Gather profiles"):
gather_llvm_bolt_profiles(pipeline)
clear_llvm_files(pipeline)
final_build_args += [
@ -635,7 +709,7 @@ def execute_build_pipeline(timer: Timer, pipeline: Pipeline, final_build_args: L
]
# Stage 4: Build PGO optimized rustc + PGO/BOLT optimized LLVM
with timer.stage("Final build"):
with timer.section("Stage 4 (final build)"):
cmd(final_build_args)

View file

@ -102,14 +102,14 @@ pub struct Markdown<'a> {
/// E.g. if `heading_offset: HeadingOffset::H2`, then `# something` renders an `<h2>`.
pub heading_offset: HeadingOffset,
}
/// A tuple struct like `Markdown` that renders the markdown with a table of contents.
pub(crate) struct MarkdownWithToc<'a>(
pub(crate) &'a str,
pub(crate) &'a mut IdMap,
pub(crate) ErrorCodes,
pub(crate) Edition,
pub(crate) &'a Option<Playground>,
);
/// A struct like `Markdown` that renders the markdown with a table of contents.
pub(crate) struct MarkdownWithToc<'a> {
pub(crate) content: &'a str,
pub(crate) ids: &'a mut IdMap,
pub(crate) error_codes: ErrorCodes,
pub(crate) edition: Edition,
pub(crate) playground: &'a Option<Playground>,
}
/// A tuple struct like `Markdown` that renders the markdown escaping HTML tags
/// and includes no paragraph tags.
pub(crate) struct MarkdownItemInfo<'a>(pub(crate) &'a str, pub(crate) &'a mut IdMap);
@ -1048,7 +1048,7 @@ impl Markdown<'_> {
impl MarkdownWithToc<'_> {
pub(crate) fn into_string(self) -> String {
let MarkdownWithToc(md, ids, codes, edition, playground) = self;
let MarkdownWithToc { content: md, ids, error_codes: codes, edition, playground } = self;
let p = Parser::new_ext(md, main_body_opts()).into_offset_iter();

View file

@ -72,7 +72,14 @@ pub(crate) fn render<P: AsRef<Path>>(
let mut ids = IdMap::new();
let error_codes = ErrorCodes::from(options.unstable_features.is_nightly_build());
let text = if !options.markdown_no_toc {
MarkdownWithToc(text, &mut ids, error_codes, edition, &playground).into_string()
MarkdownWithToc {
content: text,
ids: &mut ids,
error_codes,
edition,
playground: &playground,
}
.into_string()
} else {
Markdown {
content: text,

View file

@ -1,17 +1,17 @@
print-type-size type: `[async fn body@$DIR/large-arg.rs:6:21: 8:2]`: 3076 bytes, alignment: 1 bytes
print-type-size discriminant: 1 bytes
print-type-size variant `Unresumed`: 0 bytes
print-type-size variant `Suspend0`: 3075 bytes
print-type-size local `.__awaitee`: 3075 bytes, offset: 0 bytes, alignment: 1 bytes
print-type-size variant `Unresumed`: 0 bytes
print-type-size variant `Returned`: 0 bytes
print-type-size variant `Panicked`: 0 bytes
print-type-size type: `[async fn body@$DIR/large-arg.rs:10:30: 12:2]`: 3075 bytes, alignment: 1 bytes
print-type-size discriminant: 1 bytes
print-type-size variant `Unresumed`: 1024 bytes
print-type-size upvar `.t`: 1024 bytes, offset: 0 bytes, alignment: 1 bytes
print-type-size variant `Suspend0`: 3074 bytes
print-type-size upvar `.t`: 1024 bytes, offset: 0 bytes, alignment: 1 bytes
print-type-size local `.__awaitee`: 2050 bytes
print-type-size variant `Unresumed`: 1024 bytes
print-type-size upvar `.t`: 1024 bytes, offset: 0 bytes, alignment: 1 bytes
print-type-size variant `Returned`: 1024 bytes
print-type-size upvar `.t`: 1024 bytes, offset: 0 bytes, alignment: 1 bytes
print-type-size variant `Panicked`: 1024 bytes
@ -24,11 +24,11 @@ print-type-size field `.uninit`: 0 bytes
print-type-size field `.value`: 3075 bytes
print-type-size type: `[async fn body@$DIR/large-arg.rs:13:26: 15:2]`: 2050 bytes, alignment: 1 bytes
print-type-size discriminant: 1 bytes
print-type-size variant `Unresumed`: 1024 bytes
print-type-size upvar `.t`: 1024 bytes, offset: 0 bytes, alignment: 1 bytes
print-type-size variant `Suspend0`: 2049 bytes
print-type-size upvar `.t`: 1024 bytes, offset: 0 bytes, alignment: 1 bytes
print-type-size local `.__awaitee`: 1025 bytes
print-type-size variant `Unresumed`: 1024 bytes
print-type-size upvar `.t`: 1024 bytes, offset: 0 bytes, alignment: 1 bytes
print-type-size variant `Returned`: 1024 bytes
print-type-size upvar `.t`: 1024 bytes, offset: 0 bytes, alignment: 1 bytes
print-type-size variant `Panicked`: 1024 bytes

View file

@ -1,8 +1,8 @@
error[E0277]: the trait bound `B<C>: Copy` is not satisfied
--> $DIR/deriving-copyclone.rs:31:13
--> $DIR/deriving-copyclone.rs:31:26
|
LL | is_copy(B { a: 1, b: C });
| ------- ^^^^^^^^^^^^^^^^ the trait `Copy` is not implemented for `B<C>`
| ------- ^ the trait `Copy` is not implemented for `B<C>`
| |
| required by a bound introduced by this call
|
@ -19,14 +19,14 @@ LL | fn is_copy<T: Copy>(_: T) {}
= note: this error originates in the derive macro `Copy` (in Nightly builds, run with -Z macro-backtrace for more info)
help: consider borrowing here
|
LL | is_copy(&B { a: 1, b: C });
| +
LL | is_copy(B { a: 1, b: &C });
| +
error[E0277]: the trait bound `B<C>: Clone` is not satisfied
--> $DIR/deriving-copyclone.rs:32:14
--> $DIR/deriving-copyclone.rs:32:27
|
LL | is_clone(B { a: 1, b: C });
| -------- ^^^^^^^^^^^^^^^^ the trait `Clone` is not implemented for `B<C>`
| -------- ^ the trait `Clone` is not implemented for `B<C>`
| |
| required by a bound introduced by this call
|
@ -43,14 +43,14 @@ LL | fn is_clone<T: Clone>(_: T) {}
= note: this error originates in the derive macro `Clone` (in Nightly builds, run with -Z macro-backtrace for more info)
help: consider borrowing here
|
LL | is_clone(&B { a: 1, b: C });
| +
LL | is_clone(B { a: 1, b: &C });
| +
error[E0277]: the trait bound `B<D>: Copy` is not satisfied
--> $DIR/deriving-copyclone.rs:35:13
--> $DIR/deriving-copyclone.rs:35:26
|
LL | is_copy(B { a: 1, b: D });
| ------- ^^^^^^^^^^^^^^^^ the trait `Copy` is not implemented for `B<D>`
| ------- ^ the trait `Copy` is not implemented for `B<D>`
| |
| required by a bound introduced by this call
|
@ -67,8 +67,8 @@ LL | fn is_copy<T: Copy>(_: T) {}
= note: this error originates in the derive macro `Copy` (in Nightly builds, run with -Z macro-backtrace for more info)
help: consider borrowing here
|
LL | is_copy(&B { a: 1, b: D });
| +
LL | is_copy(B { a: 1, b: &D });
| +
error: aborting due to 3 previous errors

View file

@ -0,0 +1,28 @@
trait T1 {}
trait T2 {}
trait T3 {}
trait T4 {}
impl<B: T2> T1 for Wrapper<B> {}
impl T2 for i32 {}
impl T3 for i32 {}
impl<A: T3> T2 for Burrito<A> {}
struct Wrapper<W> {
value: W,
}
struct Burrito<F> {
filling: F,
}
fn want<V: T1>(_x: V) {}
fn example<Q>(q: Q) {
want(Wrapper { value: Burrito { filling: q } });
//~^ ERROR the trait bound `Q: T3` is not satisfied [E0277]
}
fn main() {}

View file

@ -0,0 +1,35 @@
error[E0277]: the trait bound `Q: T3` is not satisfied
--> $DIR/blame-trait-error.rs:24:46
|
LL | want(Wrapper { value: Burrito { filling: q } });
| ---- ^ the trait `T3` is not implemented for `Q`
| |
| required by a bound introduced by this call
|
note: required for `Burrito<Q>` to implement `T2`
--> $DIR/blame-trait-error.rs:11:13
|
LL | impl<A: T3> T2 for Burrito<A> {}
| -- ^^ ^^^^^^^^^^
| |
| unsatisfied trait bound introduced here
note: required for `Wrapper<Burrito<Q>>` to implement `T1`
--> $DIR/blame-trait-error.rs:6:13
|
LL | impl<B: T2> T1 for Wrapper<B> {}
| -- ^^ ^^^^^^^^^^
| |
| unsatisfied trait bound introduced here
note: required by a bound in `want`
--> $DIR/blame-trait-error.rs:21:12
|
LL | fn want<V: T1>(_x: V) {}
| ^^ required by this bound in `want`
help: consider restricting type parameter `Q`
|
LL | fn example<Q: T3>(q: Q) {
| ++++
error: aborting due to previous error
For more information about this error, try `rustc --explain E0277`.

View file

@ -0,0 +1,131 @@
// This test examines the error spans reported when a generic `impl` fails.
// For example, if a function wants an `Option<T>` where `T: Copy` but you pass `Some(vec![1, 2])`,
// then we want to point at the `vec![1, 2]` and not the `Some( ... )` expression.
trait T1 {}
trait T2 {}
trait T3 {}
trait T4 {}
impl T2 for i32 {}
impl T3 for i32 {}
struct Wrapper<W> {
value: W,
}
impl<B: T2> T1 for Wrapper<B> {}
struct Burrito<F> {
spicy: bool,
filling: F,
}
impl<A: T3> T2 for Burrito<A> {}
struct BurritoTuple<F>(F);
impl<C: T3> T2 for BurritoTuple<C> {}
enum BurritoKinds<G> {
SmallBurrito { spicy: bool, small_filling: G },
LargeBurrito { spicy: bool, large_filling: G },
MultiBurrito { first_filling: G, second_filling: G },
}
impl<D: T3> T2 for BurritoKinds<D> {}
struct Taco<H>(bool, H);
impl<E: T3> T2 for Taco<E> {}
enum TacoKinds<H> {
OneTaco(bool, H),
TwoTacos(bool, H, H),
}
impl<F: T3> T2 for TacoKinds<F> {}
struct GenericBurrito<Spiciness, Filling> {
spiciness: Spiciness,
filling: Filling,
}
impl<X, Y: T3> T2 for GenericBurrito<X, Y> {}
struct NotSpicy;
impl<A: T3, B: T3> T2 for (A, B) {}
impl<A: T2, B: T2> T1 for (A, B) {}
fn want<V: T1>(_x: V) {}
// Some more-complex examples:
type AliasBurrito<T> = GenericBurrito<T, T>;
// The following example is deliberately confusing: the nested `Two` structure is meant to
// "misdirect" the reported location of the error.
struct Two<A, B> {
a: A,
b: B,
}
impl<X, Y: T1, Z> T1 for Two<Two<X, Y>, Z> {}
struct DoubleWrapper<T> {
item: Wrapper<T>,
}
impl<T: T1> T1 for DoubleWrapper<T> {}
fn example<Q>(q: Q) {
// In each of the following examples, we expect the error span to point at the 'q' variable,
// since the missing constraint is `Q: T3`.
// Verifies for struct:
want(Wrapper { value: Burrito { spicy: false, filling: q } });
//~^ ERROR the trait bound `Q: T3` is not satisfied [E0277]
// Verifies for enum with named fields in variant:
want(Wrapper { value: BurritoKinds::SmallBurrito { spicy: true, small_filling: q } });
//~^ ERROR the trait bound `Q: T3` is not satisfied [E0277]
// Verifies for tuple struct:
want(Wrapper { value: Taco(false, q) });
//~^ ERROR the trait bound `Q: T3` is not satisfied [E0277]
// Verifies for tuple enum variant:
want(Wrapper { value: TacoKinds::OneTaco(false, q) });
//~^ ERROR the trait bound `Q: T3` is not satisfied [E0277]
// Verifies for generic type with multiple parameters:
want(Wrapper { value: GenericBurrito { spiciness: NotSpicy, filling: q } });
//~^ ERROR the trait bound `Q: T3` is not satisfied [E0277]
// Verifies for tuple:
want((3, q));
//~^ ERROR the trait bound `Q: T2` is not satisfied [E0277]
// Verifies for nested tuple:
want(Wrapper { value: (3, q) });
//~^ ERROR the trait bound `Q: T3` is not satisfied [E0277]
// Verifies for nested tuple:
want(((3, q), 5));
//~^ ERROR the trait bound `Q: T3` is not satisfied [E0277]
want(DoubleWrapper { item: Wrapper { value: q } });
//~^ ERROR the trait bound `Q: T1` is not satisfied [E0277]
want(DoubleWrapper { item: Wrapper { value: DoubleWrapper { item: Wrapper { value: q } } } });
//~^ ERROR the trait bound `Q: T1` is not satisfied [E0277]
// Verifies for type alias to struct:
want(Wrapper { value: AliasBurrito { spiciness: q, filling: q } });
//~^ ERROR the trait bound `Q: T3` is not satisfied [E0277]
want(Two { a: Two { a: (), b: q }, b: () });
//~^ ERROR the trait bound `Q: T1` is not satisfied [E0277]
// We *should* blame the 'q'.
// FIXME: Right now, the wrong field is blamed.
want(
Two { a: Two { a: (), b: Two { a: Two { a: (), b: q }, b: () } }, b: () },
//~^ ERROR the trait bound `Q: T1` is not satisfied [E0277]
);
}
fn main() {}

View file

@ -0,0 +1,380 @@
error[E0277]: the trait bound `Q: T3` is not satisfied
--> $DIR/blame-trait-error-spans-on-exprs.rs:79:60
|
LL | want(Wrapper { value: Burrito { spicy: false, filling: q } });
| ---- required by a bound introduced by this call ^ the trait `T3` is not implemented for `Q`
|
note: required for `Burrito<Q>` to implement `T2`
--> $DIR/blame-trait-error-spans-on-exprs.rs:22:13
|
LL | impl<A: T3> T2 for Burrito<A> {}
| -- ^^ ^^^^^^^^^^
| |
| unsatisfied trait bound introduced here
note: required for `Wrapper<Burrito<Q>>` to implement `T1`
--> $DIR/blame-trait-error-spans-on-exprs.rs:16:13
|
LL | impl<B: T2> T1 for Wrapper<B> {}
| -- ^^ ^^^^^^^^^^
| |
| unsatisfied trait bound introduced here
note: required by a bound in `want`
--> $DIR/blame-trait-error-spans-on-exprs.rs:53:12
|
LL | fn want<V: T1>(_x: V) {}
| ^^ required by this bound in `want`
help: consider restricting type parameter `Q`
|
LL | fn example<Q: T3>(q: Q) {
| ++++
error[E0277]: the trait bound `Q: T3` is not satisfied
--> $DIR/blame-trait-error-spans-on-exprs.rs:83:84
|
LL | want(Wrapper { value: BurritoKinds::SmallBurrito { spicy: true, small_filling: q } });
| ---- required by a bound introduced by this call ^ the trait `T3` is not implemented for `Q`
|
note: required for `BurritoKinds<Q>` to implement `T2`
--> $DIR/blame-trait-error-spans-on-exprs.rs:32:13
|
LL | impl<D: T3> T2 for BurritoKinds<D> {}
| -- ^^ ^^^^^^^^^^^^^^^
| |
| unsatisfied trait bound introduced here
note: required for `Wrapper<BurritoKinds<Q>>` to implement `T1`
--> $DIR/blame-trait-error-spans-on-exprs.rs:16:13
|
LL | impl<B: T2> T1 for Wrapper<B> {}
| -- ^^ ^^^^^^^^^^
| |
| unsatisfied trait bound introduced here
note: required by a bound in `want`
--> $DIR/blame-trait-error-spans-on-exprs.rs:53:12
|
LL | fn want<V: T1>(_x: V) {}
| ^^ required by this bound in `want`
help: consider restricting type parameter `Q`
|
LL | fn example<Q: T3>(q: Q) {
| ++++
error[E0277]: the trait bound `Q: T3` is not satisfied
--> $DIR/blame-trait-error-spans-on-exprs.rs:87:39
|
LL | want(Wrapper { value: Taco(false, q) });
| ---- ^ the trait `T3` is not implemented for `Q`
| |
| required by a bound introduced by this call
|
note: required for `Taco<Q>` to implement `T2`
--> $DIR/blame-trait-error-spans-on-exprs.rs:35:13
|
LL | impl<E: T3> T2 for Taco<E> {}
| -- ^^ ^^^^^^^
| |
| unsatisfied trait bound introduced here
note: required for `Wrapper<Taco<Q>>` to implement `T1`
--> $DIR/blame-trait-error-spans-on-exprs.rs:16:13
|
LL | impl<B: T2> T1 for Wrapper<B> {}
| -- ^^ ^^^^^^^^^^
| |
| unsatisfied trait bound introduced here
note: required by a bound in `want`
--> $DIR/blame-trait-error-spans-on-exprs.rs:53:12
|
LL | fn want<V: T1>(_x: V) {}
| ^^ required by this bound in `want`
help: consider restricting type parameter `Q`
|
LL | fn example<Q: T3>(q: Q) {
| ++++
error[E0277]: the trait bound `Q: T3` is not satisfied
--> $DIR/blame-trait-error-spans-on-exprs.rs:91:27
|
LL | want(Wrapper { value: TacoKinds::OneTaco(false, q) });
| ---- ^^^^^^^^^^^^^^^^^^^^^^^^^^^^ the trait `T3` is not implemented for `Q`
| |
| required by a bound introduced by this call
|
note: required for `TacoKinds<Q>` to implement `T2`
--> $DIR/blame-trait-error-spans-on-exprs.rs:41:13
|
LL | impl<F: T3> T2 for TacoKinds<F> {}
| -- ^^ ^^^^^^^^^^^^
| |
| unsatisfied trait bound introduced here
note: required for `Wrapper<TacoKinds<Q>>` to implement `T1`
--> $DIR/blame-trait-error-spans-on-exprs.rs:16:13
|
LL | impl<B: T2> T1 for Wrapper<B> {}
| -- ^^ ^^^^^^^^^^
| |
| unsatisfied trait bound introduced here
note: required by a bound in `want`
--> $DIR/blame-trait-error-spans-on-exprs.rs:53:12
|
LL | fn want<V: T1>(_x: V) {}
| ^^ required by this bound in `want`
help: consider restricting type parameter `Q`
|
LL | fn example<Q: T3>(q: Q) {
| ++++
error[E0277]: the trait bound `Q: T3` is not satisfied
--> $DIR/blame-trait-error-spans-on-exprs.rs:95:74
|
LL | want(Wrapper { value: GenericBurrito { spiciness: NotSpicy, filling: q } });
| ---- required by a bound introduced by this call ^ the trait `T3` is not implemented for `Q`
|
note: required for `GenericBurrito<NotSpicy, Q>` to implement `T2`
--> $DIR/blame-trait-error-spans-on-exprs.rs:47:16
|
LL | impl<X, Y: T3> T2 for GenericBurrito<X, Y> {}
| -- ^^ ^^^^^^^^^^^^^^^^^^^^
| |
| unsatisfied trait bound introduced here
note: required for `Wrapper<GenericBurrito<NotSpicy, Q>>` to implement `T1`
--> $DIR/blame-trait-error-spans-on-exprs.rs:16:13
|
LL | impl<B: T2> T1 for Wrapper<B> {}
| -- ^^ ^^^^^^^^^^
| |
| unsatisfied trait bound introduced here
note: required by a bound in `want`
--> $DIR/blame-trait-error-spans-on-exprs.rs:53:12
|
LL | fn want<V: T1>(_x: V) {}
| ^^ required by this bound in `want`
help: consider restricting type parameter `Q`
|
LL | fn example<Q: T3>(q: Q) {
| ++++
error[E0277]: the trait bound `Q: T2` is not satisfied
--> $DIR/blame-trait-error-spans-on-exprs.rs:99:14
|
LL | want((3, q));
| ---- ^ the trait `T2` is not implemented for `Q`
| |
| required by a bound introduced by this call
|
note: required for `(i32, Q)` to implement `T1`
--> $DIR/blame-trait-error-spans-on-exprs.rs:51:20
|
LL | impl<A: T2, B: T2> T1 for (A, B) {}
| -- ^^ ^^^^^^
| |
| unsatisfied trait bound introduced here
note: required by a bound in `want`
--> $DIR/blame-trait-error-spans-on-exprs.rs:53:12
|
LL | fn want<V: T1>(_x: V) {}
| ^^ required by this bound in `want`
help: consider restricting type parameter `Q`
|
LL | fn example<Q: T2>(q: Q) {
| ++++
error[E0277]: the trait bound `Q: T3` is not satisfied
--> $DIR/blame-trait-error-spans-on-exprs.rs:103:31
|
LL | want(Wrapper { value: (3, q) });
| ---- ^ the trait `T3` is not implemented for `Q`
| |
| required by a bound introduced by this call
|
note: required for `(i32, Q)` to implement `T2`
--> $DIR/blame-trait-error-spans-on-exprs.rs:50:20
|
LL | impl<A: T3, B: T3> T2 for (A, B) {}
| -- ^^ ^^^^^^
| |
| unsatisfied trait bound introduced here
note: required for `Wrapper<(i32, Q)>` to implement `T1`
--> $DIR/blame-trait-error-spans-on-exprs.rs:16:13
|
LL | impl<B: T2> T1 for Wrapper<B> {}
| -- ^^ ^^^^^^^^^^
| |
| unsatisfied trait bound introduced here
note: required by a bound in `want`
--> $DIR/blame-trait-error-spans-on-exprs.rs:53:12
|
LL | fn want<V: T1>(_x: V) {}
| ^^ required by this bound in `want`
help: consider restricting type parameter `Q`
|
LL | fn example<Q: T3>(q: Q) {
| ++++
error[E0277]: the trait bound `Q: T3` is not satisfied
--> $DIR/blame-trait-error-spans-on-exprs.rs:107:15
|
LL | want(((3, q), 5));
| ---- ^ the trait `T3` is not implemented for `Q`
| |
| required by a bound introduced by this call
|
note: required for `(i32, Q)` to implement `T2`
--> $DIR/blame-trait-error-spans-on-exprs.rs:50:20
|
LL | impl<A: T3, B: T3> T2 for (A, B) {}
| -- ^^ ^^^^^^
| |
| unsatisfied trait bound introduced here
note: required for `((i32, Q), i32)` to implement `T1`
--> $DIR/blame-trait-error-spans-on-exprs.rs:51:20
|
LL | impl<A: T2, B: T2> T1 for (A, B) {}
| -- ^^ ^^^^^^
| |
| unsatisfied trait bound introduced here
note: required by a bound in `want`
--> $DIR/blame-trait-error-spans-on-exprs.rs:53:12
|
LL | fn want<V: T1>(_x: V) {}
| ^^ required by this bound in `want`
help: consider restricting type parameter `Q`
|
LL | fn example<Q: T3>(q: Q) {
| ++++
error[E0277]: the trait bound `Q: T1` is not satisfied
--> $DIR/blame-trait-error-spans-on-exprs.rs:110:49
|
LL | want(DoubleWrapper { item: Wrapper { value: q } });
| ---- ^ the trait `T1` is not implemented for `Q`
| |
| required by a bound introduced by this call
|
note: required for `DoubleWrapper<Q>` to implement `T1`
--> $DIR/blame-trait-error-spans-on-exprs.rs:72:13
|
LL | impl<T: T1> T1 for DoubleWrapper<T> {}
| -- ^^ ^^^^^^^^^^^^^^^^
| |
| unsatisfied trait bound introduced here
note: required by a bound in `want`
--> $DIR/blame-trait-error-spans-on-exprs.rs:53:12
|
LL | fn want<V: T1>(_x: V) {}
| ^^ required by this bound in `want`
help: consider restricting type parameter `Q`
|
LL | fn example<Q: T1>(q: Q) {
| ++++
error[E0277]: the trait bound `Q: T1` is not satisfied
--> $DIR/blame-trait-error-spans-on-exprs.rs:113:88
|
LL | want(DoubleWrapper { item: Wrapper { value: DoubleWrapper { item: Wrapper { value: q } } } });
| ---- required by a bound introduced by this call ^ the trait `T1` is not implemented for `Q`
|
note: required for `DoubleWrapper<Q>` to implement `T1`
--> $DIR/blame-trait-error-spans-on-exprs.rs:72:13
|
LL | impl<T: T1> T1 for DoubleWrapper<T> {}
| -- ^^ ^^^^^^^^^^^^^^^^
| |
| unsatisfied trait bound introduced here
= note: 1 redundant requirement hidden
= note: required for `DoubleWrapper<DoubleWrapper<Q>>` to implement `T1`
note: required by a bound in `want`
--> $DIR/blame-trait-error-spans-on-exprs.rs:53:12
|
LL | fn want<V: T1>(_x: V) {}
| ^^ required by this bound in `want`
help: consider restricting type parameter `Q`
|
LL | fn example<Q: T1>(q: Q) {
| ++++
error[E0277]: the trait bound `Q: T3` is not satisfied
--> $DIR/blame-trait-error-spans-on-exprs.rs:117:27
|
LL | want(Wrapper { value: AliasBurrito { spiciness: q, filling: q } });
| ---- ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ the trait `T3` is not implemented for `Q`
| |
| required by a bound introduced by this call
|
note: required for `GenericBurrito<Q, Q>` to implement `T2`
--> $DIR/blame-trait-error-spans-on-exprs.rs:47:16
|
LL | impl<X, Y: T3> T2 for GenericBurrito<X, Y> {}
| -- ^^ ^^^^^^^^^^^^^^^^^^^^
| |
| unsatisfied trait bound introduced here
note: required for `Wrapper<GenericBurrito<Q, Q>>` to implement `T1`
--> $DIR/blame-trait-error-spans-on-exprs.rs:16:13
|
LL | impl<B: T2> T1 for Wrapper<B> {}
| -- ^^ ^^^^^^^^^^
| |
| unsatisfied trait bound introduced here
note: required by a bound in `want`
--> $DIR/blame-trait-error-spans-on-exprs.rs:53:12
|
LL | fn want<V: T1>(_x: V) {}
| ^^ required by this bound in `want`
help: consider restricting type parameter `Q`
|
LL | fn example<Q: T3>(q: Q) {
| ++++
error[E0277]: the trait bound `Q: T1` is not satisfied
--> $DIR/blame-trait-error-spans-on-exprs.rs:120:35
|
LL | want(Two { a: Two { a: (), b: q }, b: () });
| ---- ^ the trait `T1` is not implemented for `Q`
| |
| required by a bound introduced by this call
|
note: required for `Two<Two<(), Q>, ()>` to implement `T1`
--> $DIR/blame-trait-error-spans-on-exprs.rs:66:19
|
LL | impl<X, Y: T1, Z> T1 for Two<Two<X, Y>, Z> {}
| -- ^^ ^^^^^^^^^^^^^^^^^
| |
| unsatisfied trait bound introduced here
note: required by a bound in `want`
--> $DIR/blame-trait-error-spans-on-exprs.rs:53:12
|
LL | fn want<V: T1>(_x: V) {}
| ^^ required by this bound in `want`
help: consider restricting type parameter `Q`
|
LL | fn example<Q: T1>(q: Q) {
| ++++
error[E0277]: the trait bound `Q: T1` is not satisfied
--> $DIR/blame-trait-error-spans-on-exprs.rs:126:59
|
LL | want(
| ---- required by a bound introduced by this call
LL | Two { a: Two { a: (), b: Two { a: Two { a: (), b: q }, b: () } }, b: () },
| ^ the trait `T1` is not implemented for `Q`
|
note: required for `Two<Two<(), Q>, ()>` to implement `T1`
--> $DIR/blame-trait-error-spans-on-exprs.rs:66:19
|
LL | impl<X, Y: T1, Z> T1 for Two<Two<X, Y>, Z> {}
| -- ^^ ^^^^^^^^^^^^^^^^^
| |
| unsatisfied trait bound introduced here
= note: 1 redundant requirement hidden
= note: required for `Two<Two<(), Two<Two<(), Q>, ()>>, ()>` to implement `T1`
note: required by a bound in `want`
--> $DIR/blame-trait-error-spans-on-exprs.rs:53:12
|
LL | fn want<V: T1>(_x: V) {}
| ^^ required by this bound in `want`
help: consider restricting type parameter `Q`
|
LL | fn example<Q: T1>(q: Q) {
| ++++
error: aborting due to 13 previous errors
For more information about this error, try `rustc --explain E0277`.

View file

@ -1,11 +1,11 @@
print-type-size type: `[async fn body@$DIR/async.rs:8:36: 11:2]`: 16386 bytes, alignment: 1 bytes
print-type-size discriminant: 1 bytes
print-type-size variant `Unresumed`: 8192 bytes
print-type-size upvar `.arg`: 8192 bytes, offset: 0 bytes, alignment: 1 bytes
print-type-size variant `Suspend0`: 16385 bytes
print-type-size upvar `.arg`: 8192 bytes, offset: 0 bytes, alignment: 1 bytes
print-type-size local `.arg`: 8192 bytes
print-type-size local `.__awaitee`: 1 bytes
print-type-size variant `Unresumed`: 8192 bytes
print-type-size upvar `.arg`: 8192 bytes, offset: 0 bytes, alignment: 1 bytes
print-type-size variant `Returned`: 8192 bytes
print-type-size upvar `.arg`: 8192 bytes, offset: 0 bytes, alignment: 1 bytes
print-type-size variant `Panicked`: 8192 bytes

View file

@ -2,9 +2,9 @@ print-type-size type: `[generator@$DIR/generator.rs:10:5: 10:14]`: 8193 bytes, a
print-type-size discriminant: 1 bytes
print-type-size variant `Unresumed`: 8192 bytes
print-type-size upvar `.array`: 8192 bytes, offset: 0 bytes, alignment: 1 bytes
print-type-size variant `Suspend0`: 8192 bytes
print-type-size upvar `.array`: 8192 bytes, offset: 0 bytes, alignment: 1 bytes
print-type-size variant `Returned`: 8192 bytes
print-type-size upvar `.array`: 8192 bytes, offset: 0 bytes, alignment: 1 bytes
print-type-size variant `Panicked`: 8192 bytes
print-type-size upvar `.array`: 8192 bytes, offset: 0 bytes, alignment: 1 bytes
print-type-size variant `Suspend0`: 8192 bytes
print-type-size upvar `.array`: 8192 bytes, offset: 0 bytes, alignment: 1 bytes

View file

@ -1,11 +1,11 @@
print-type-size type: `[generator@$DIR/generator_discr_placement.rs:11:13: 11:15]`: 8 bytes, alignment: 4 bytes
print-type-size discriminant: 1 bytes
print-type-size variant `Unresumed`: 0 bytes
print-type-size variant `Suspend0`: 7 bytes
print-type-size padding: 3 bytes
print-type-size local `.w`: 4 bytes, alignment: 4 bytes
print-type-size variant `Suspend1`: 7 bytes
print-type-size padding: 3 bytes
print-type-size local `.z`: 4 bytes, alignment: 4 bytes
print-type-size variant `Unresumed`: 0 bytes
print-type-size variant `Returned`: 0 bytes
print-type-size variant `Panicked`: 0 bytes

View file

@ -101,10 +101,10 @@ LL | fn is_send<T: Send>(_: T) {}
| ^^^^ required by this bound in `is_send`
error[E0277]: `main::TestType` cannot be sent between threads safely
--> $DIR/negated-auto-traits-error.rs:66:13
--> $DIR/negated-auto-traits-error.rs:66:20
|
LL | is_sync(Outer2(TestType));
| ------- ^^^^^^^^^^^^^^^^ `main::TestType` cannot be sent between threads safely
| ------- ^^^^^^^^ `main::TestType` cannot be sent between threads safely
| |
| required by a bound introduced by this call
|