interpret: debug-check ScalarPair layout information
This commit is contained in:
parent f75d884046
commit 5b20da8180

5 changed files with 89 additions and 46 deletions
@@ -84,14 +84,18 @@ impl<'tcx, Tag: Provenance> Immediate<Tag> {
     }
 
     #[inline]
-    pub fn to_scalar_pair(self) -> InterpResult<'tcx, (Scalar<Tag>, Scalar<Tag>)> {
+    pub fn to_scalar_or_uninit_pair(self) -> (ScalarMaybeUninit<Tag>, ScalarMaybeUninit<Tag>) {
         match self {
-            Immediate::ScalarPair(val1, val2) => Ok((val1.check_init()?, val2.check_init()?)),
-            Immediate::Scalar(..) => {
-                bug!("Got a scalar where a scalar pair was expected")
-            }
+            Immediate::ScalarPair(val1, val2) => (val1, val2),
+            Immediate::Scalar(..) => bug!("Got a scalar where a scalar pair was expected"),
         }
     }
+
+    #[inline]
+    pub fn to_scalar_pair(self) -> InterpResult<'tcx, (Scalar<Tag>, Scalar<Tag>)> {
+        let (val1, val2) = self.to_scalar_or_uninit_pair();
+        Ok((val1.check_init()?, val2.check_init()?))
+    }
 }
 
 // ScalarPair needs a type to interpret, so we often have an immediate and a type together
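The checked `to_scalar_pair` is now a thin wrapper over the new `to_scalar_or_uninit_pair`, which hands back both halves without calling `check_init`; callers that want to report uninitialized halves themselves, like the validity visitor further down, use the unchecked variant. A minimal sketch of the same split on stand-in types (the real `ScalarMaybeUninit` and `InterpResult` are rustc-internal, so the names below are illustrative only):

    #[derive(Clone, Copy, Debug, PartialEq)]
    enum MaybeUninitScalar {
        Uninit,
        Init(u128),
    }

    impl MaybeUninitScalar {
        fn check_init(self) -> Result<u128, &'static str> {
            match self {
                MaybeUninitScalar::Init(v) => Ok(v),
                MaybeUninitScalar::Uninit => Err("read of uninitialized bytes"),
            }
        }
    }

    #[allow(dead_code)] // the Scalar variant only exists to mirror the real enum
    enum Immediate {
        Scalar(MaybeUninitScalar),
        ScalarPair(MaybeUninitScalar, MaybeUninitScalar),
    }

    impl Immediate {
        // Unchecked: hand back both halves, initialized or not (the role of `to_scalar_or_uninit_pair`).
        fn to_scalar_or_uninit_pair(self) -> (MaybeUninitScalar, MaybeUninitScalar) {
            match self {
                Immediate::ScalarPair(a, b) => (a, b),
                Immediate::Scalar(..) => panic!("got a scalar where a scalar pair was expected"),
            }
        }

        // Checked: a wrapper that layers the init check on top (the role of `to_scalar_pair`).
        fn to_scalar_pair(self) -> Result<(u128, u128), &'static str> {
            let (a, b) = self.to_scalar_or_uninit_pair();
            Ok((a.check_init()?, b.check_init()?))
        }
    }

    fn main() {
        let pair = || Immediate::ScalarPair(MaybeUninitScalar::Init(1), MaybeUninitScalar::Uninit);
        // The checked variant refuses a pair with an uninitialized half ...
        assert!(pair().to_scalar_pair().is_err());
        // ... while the unchecked variant exposes it so the caller can report it itself.
        assert_eq!(pair().to_scalar_or_uninit_pair().1, MaybeUninitScalar::Uninit);
    }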
@@ -251,6 +255,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
     fn try_read_immediate_from_mplace(
         &self,
         mplace: &MPlaceTy<'tcx, M::PointerTag>,
+        force: bool,
     ) -> InterpResult<'tcx, Option<ImmTy<'tcx, M::PointerTag>>> {
         if mplace.layout.is_unsized() {
             // Don't touch unsized
@@ -271,27 +276,40 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
         // case where some of the bytes are initialized and others are not. So, we need an extra
         // check that walks over the type of `mplace` to make sure it is truly correct to treat this
         // like a `Scalar` (or `ScalarPair`).
-        match mplace.layout.abi {
-            Abi::Scalar(abi::Scalar::Initialized { .. }) => {
-                let scalar = alloc.read_scalar(alloc_range(Size::ZERO, mplace.layout.size))?;
-                Ok(Some(ImmTy { imm: scalar.into(), layout: mplace.layout }))
-            }
+        let scalar_layout = match mplace.layout.abi {
+            // `if` does not work nested inside patterns, making this a bit awkward to express.
+            Abi::Scalar(abi::Scalar::Initialized { value: s, .. }) => Some(s),
+            Abi::Scalar(s) if force => Some(s.primitive()),
+            _ => None,
+        };
+        if let Some(_) = scalar_layout {
+            let scalar = alloc.read_scalar(alloc_range(Size::ZERO, mplace.layout.size))?;
+            return Ok(Some(ImmTy { imm: scalar.into(), layout: mplace.layout }));
+        }
+        let scalar_pair_layout = match mplace.layout.abi {
             Abi::ScalarPair(
                 abi::Scalar::Initialized { value: a, .. },
                 abi::Scalar::Initialized { value: b, .. },
-            ) => {
-                // We checked `ptr_align` above, so all fields will have the alignment they need.
-                // We would anyway check against `ptr_align.restrict_for_offset(b_offset)`,
-                // which `ptr.offset(b_offset)` cannot possibly fail to satisfy.
-                let (a_size, b_size) = (a.size(self), b.size(self));
-                let b_offset = a_size.align_to(b.align(self).abi);
-                assert!(b_offset.bytes() > 0); // we later use the offset to tell apart the fields
-                let a_val = alloc.read_scalar(alloc_range(Size::ZERO, a_size))?;
-                let b_val = alloc.read_scalar(alloc_range(b_offset, b_size))?;
-                Ok(Some(ImmTy { imm: Immediate::ScalarPair(a_val, b_val), layout: mplace.layout }))
-            }
-            _ => Ok(None),
+            ) => Some((a, b)),
+            Abi::ScalarPair(a, b) if force => Some((a.primitive(), b.primitive())),
+            _ => None,
+        };
+        if let Some((a, b)) = scalar_pair_layout {
+            // We checked `ptr_align` above, so all fields will have the alignment they need.
+            // We would anyway check against `ptr_align.restrict_for_offset(b_offset)`,
+            // which `ptr.offset(b_offset)` cannot possibly fail to satisfy.
+            let (a_size, b_size) = (a.size(self), b.size(self));
+            let b_offset = a_size.align_to(b.align(self).abi);
+            assert!(b_offset.bytes() > 0); // we later use the offset to tell apart the fields
+            let a_val = alloc.read_scalar(alloc_range(Size::ZERO, a_size))?;
+            let b_val = alloc.read_scalar(alloc_range(b_offset, b_size))?;
+            return Ok(Some(ImmTy {
+                imm: Immediate::ScalarPair(a_val, b_val),
+                layout: mplace.layout,
+            }));
         }
+        // Neither a scalar nor scalar pair.
+        return Ok(None);
     }
 
     /// Try returning an immediate for the operand.
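The pair read relies on the usual `ScalarPair` field placement: the second scalar starts at the first scalar's size rounded up to the second's alignment, which is what `b_offset = a_size.align_to(b.align(self).abi)` computes, and it is strictly positive because the first scalar has nonzero size. A small worked example of that computation (the `align_to` helper below is a local stand-in for `Size::align_to`, and the concrete sizes are just an illustration):

    // Round `size` up to the next multiple of `align` (a power of two), in bytes.
    fn align_to(size: u64, align: u64) -> u64 {
        assert!(align.is_power_of_two());
        (size + align - 1) & !(align - 1)
    }

    fn main() {
        // A pair whose first scalar is 1 byte and whose second is a 4-byte,
        // 4-aligned value: the second field cannot start at offset 1.
        let (a_size, b_align, b_size) = (1u64, 4u64, 4u64);
        let b_offset = align_to(a_size, b_align);
        assert_eq!(b_offset, 4);
        // The two reads then cover bytes 0..a_size and b_offset..b_offset + b_size,
        // i.e. 0..1 and 4..8 here; the bytes in between are padding.
        assert!(b_offset >= a_size && b_offset + b_size == 8);
    }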
@@ -300,13 +318,18 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
     /// Note that for a given layout, this operation will either always fail or always
     /// succeed! Whether it succeeds depends on whether the layout can be represented
     /// in an `Immediate`, not on which data is stored there currently.
+    ///
+    /// If `force` is `true`, then even scalars with fields that can be uninit will be
+    /// read. This means the load is lossy and should not be written back!
+    /// This flag exists only for validity checking.
     pub fn try_read_immediate(
         &self,
         src: &OpTy<'tcx, M::PointerTag>,
+        force: bool,
     ) -> InterpResult<'tcx, Result<ImmTy<'tcx, M::PointerTag>, MPlaceTy<'tcx, M::PointerTag>>> {
         Ok(match src.try_as_mplace() {
             Ok(ref mplace) => {
-                if let Some(val) = self.try_read_immediate_from_mplace(mplace)? {
+                if let Some(val) = self.try_read_immediate_from_mplace(mplace, force)? {
                     Ok(val)
                 } else {
                     Err(*mplace)
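Whether a read produces an immediate is decided purely by the ABI of the layout plus the `force` flag, never by the bytes currently stored at the place; that is what keeps the doc comment's "always fail or always succeed" guarantee intact. A minimal sketch of that decision, with stand-in types for `Abi` and the scalar kinds (the real ones live in rustc_target):

    enum ScalarKind {
        Initialized, // valid range known, uninit bytes never allowed
        Union,       // bytes may be (partially) uninitialized
    }

    enum Abi {
        Scalar(ScalarKind),
        ScalarPair(ScalarKind, ScalarKind),
        Aggregate,
    }

    // Mirrors the two layout queries in `try_read_immediate_from_mplace`: without
    // `force`, only fully initialized scalar layouts qualify; with `force`, any
    // scalar or scalar-pair ABI does (which is why a forced load is lossy).
    fn readable_as_immediate(abi: &Abi, force: bool) -> bool {
        match *abi {
            Abi::Scalar(ScalarKind::Initialized) => true,
            Abi::Scalar(_) => force,
            Abi::ScalarPair(ScalarKind::Initialized, ScalarKind::Initialized) => true,
            Abi::ScalarPair(..) => force,
            Abi::Aggregate => false,
        }
    }

    fn main() {
        let partially_uninit = Abi::ScalarPair(ScalarKind::Initialized, ScalarKind::Union);
        assert!(!readable_as_immediate(&partially_uninit, false)); // normal reads decline
        assert!(readable_as_immediate(&partially_uninit, true));   // validity checking forces it
        assert!(readable_as_immediate(&Abi::Scalar(ScalarKind::Initialized), false));
        assert!(!readable_as_immediate(&Abi::Aggregate, true));    // `force` never helps aggregates
    }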
@@ -322,7 +345,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
         &self,
         op: &OpTy<'tcx, M::PointerTag>,
     ) -> InterpResult<'tcx, ImmTy<'tcx, M::PointerTag>> {
-        if let Ok(imm) = self.try_read_immediate(op)? {
+        if let Ok(imm) = self.try_read_immediate(op, /*force*/ false)? {
             Ok(imm)
         } else {
             span_bug!(self.cur_span(), "primitive read failed for type: {:?}", op.layout.ty);
@@ -879,7 +879,7 @@ where
         }
 
         // Let us see if the layout is simple so we take a shortcut, avoid force_allocation.
-        let src = match self.try_read_immediate(src)? {
+        let src = match self.try_read_immediate(src, /*force*/ false)? {
            Ok(src_val) => {
                assert!(!src.layout.is_unsized(), "cannot have unsized immediates");
                // Yay, we got a value that we can write directly.
@@ -20,8 +20,8 @@ use rustc_target::abi::{Abi, Scalar as ScalarAbi, Size, VariantIdx, Variants, Wr
 use std::hash::Hash;
 
 use super::{
-    alloc_range, CheckInAllocMsg, GlobalAlloc, InterpCx, InterpResult, MPlaceTy, Machine,
-    MemPlaceMeta, OpTy, Scalar, ScalarMaybeUninit, ValueVisitor,
+    alloc_range, CheckInAllocMsg, GlobalAlloc, Immediate, InterpCx, InterpResult, MPlaceTy,
+    Machine, MemPlaceMeta, OpTy, Scalar, ScalarMaybeUninit, ValueVisitor,
 };
 
 macro_rules! throw_validation_failure {
@@ -487,6 +487,17 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValidityVisitor<'rt, 'mir, '
         ))
     }
 
+    fn read_immediate_forced(
+        &self,
+        op: &OpTy<'tcx, M::PointerTag>,
+    ) -> InterpResult<'tcx, Immediate<M::PointerTag>> {
+        Ok(*try_validation!(
+            self.ecx.try_read_immediate(op, /*force*/ true),
+            self.path,
+            err_unsup!(ReadPointerAsBytes) => { "(potentially part of) a pointer" } expected { "plain (non-pointer) bytes" },
+        ).unwrap())
+    }
+
     /// Check if this is a value of primitive type, and if yes check the validity of the value
     /// at that type. Return `true` if the type is indeed primitive.
     fn try_visit_primitive(
@@ -626,18 +637,19 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValidityVisitor<'rt, 'mir, '
 
     fn visit_scalar(
         &mut self,
-        op: &OpTy<'tcx, M::PointerTag>,
+        scalar: ScalarMaybeUninit<M::PointerTag>,
         scalar_layout: ScalarAbi,
     ) -> InterpResult<'tcx> {
         // We check `is_full_range` in a slightly complicated way because *if* we are checking
         // number validity, then we want to ensure that `Scalar::Initialized` is indeed initialized,
         // i.e. that we go over the `check_init` below.
+        let size = scalar_layout.size(self.ecx);
         let is_full_range = match scalar_layout {
             ScalarAbi::Initialized { valid_range, .. } => {
                 if M::enforce_number_validity(self.ecx) {
                     false // not "full" since uninit is not accepted
                 } else {
-                    valid_range.is_full_for(op.layout.size)
+                    valid_range.is_full_for(size)
                 }
             }
             ScalarAbi::Union { .. } => true,
@@ -646,21 +658,19 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValidityVisitor<'rt, 'mir, '
             // Nothing to check
             return Ok(());
         }
-        // We have something to check.
+        // We have something to check: it must at least be initialized.
         let valid_range = scalar_layout.valid_range(self.ecx);
         let WrappingRange { start, end } = valid_range;
-        let max_value = op.layout.size.unsigned_int_max();
+        let max_value = size.unsigned_int_max();
         assert!(end <= max_value);
-        // Determine the allowed range
-        let value = self.read_scalar(op)?;
         let value = try_validation!(
-            value.check_init(),
+            scalar.check_init(),
             self.path,
-            err_ub!(InvalidUninitBytes(None)) => { "{:x}", value }
+            err_ub!(InvalidUninitBytes(None)) => { "{:x}", scalar }
                 expected { "something {}", wrapping_range_format(valid_range, max_value) },
         );
         let bits = match value.try_to_int() {
-            Ok(int) => int.assert_bits(op.layout.size),
+            Ok(int) => int.assert_bits(size),
             Err(_) => {
                 // So this is a pointer then, and casting to an int failed.
                 // Can only happen during CTFE.
@@ -678,7 +688,7 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValidityVisitor<'rt, 'mir, '
                     } else {
                         return Ok(());
                     }
-                } else if scalar_layout.valid_range(self.ecx).is_full_for(op.layout.size) {
+                } else if scalar_layout.valid_range(self.ecx).is_full_for(size) {
                     // Easy. (This is reachable if `enforce_number_validity` is set.)
                     return Ok(());
                 } else {
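Deriving `size` from `scalar_layout` rather than from `op.layout.size` matters now that `visit_scalar` is also called on the two halves of a `ScalarPair`: the operand's size covers the whole pair, while the range and bit-width checks must use the individual scalar's size. A worked example of the difference (the sizes and the bool-like valid range are illustrative; `unsigned_int_max` is a local stand-in for `Size::unsigned_int_max`):

    // Largest unsigned value representable in `size_bytes` bytes.
    fn unsigned_int_max(size_bytes: u64) -> u128 {
        if size_bytes >= 16 { u128::MAX } else { (1u128 << (size_bytes * 8)) - 1 }
    }

    fn main() {
        // A bool-like tag (1 byte, valid range 0..=1) stored as one half of an 8-byte pair.
        let scalar_size = 1u64; // from the scalar's own layout
        let pair_size = 8u64;   // what `op.layout.size` reports for the whole pair

        assert_eq!(unsigned_int_max(scalar_size), 0xff);
        assert_eq!(unsigned_int_max(pair_size), u64::MAX as u128);

        // A value like 0x0100 fits in the pair's width but not in the tag's: only the
        // scalar-sized bound exposes it as too wide for a 1-byte scalar.
        let stray_value: u128 = 0x0100;
        assert!(stray_value > unsigned_int_max(scalar_size));
        assert!(stray_value <= unsigned_int_max(pair_size));
    }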
@@ -817,13 +827,23 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValueVisitor<'mir, 'tcx, M>
                 );
             }
             Abi::Scalar(scalar_layout) => {
-                self.visit_scalar(op, scalar_layout)?;
+                let scalar = self.read_immediate_forced(op)?.to_scalar_or_uninit();
+                self.visit_scalar(scalar, scalar_layout)?;
             }
-            Abi::ScalarPair { .. } | Abi::Vector { .. } => {
-                // These have fields that we already visited above, so we already checked
-                // all their scalar-level restrictions.
-                // There is also no equivalent to `rustc_layout_scalar_valid_range_start`
-                // that would make skipping them here an issue.
+            Abi::ScalarPair(a_layout, b_layout) => {
+                // We would validate these things as we descend into the fields,
+                // but that can miss bugs in layout computation. Layout computation
+                // is subtle due to enums having ScalarPair layout, where one field
+                // is the discriminant.
+                if cfg!(debug_assertions) {
+                    let (a, b) = self.read_immediate_forced(op)?.to_scalar_or_uninit_pair();
+                    self.visit_scalar(a, a_layout)?;
+                    self.visit_scalar(b, b_layout)?;
+                }
             }
+            Abi::Vector { .. } => {
+                // No checks here, we assume layout computation gets this right.
+                // (This is harder to check since Miri does not represent these as `Immediate`.)
+            }
             Abi::Aggregate { .. } => {
                 // Nothing to do.
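The point of the `cfg!(debug_assertions)` block is to catch layout-computation bugs at the `ScalarPair` level, which matters for enums where one of the two scalars is the discriminant and carries a narrow valid range. A minimal model of what checking each half buys (stand-in types; the `Option<u64>`-shaped example and the "buggy layout" are purely illustrative):

    #[derive(Clone, Copy)]
    struct ScalarLayout {
        size_bytes: u64,
        valid_start: u128,
        valid_end: u128, // inclusive; this simplified model assumes start <= end
    }

    // Simplified per-scalar check in the spirit of `visit_scalar`.
    fn visit_scalar(value: u128, layout: ScalarLayout) -> Result<(), String> {
        let max = (1u128 << (layout.size_bytes * 8)) - 1;
        assert!(layout.valid_end <= max);
        if layout.valid_start <= value && value <= layout.valid_end {
            Ok(())
        } else {
            Err(format!("{value} is outside the range {}..={}", layout.valid_start, layout.valid_end))
        }
    }

    fn main() {
        // Something shaped like `Option<u64>`: scalar `a` is the tag (valid range 0..=1),
        // scalar `b` is the payload (full 8-byte range).
        let tag = ScalarLayout { size_bytes: 1, valid_start: 0, valid_end: 1 };
        let payload = ScalarLayout { size_bytes: 8, valid_start: 0, valid_end: u64::MAX as u128 };

        // A well-formed `Some(7)`: both halves pass.
        assert!(visit_scalar(1, tag).is_ok() && visit_scalar(7, payload).is_ok());

        // A layout bug that claimed the tag only ever holds 0 is caught as soon as a
        // `Some` value is validated against it.
        let buggy_tag = ScalarLayout { valid_end: 0, ..tag };
        assert!(visit_scalar(1, buggy_tag).is_err());
    }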
@@ -415,7 +415,7 @@ impl<'mir, 'tcx> ConstPropagator<'mir, 'tcx> {
 
         // Try to read the local as an immediate so that if it is representable as a scalar, we can
         // handle it as such, but otherwise, just return the value as is.
-        Some(match self.ecx.try_read_immediate(&op) {
+        Some(match self.ecx.try_read_immediate(&op, /*force*/ false) {
             Ok(Ok(imm)) => imm.into(),
             _ => op,
         })
@@ -710,7 +710,7 @@ impl<'mir, 'tcx> ConstPropagator<'mir, 'tcx> {
         }
 
         // FIXME: figure out what to do when try_read_immediate fails
-        let imm = self.use_ecx(|this| this.ecx.try_read_immediate(value));
+        let imm = self.use_ecx(|this| this.ecx.try_read_immediate(value, /*force*/ false));
 
         if let Some(Ok(imm)) = imm {
             match *imm {
@@ -412,7 +412,7 @@ impl<'mir, 'tcx> ConstPropagator<'mir, 'tcx> {
 
         // Try to read the local as an immediate so that if it is representable as a scalar, we can
         // handle it as such, but otherwise, just return the value as is.
-        Some(match self.ecx.try_read_immediate(&op) {
+        Some(match self.ecx.try_read_immediate(&op, /*force*/ false) {
             Ok(Ok(imm)) => imm.into(),
             _ => op,
         })