interpret: err instead of ICE on size mismatches in to_bits_or_ptr_internal
commit 38004b72bc
parent b6ab1fae73
16 changed files with 107 additions and 67 deletions
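For orientation before the per-file hunks: the core of the change is the return type of `Scalar::to_bits_or_ptr_internal`. Instead of asserting that the scalar has the requested size (and ICEing otherwise), it now returns a nested `Result` whose outer layer reports a `ScalarSizeMismatch`. A self-contained sketch of that shape, using simplified stand-in types rather than the real rustc definitions:

    // Simplified stand-ins for the rustc types; only the error-handling
    // shape mirrors the real code.
    #[derive(Debug, PartialEq)]
    pub struct ScalarSizeMismatch {
        pub target_size: u64,
        pub data_size: u64,
    }

    #[derive(Clone, Copy)]
    enum Scalar {
        Int { bits: u128, size: u64 },
        Ptr { addr: u64, size: u64 },
    }

    impl Scalar {
        // Shaped like the new `to_bits_or_ptr_internal`: the outer Result
        // reports a size mismatch (previously an assertion failure, i.e. an
        // ICE); the inner Result distinguishes raw bits (Ok) from a pointer
        // (Err).
        fn to_bits_or_ptr(self, target_size: u64) -> Result<Result<u128, u64>, ScalarSizeMismatch> {
            let (size, out) = match self {
                Scalar::Int { bits, size } => (size, Ok(bits)),
                Scalar::Ptr { addr, size } => (size, Err(addr)),
            };
            if size != target_size {
                return Err(ScalarSizeMismatch { target_size, data_size: size });
            }
            Ok(out)
        }
    }

    fn main() {
        // Reading a 4-byte integer at an 8-byte size is now a recoverable error.
        let s = Scalar::Int { bits: 42, size: 4 };
        assert!(s.to_bits_or_ptr(8).is_err());
        assert_eq!(s.to_bits_or_ptr(4), Ok(Ok(42)));
    }

Interpreter callers surface the mismatch as undefined behavior via `err_ub!`, while callers for which a mismatch is still a compiler bug keep the old behavior with `.unwrap()`.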
@@ -167,17 +167,18 @@ pub(super) fn op_to_const<'tcx>(
         },
         Immediate::ScalarPair(a, b) => {
             // We know `offset` is relative to the allocation, so we can use `into_parts`.
-            let (data, start) = match ecx.scalar_to_ptr(a.check_init().unwrap()).into_parts() {
-                (Some(alloc_id), offset) => {
-                    (ecx.tcx.global_alloc(alloc_id).unwrap_memory(), offset.bytes())
-                }
-                (None, _offset) => (
-                    ecx.tcx.intern_const_alloc(Allocation::from_bytes_byte_aligned_immutable(
-                        b"" as &[u8],
-                    )),
-                    0,
-                ),
-            };
+            let (data, start) =
+                match ecx.scalar_to_ptr(a.check_init().unwrap()).unwrap().into_parts() {
+                    (Some(alloc_id), offset) => {
+                        (ecx.tcx.global_alloc(alloc_id).unwrap_memory(), offset.bytes())
+                    }
+                    (None, _offset) => (
+                        ecx.tcx.intern_const_alloc(
+                            Allocation::from_bytes_byte_aligned_immutable(b"" as &[u8]),
+                        ),
+                        0,
+                    ),
+                };
             let len = b.to_machine_usize(ecx).unwrap();
             let start = start.try_into().unwrap();
             let len: usize = len.try_into().unwrap();
@@ -197,8 +197,8 @@ impl interpret::MayLeak for ! {
 }
 
 impl<'mir, 'tcx: 'mir> CompileTimeEvalContext<'mir, 'tcx> {
-    fn guaranteed_eq(&mut self, a: Scalar, b: Scalar) -> bool {
-        match (a, b) {
+    fn guaranteed_eq(&mut self, a: Scalar, b: Scalar) -> InterpResult<'tcx, bool> {
+        Ok(match (a, b) {
             // Comparisons between integers are always known.
             (Scalar::Int { .. }, Scalar::Int { .. }) => a == b,
             // Equality with integers can never be known for sure.
@@ -207,11 +207,11 @@ impl<'mir, 'tcx: 'mir> CompileTimeEvalContext<'mir, 'tcx> {
             // some things (like functions and vtables) do not have stable addresses
             // so we need to be careful around them (see e.g. #73722).
             (Scalar::Ptr(..), Scalar::Ptr(..)) => false,
-        }
+        })
     }
 
-    fn guaranteed_ne(&mut self, a: Scalar, b: Scalar) -> bool {
-        match (a, b) {
+    fn guaranteed_ne(&mut self, a: Scalar, b: Scalar) -> InterpResult<'tcx, bool> {
+        Ok(match (a, b) {
             // Comparisons between integers are always known.
             (Scalar::Int(_), Scalar::Int(_)) => a != b,
             // Comparisons of abstract pointers with null pointers are known if the pointer
@@ -219,13 +219,13 @@ impl<'mir, 'tcx: 'mir> CompileTimeEvalContext<'mir, 'tcx> {
             // Inequality with integers other than null can never be known for sure.
             (Scalar::Int(int), ptr @ Scalar::Ptr(..))
             | (ptr @ Scalar::Ptr(..), Scalar::Int(int)) => {
-                int.is_null() && !self.scalar_may_be_null(ptr)
+                int.is_null() && !self.scalar_may_be_null(ptr)?
             }
             // FIXME: return `true` for at least some comparisons where we can reliably
             // determine the result of runtime inequality tests at compile-time.
             // Examples include comparison of addresses in different static items.
             (Scalar::Ptr(..), Scalar::Ptr(..)) => false,
-        }
+        })
     }
 }
 
@@ -329,9 +329,9 @@ impl<'mir, 'tcx> interpret::Machine<'mir, 'tcx> for CompileTimeInterpreter<'mir,
                 let a = ecx.read_immediate(&args[0])?.to_scalar()?;
                 let b = ecx.read_immediate(&args[1])?.to_scalar()?;
                 let cmp = if intrinsic_name == sym::ptr_guaranteed_eq {
-                    ecx.guaranteed_eq(a, b)
+                    ecx.guaranteed_eq(a, b)?
                 } else {
-                    ecx.guaranteed_ne(a, b)
+                    ecx.guaranteed_ne(a, b)?
                 };
                 ecx.write_scalar(Scalar::from_bool(cmp), dest)?;
             }
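The hunks above follow a pattern repeated throughout this commit: a helper that returned a plain value becomes fallible, its body is wrapped in `Ok(match ...)`, and every inner call that can now fail gains a `?`. A minimal standalone sketch of that pattern, with hypothetical names and a plain `Result` standing in for `InterpResult<'tcx, _>`:

    type DemoResult<T> = Result<T, String>; // stand-in for InterpResult<'tcx, T>

    // Previously returned a bare `bool`; now it can report errors.
    fn scalar_may_be_null(x: i64) -> DemoResult<bool> {
        Ok(x == 0)
    }

    // Previously `-> bool` with `match`; now `-> DemoResult<bool>` with `Ok(match ...)`.
    fn guaranteed_ne(a: i64, b: i64) -> DemoResult<bool> {
        Ok(match (a, b) {
            (0, p) | (p, 0) => !scalar_may_be_null(p)?, // `?` replaces a would-be unwrap
            _ => false,
        })
    }

    fn main() -> DemoResult<()> {
        assert!(guaranteed_ne(0, 7)?);
        Ok(())
    }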
@@ -283,7 +283,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
                 if let Some(entry_idx) = vptr_entry_idx {
                     let entry_idx = u64::try_from(entry_idx).unwrap();
                     let (old_data, old_vptr) = val.to_scalar_pair()?;
-                    let old_vptr = self.scalar_to_ptr(old_vptr);
+                    let old_vptr = self.scalar_to_ptr(old_vptr)?;
                     let new_vptr = self
                         .read_new_vtable_after_trait_upcasting_from_vtable(old_vptr, entry_idx)?;
                     self.write_immediate(Immediate::new_dyn_trait(old_data, new_vptr, self), dest)
@@ -640,7 +640,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
                 Ok(Some((size, align)))
             }
             ty::Dynamic(..) => {
-                let vtable = self.scalar_to_ptr(metadata.unwrap_meta());
+                let vtable = self.scalar_to_ptr(metadata.unwrap_meta())?;
                 // Read size and align from vtable (already checks size).
                 Ok(Some(self.read_size_and_align_from_vtable(vtable)?))
             }
@@ -202,7 +202,7 @@ impl<'rt, 'mir, 'tcx: 'mir, M: CompileTimeMachine<'mir, 'tcx, const_eval::Memory
             if let ty::Dynamic(..) =
                 tcx.struct_tail_erasing_lifetimes(referenced_ty, self.ecx.param_env).kind()
             {
-                let ptr = self.ecx.scalar_to_ptr(mplace.meta.unwrap_meta());
+                let ptr = self.ecx.scalar_to_ptr(mplace.meta.unwrap_meta())?;
                 if let Some(alloc_id) = ptr.provenance {
                     // Explicitly choose const mode here, since vtables are immutable, even
                     // if the reference of the fat pointer is mutable.
@@ -1102,30 +1102,38 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
 
 /// Machine pointer introspection.
 impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
-    pub fn scalar_to_ptr(&self, scalar: Scalar<M::PointerTag>) -> Pointer<Option<M::PointerTag>> {
+    pub fn scalar_to_ptr(
+        &self,
+        scalar: Scalar<M::PointerTag>,
+    ) -> InterpResult<'tcx, Pointer<Option<M::PointerTag>>> {
         // We use `to_bits_or_ptr_internal` since we are just implementing the method people need to
         // call to force getting out a pointer.
-        match scalar.to_bits_or_ptr_internal(self.pointer_size()) {
-            Err(ptr) => ptr.into(),
-            Ok(bits) => {
-                let addr = u64::try_from(bits).unwrap();
-                let ptr = M::ptr_from_addr(&self, addr);
-                if addr == 0 {
-                    assert!(ptr.provenance.is_none(), "null pointer can never have an AllocId");
-                }
-                ptr
-            }
-        }
+        Ok(
+            match scalar
+                .to_bits_or_ptr_internal(self.pointer_size())
+                .map_err(|s| err_ub!(ScalarSizeMismatch(s)))?
+            {
+                Err(ptr) => ptr.into(),
+                Ok(bits) => {
+                    let addr = u64::try_from(bits).unwrap();
+                    let ptr = M::ptr_from_addr(&self, addr);
+                    if addr == 0 {
+                        assert!(ptr.provenance.is_none(), "null pointer can never have an AllocId");
+                    }
+                    ptr
+                }
+            },
+        )
     }
 
     /// Test if this value might be null.
     /// If the machine does not support ptr-to-int casts, this is conservative.
-    pub fn scalar_may_be_null(&self, scalar: Scalar<M::PointerTag>) -> bool {
-        match scalar.try_to_int() {
+    pub fn scalar_may_be_null(&self, scalar: Scalar<M::PointerTag>) -> InterpResult<'tcx, bool> {
+        Ok(match scalar.try_to_int() {
             Ok(int) => int.is_null(),
             Err(_) => {
                 // Can only happen during CTFE.
-                let ptr = self.scalar_to_ptr(scalar);
+                let ptr = self.scalar_to_ptr(scalar)?;
                 match self.ptr_try_get_alloc_id(ptr) {
                     Ok((alloc_id, offset, _)) => {
                         let (size, _align) = self
@@ -1138,7 +1146,7 @@
                     Err(_offset) => bug!("a non-int scalar is always a pointer"),
                 }
             }
-        }
+        })
     }
 
     /// Turning a "maybe pointer" into a proper pointer (and some information
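In the new `scalar_to_ptr` above, the `ScalarSizeMismatch` coming out of `to_bits_or_ptr_internal` is lifted into the interpreter's error type with `map_err(|s| err_ub!(ScalarSizeMismatch(s)))?`, which is why every caller in the following hunks gains a `?`. A minimal sketch of that lifting, with hypothetical standalone types in place of `InterpResult` and the real error enums:

    #[derive(Debug)]
    struct ScalarSizeMismatch { target_size: u64, data_size: u64 }

    #[derive(Debug)]
    enum InterpError { ScalarSizeMismatch(ScalarSizeMismatch) }

    // Stand-in for `Scalar::to_bits_or_ptr_internal`.
    fn to_bits_or_ptr(size: u64, target: u64) -> Result<u128, ScalarSizeMismatch> {
        if size == target {
            Ok(0)
        } else {
            Err(ScalarSizeMismatch { target_size: target, data_size: size })
        }
    }

    // Stand-in for the new `scalar_to_ptr`: the mismatch becomes an
    // interpreter error that callers propagate with `?`, instead of an
    // assertion failure inside the interpreter.
    fn scalar_to_ptr(size: u64, target: u64) -> Result<u128, InterpError> {
        let bits = to_bits_or_ptr(size, target).map_err(InterpError::ScalarSizeMismatch)?;
        Ok(bits)
    }

    fn main() {
        assert!(scalar_to_ptr(4, 8).is_err()); // an error, not a panic
    }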
@@ -342,7 +342,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
         &self,
         op: &OpTy<'tcx, M::PointerTag>,
     ) -> InterpResult<'tcx, Pointer<Option<M::PointerTag>>> {
-        Ok(self.scalar_to_ptr(self.read_scalar(op)?.check_init()?))
+        self.scalar_to_ptr(self.read_scalar(op)?.check_init()?)
     }
 
     // Turn the wide MPlace into a string (must already be dereferenced!)
@@ -738,7 +738,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
                 // okay. Everything else, we conservatively reject.
                 let ptr_valid = niche_start == 0
                     && variants_start == variants_end
-                    && !self.scalar_may_be_null(tag_val);
+                    && !self.scalar_may_be_null(tag_val)?;
                 if !ptr_valid {
                     throw_ub!(InvalidTag(dbg_val))
                 }
@@ -281,7 +281,7 @@ where
         };
 
         let mplace = MemPlace {
-            ptr: self.scalar_to_ptr(ptr.check_init()?),
+            ptr: self.scalar_to_ptr(ptr.check_init()?)?,
             // We could use the run-time alignment here. For now, we do not, because
             // the point of tracking the alignment here is to make sure that the *static*
             // alignment information emitted with the loads is correct. The run-time
@@ -1104,7 +1104,7 @@ where
         &self,
         mplace: &MPlaceTy<'tcx, M::PointerTag>,
     ) -> InterpResult<'tcx, (ty::Instance<'tcx>, MPlaceTy<'tcx, M::PointerTag>)> {
-        let vtable = self.scalar_to_ptr(mplace.vtable()); // also sanity checks the type
+        let vtable = self.scalar_to_ptr(mplace.vtable())?; // also sanity checks the type
         let (instance, ty) = self.read_drop_type_from_vtable(vtable)?;
         let layout = self.layout_of(ty)?;
 
@@ -519,7 +519,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
                         .kind(),
                     ty::Dynamic(..)
                 ));
-                let vtable = self.scalar_to_ptr(receiver_place.meta.unwrap_meta());
+                let vtable = self.scalar_to_ptr(receiver_place.meta.unwrap_meta())?;
                 let fn_val = self.get_vtable_slot(vtable, u64::try_from(idx).unwrap())?;
 
                 // `*mut receiver_place.layout.ty` is almost the layout that we
@@ -50,7 +50,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
         let vtable_slot = self
             .get_ptr_alloc(vtable_slot, ptr_size, self.tcx.data_layout.pointer_align.abi)?
             .expect("cannot be a ZST");
-        let fn_ptr = self.scalar_to_ptr(vtable_slot.read_ptr_sized(Size::ZERO)?.check_init()?);
+        let fn_ptr = self.scalar_to_ptr(vtable_slot.read_ptr_sized(Size::ZERO)?.check_init()?)?;
         self.get_ptr_fn(fn_ptr)
     }
 
@@ -75,7 +75,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
             .check_init()?;
         // We *need* an instance here, no other kind of function value, to be able
         // to determine the type.
-        let drop_instance = self.get_ptr_fn(self.scalar_to_ptr(drop_fn))?.as_instance()?;
+        let drop_instance = self.get_ptr_fn(self.scalar_to_ptr(drop_fn)?)?.as_instance()?;
         trace!("Found drop fn: {:?}", drop_instance);
         let fn_sig = drop_instance.ty(*self.tcx, self.param_env).fn_sig(*self.tcx);
         let fn_sig = self.tcx.normalize_erasing_late_bound_regions(self.param_env, fn_sig);
@@ -132,7 +132,8 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
             .get_ptr_alloc(vtable_slot, pointer_size, self.tcx.data_layout.pointer_align.abi)?
             .expect("cannot be a ZST");
 
-        let new_vtable = self.scalar_to_ptr(new_vtable.read_ptr_sized(Size::ZERO)?.check_init()?);
+        let new_vtable =
+            self.scalar_to_ptr(new_vtable.read_ptr_sized(Size::ZERO)?.check_init()?)?;
 
         Ok(new_vtable)
     }
@@ -312,7 +312,7 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValidityVisitor<'rt, 'mir, '
         let tail = self.ecx.tcx.struct_tail_erasing_lifetimes(pointee.ty, self.ecx.param_env);
         match tail.kind() {
             ty::Dynamic(..) => {
-                let vtable = self.ecx.scalar_to_ptr(meta.unwrap_meta());
+                let vtable = self.ecx.scalar_to_ptr(meta.unwrap_meta())?;
                 // Direct call to `check_ptr_access_align` checks alignment even on CTFE machines.
                 try_validation!(
                     self.ecx.check_ptr_access_align(
@@ -577,7 +577,7 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValidityVisitor<'rt, 'mir, '
 
                 // If we check references recursively, also check that this points to a function.
                 if let Some(_) = self.ref_tracking {
-                    let ptr = self.ecx.scalar_to_ptr(value);
+                    let ptr = self.ecx.scalar_to_ptr(value)?;
                     let _fn = try_validation!(
                         self.ecx.get_ptr_fn(ptr),
                         self.path,
@@ -590,7 +590,7 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValidityVisitor<'rt, 'mir, '
                     // FIXME: Check if the signature matches
                 } else {
                     // Otherwise (for standalone Miri), we have to still check it to be non-null.
-                    if self.ecx.scalar_may_be_null(value) {
+                    if self.ecx.scalar_may_be_null(value)? {
                         throw_validation_failure!(self.path, { "a null function pointer" });
                     }
                 }
@@ -667,7 +667,7 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValidityVisitor<'rt, 'mir, '
                 // We support 2 kinds of ranges here: full range, and excluding zero.
                 if start == 1 && end == max_value {
                     // Only null is the niche. So make sure the ptr is NOT null.
-                    if self.ecx.scalar_may_be_null(value) {
+                    if self.ecx.scalar_may_be_null(value)? {
                         throw_validation_failure!(self.path,
                             { "a potentially null pointer" }
                             expected {
@@ -15,8 +15,8 @@ use rustc_target::abi::{Align, HasDataLayout, Size};
 
 use super::{
     read_target_uint, write_target_uint, AllocId, InterpError, InterpResult, Pointer, Provenance,
-    ResourceExhaustionInfo, Scalar, ScalarMaybeUninit, UndefinedBehaviorInfo, UninitBytesAccess,
-    UnsupportedOpInfo,
+    ResourceExhaustionInfo, Scalar, ScalarMaybeUninit, ScalarSizeMismatch, UndefinedBehaviorInfo,
+    UninitBytesAccess, UnsupportedOpInfo,
 };
 use crate::ty;
 
@@ -81,6 +81,8 @@ impl<'tcx, Tag, Extra> ConstAllocation<'tcx, Tag, Extra> {
 /// is added when converting to `InterpError`.
 #[derive(Debug)]
 pub enum AllocError {
+    /// A scalar had the wrong size.
+    ScalarSizeMismatch(ScalarSizeMismatch),
     /// Encountered a pointer where we needed raw bytes.
     ReadPointerAsBytes,
     /// Partially overwriting a pointer.
@@ -90,10 +92,19 @@ pub enum AllocError {
 }
 pub type AllocResult<T = ()> = Result<T, AllocError>;
 
+impl From<ScalarSizeMismatch> for AllocError {
+    fn from(s: ScalarSizeMismatch) -> Self {
+        AllocError::ScalarSizeMismatch(s)
+    }
+}
+
 impl AllocError {
     pub fn to_interp_error<'tcx>(self, alloc_id: AllocId) -> InterpError<'tcx> {
         use AllocError::*;
         match self {
+            ScalarSizeMismatch(s) => {
+                InterpError::UndefinedBehavior(UndefinedBehaviorInfo::ScalarSizeMismatch(s))
+            }
             ReadPointerAsBytes => InterpError::Unsupported(UnsupportedOpInfo::ReadPointerAsBytes),
             PartialPointerOverwrite(offset) => InterpError::Unsupported(
                 UnsupportedOpInfo::PartialPointerOverwrite(Pointer::new(alloc_id, offset)),
@@ -425,7 +436,7 @@ impl<Tag: Provenance, Extra> Allocation<Tag, Extra> {
 
         // `to_bits_or_ptr_internal` is the right method because we just want to store this data
         // as-is into memory.
-        let (bytes, provenance) = match val.to_bits_or_ptr_internal(range.size) {
+        let (bytes, provenance) = match val.to_bits_or_ptr_internal(range.size)? {
             Err(val) => {
                 let (provenance, offset) = val.into_parts();
                 (u128::from(offset.bytes()), Some(provenance))
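With the `From` impl above, allocation-level code like `write_scalar` can use `?` on a `Result<_, ScalarSizeMismatch>` inside a function returning `AllocResult`, and `to_interp_error` later re-raises the mismatch as undefined behavior once the `AllocId` is known. A small sketch of this two-step lifting, with hypothetical standalone types:

    #[derive(Debug)]
    struct ScalarSizeMismatch { target_size: u64, data_size: u64 }

    #[derive(Debug)]
    enum AllocError { ScalarSizeMismatch(ScalarSizeMismatch) }

    #[derive(Debug)]
    enum InterpError { UndefinedBehavior(ScalarSizeMismatch) }

    impl From<ScalarSizeMismatch> for AllocError {
        fn from(s: ScalarSizeMismatch) -> Self {
            AllocError::ScalarSizeMismatch(s)
        }
    }

    impl AllocError {
        // Like `to_interp_error`, minus the AllocId bookkeeping.
        fn to_interp_error(self) -> InterpError {
            match self {
                AllocError::ScalarSizeMismatch(s) => InterpError::UndefinedBehavior(s),
            }
        }
    }

    // Allocation-level code: `.into()` (or `?` on a fallible call) uses the
    // `From` impl, so a mismatch travels as an `AllocError` first.
    fn write_scalar(size: u64, target: u64) -> Result<(), AllocError> {
        if size != target {
            return Err(ScalarSizeMismatch { target_size: target, data_size: size }.into());
        }
        Ok(())
    }

    fn main() {
        if let Err(e) = write_scalar(4, 8) {
            println!("{:?}", e.to_interp_error()); // surfaces as UB, not an ICE
        }
    }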
@@ -221,6 +221,13 @@ pub struct UninitBytesAccess {
     pub uninit_size: Size,
 }
 
+/// Information about a size mismatch.
+#[derive(Debug)]
+pub struct ScalarSizeMismatch {
+    pub target_size: u64,
+    pub data_size: u64,
+}
+
 /// Error information for when the program caused Undefined Behavior.
 pub enum UndefinedBehaviorInfo<'tcx> {
     /// Free-form case. Only for errors that are never caught!
@@ -298,10 +305,7 @@ pub enum UndefinedBehaviorInfo<'tcx> {
     /// Working with a local that is not currently live.
     DeadLocal,
     /// Data size is not equal to target size.
-    ScalarSizeMismatch {
-        target_size: u64,
-        data_size: u64,
-    },
+    ScalarSizeMismatch(ScalarSizeMismatch),
     /// A discriminant of an uninhabited enum variant is written.
     UninhabitedEnumVariantWritten,
 }
@@ -408,7 +412,7 @@ impl fmt::Display for UndefinedBehaviorInfo<'_> {
                 "using uninitialized data, but this operation requires initialized memory"
             ),
             DeadLocal => write!(f, "accessing a dead local variable"),
-            ScalarSizeMismatch { target_size, data_size } => write!(
+            ScalarSizeMismatch(self::ScalarSizeMismatch { target_size, data_size }) => write!(
                 f,
                 "scalar size mismatch: expected {} bytes but got {} bytes instead",
                 target_size, data_size
@@ -120,7 +120,8 @@ use crate::ty::{self, Instance, Ty, TyCtxt};
 pub use self::error::{
     struct_error, CheckInAllocMsg, ErrorHandled, EvalToAllocationRawResult, EvalToConstValueResult,
     InterpError, InterpErrorInfo, InterpResult, InvalidProgramInfo, MachineStopType,
-    ResourceExhaustionInfo, UndefinedBehaviorInfo, UninitBytesAccess, UnsupportedOpInfo,
+    ResourceExhaustionInfo, ScalarSizeMismatch, UndefinedBehaviorInfo, UninitBytesAccess,
+    UnsupportedOpInfo,
 };
 
 pub use self::value::{get_slice_bytes, ConstAlloc, ConstValue, Scalar, ScalarMaybeUninit};
@@ -12,6 +12,7 @@ use crate::ty::{Lift, ParamEnv, ScalarInt, Ty, TyCtxt};
 
 use super::{
     AllocId, AllocRange, ConstAllocation, InterpResult, Pointer, PointerArithmetic, Provenance,
+    ScalarSizeMismatch,
 };
 
 /// Represents the result of const evaluation via the `eval_to_allocation` query.
@@ -300,16 +301,29 @@
     ///
     /// This method only exists for the benefit of low-level operations that truly need to treat the
     /// scalar in whatever form it is.
+    ///
+    /// This throws UB (instead of ICEing) on a size mismatch since size mismatches can arise in
+    /// Miri when someone declares a function that we shim (such as `malloc`) with a wrong type.
     #[inline]
-    pub fn to_bits_or_ptr_internal(self, target_size: Size) -> Result<u128, Pointer<Tag>> {
+    pub fn to_bits_or_ptr_internal(
+        self,
+        target_size: Size,
+    ) -> Result<Result<u128, Pointer<Tag>>, ScalarSizeMismatch> {
         assert_ne!(target_size.bytes(), 0, "you should never look at the bits of a ZST");
-        match self {
-            Scalar::Int(int) => Ok(int.assert_bits(target_size)),
+        Ok(match self {
+            Scalar::Int(int) => Ok(int.to_bits(target_size).map_err(|size| {
+                ScalarSizeMismatch { target_size: target_size.bytes(), data_size: size.bytes() }
+            })?),
             Scalar::Ptr(ptr, sz) => {
-                assert_eq!(target_size.bytes(), u64::from(sz));
+                if target_size.bytes() != sz.into() {
+                    return Err(ScalarSizeMismatch {
+                        target_size: target_size.bytes(),
+                        data_size: sz.into(),
+                    });
+                }
                 Err(ptr)
             }
-        }
+        })
     }
 }
 
@@ -348,10 +362,10 @@
         assert_ne!(target_size.bytes(), 0, "you should never look at the bits of a ZST");
         self.try_to_int().map_err(|_| err_unsup!(ReadPointerAsBytes))?.to_bits(target_size).map_err(
             |size| {
-                err_ub!(ScalarSizeMismatch {
+                err_ub!(ScalarSizeMismatch(ScalarSizeMismatch {
                     target_size: target_size.bytes(),
                     data_size: size.bytes(),
-                })
+                }))
                 .into()
             },
         )
@@ -146,7 +146,7 @@ impl IntRange {
             // straight to the result, after doing a bit of checking. (We
             // could remove this branch and just fall through, which
             // is more general but much slower.)
-            if let Ok(bits) = scalar.to_bits_or_ptr_internal(target_size) {
+            if let Ok(bits) = scalar.to_bits_or_ptr_internal(target_size).unwrap() {
                 return Some(bits);
             }
         }
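One call-site note on the hunk above: in pattern analysis a size mismatch would still indicate a compiler bug, so the new `Result` is `.unwrap()`ed there, deliberately keeping the old ICE-on-mismatch behavior; only the interpreter and Miri paths convert the mismatch into a reportable error. In sketch form, with a hypothetical helper:

    // Stand-in for the new `to_bits_or_ptr_internal` return shape.
    fn to_bits_or_ptr(valid: bool) -> Result<Result<u128, ()>, &'static str> {
        if valid { Ok(Ok(7)) } else { Err("scalar size mismatch") }
    }

    fn main() {
        // Interpreter-style caller: turn the mismatch into a reportable error.
        let reported = to_bits_or_ptr(false).map_err(|e| format!("UB: {e}"));
        assert!(reported.is_err());

        // Pattern-analysis-style caller (as in the hunk above): a mismatch
        // here would be a compiler bug, so `.unwrap()` keeps the old ICE.
        if let Ok(bits) = to_bits_or_ptr(true).unwrap() {
            assert_eq!(bits, 7);
        }
    }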