Auto merge of #55915 - oli-obk:miri_engine_refactoring, r=RalfJung

Miri engine refactoring

The next small step of https://github.com/rust-lang/rust/pull/55293.

r? @RalfJung
bors 2018-11-25 00:00:17 +00:00
commit 2dd94c133e
13 changed files with 613 additions and 493 deletions

@@ -306,7 +306,7 @@ dependencies = [
"clippy-mini-macro-test 0.2.0",
"clippy_dev 0.0.1",
"clippy_lints 0.0.212",
"compiletest_rs 0.3.16 (registry+https://github.com/rust-lang/crates.io-index)",
"compiletest_rs 0.3.17 (registry+https://github.com/rust-lang/crates.io-index)",
"derive-new 0.5.4 (registry+https://github.com/rust-lang/crates.io-index)",
"lazy_static 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
"regex 1.0.4 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -423,7 +423,7 @@ dependencies = [
[[package]]
name = "compiletest_rs"
version = "0.3.16"
version = "0.3.17"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"diff 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -1315,7 +1315,7 @@ dependencies = [
"byteorder 1.2.3 (registry+https://github.com/rust-lang/crates.io-index)",
"cargo_metadata 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)",
"colored 1.6.0 (registry+https://github.com/rust-lang/crates.io-index)",
"compiletest_rs 0.3.16 (registry+https://github.com/rust-lang/crates.io-index)",
"compiletest_rs 0.3.17 (registry+https://github.com/rust-lang/crates.io-index)",
"env_logger 0.5.12 (registry+https://github.com/rust-lang/crates.io-index)",
"log 0.4.5 (registry+https://github.com/rust-lang/crates.io-index)",
"vergen 3.0.3 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -3274,7 +3274,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
"checksum colored 1.6.0 (registry+https://github.com/rust-lang/crates.io-index)" = "b0aa3473e85a3161b59845d6096b289bb577874cafeaf75ea1b1beaa6572c7fc"
"checksum commoncrypto 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "d056a8586ba25a1e4d61cb090900e495952c7886786fc55f909ab2f819b69007"
"checksum commoncrypto-sys 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "1fed34f46747aa73dfaa578069fd8279d2818ade2b55f38f22a9401c7f4083e2"
"checksum compiletest_rs 0.3.16 (registry+https://github.com/rust-lang/crates.io-index)" = "75e809f56d6aa9575b67924b0af686c4f4c1380314f47947e235e9ff7fa94bed"
"checksum compiletest_rs 0.3.17 (registry+https://github.com/rust-lang/crates.io-index)" = "89747fe073b7838343bd2c2445e7a7c2e0d415598f8925f0fa9205b9cdfc48cb"
"checksum core-foundation 0.6.1 (registry+https://github.com/rust-lang/crates.io-index)" = "cc3532ec724375c7cb7ff0a097b714fde180bb1f6ed2ab27cfcd99ffca873cd2"
"checksum core-foundation-sys 0.6.1 (registry+https://github.com/rust-lang/crates.io-index)" = "a3fb15cdbdd9cf8b82d97d0296bb5cd3631bba58d6e31650a002a8e7fb5721f9"
"checksum crossbeam 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)" = "24ce9782d4d5c53674646a6a4c1863a21a8fc0cb649b3c94dfc16e45071dea19"

@@ -10,7 +10,10 @@
//! The virtual memory representation of the MIR interpreter
use super::{Pointer, EvalResult, AllocId};
use super::{
Pointer, EvalResult, AllocId, ScalarMaybeUndef, write_target_uint, read_target_uint, Scalar,
truncate,
};
use ty::layout::{Size, Align};
use syntax::ast::Mutability;
@@ -18,6 +21,7 @@ use std::iter;
use mir;
use std::ops::{Deref, DerefMut};
use rustc_data_structures::sorted_map::SortedMap;
use rustc_target::abi::HasDataLayout;
/// Used by `check_bounds` to indicate whether the pointer needs to be just inbounds
/// or also inbounds of a *live* allocation.
@@ -49,6 +53,418 @@ pub struct Allocation<Tag=(),Extra=()> {
pub extra: Extra,
}
/// Alignment and bounds checks
impl<'tcx, Tag, Extra> Allocation<Tag, Extra> {
/// Check if the pointer is "in-bounds". Notice that a pointer pointing at the end
/// of an allocation (i.e., at the first *inaccessible* location) *is* considered
/// in-bounds! This follows C's/LLVM's rules.
/// If you want to check bounds before doing a memory access, better use `check_bounds`.
pub fn check_bounds_ptr(
&self,
ptr: Pointer<Tag>,
) -> EvalResult<'tcx> {
let allocation_size = self.bytes.len() as u64;
ptr.check_in_alloc(Size::from_bytes(allocation_size), InboundsCheck::Live)
}
/// Check if the memory range beginning at `ptr` and of size `size` is "in-bounds".
#[inline(always)]
pub fn check_bounds(
&self,
cx: &impl HasDataLayout,
ptr: Pointer<Tag>,
size: Size,
) -> EvalResult<'tcx> {
// if ptr.offset is in bounds, then so is ptr (because offset checks for overflow)
self.check_bounds_ptr(ptr.offset(size, cx)?)
}
}
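A standalone illustration of the one-past-the-end rule implemented by `check_bounds_ptr` (and `check_in_alloc` below); the function and names here are illustrative, not the interpreter's API:

fn is_in_bounds(offset: u64, allocation_size: u64) -> bool {
    // A pointer to the first inaccessible byte (offset == allocation_size)
    // is still in-bounds, following C's/LLVM's rules; only offsets strictly
    // beyond the allocation are out of bounds.
    offset <= allocation_size
}

fn main() {
    assert!(is_in_bounds(16, 16));  // one past the end: in-bounds
    assert!(!is_in_bounds(17, 16)); // past that: PointerOutOfBounds
}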
/// Byte accessors
impl<'tcx, Tag: Copy, Extra: AllocationExtra<Tag>> Allocation<Tag, Extra> {
/// The last argument controls whether we error out when there are undefined
/// or pointer bytes. You should never call this directly; call `get_bytes` or
/// `get_bytes_with_undef_and_ptr` instead.
///
/// This function also guarantees that the resulting pointer will remain stable
/// even when new allocations are pushed to the `HashMap`. `copy_repeatedly` relies
/// on that.
fn get_bytes_internal(
&self,
cx: &impl HasDataLayout,
ptr: Pointer<Tag>,
size: Size,
check_defined_and_ptr: bool,
) -> EvalResult<'tcx, &[u8]> {
self.check_bounds(cx, ptr, size)?;
if check_defined_and_ptr {
self.check_defined(ptr, size)?;
self.check_relocations(cx, ptr, size)?;
} else {
// We still don't want relocations on the *edges*
self.check_relocation_edges(cx, ptr, size)?;
}
AllocationExtra::memory_read(self, ptr, size)?;
assert_eq!(ptr.offset.bytes() as usize as u64, ptr.offset.bytes());
assert_eq!(size.bytes() as usize as u64, size.bytes());
let offset = ptr.offset.bytes() as usize;
Ok(&self.bytes[offset..offset + size.bytes() as usize])
}
#[inline]
pub fn get_bytes(
&self,
cx: &impl HasDataLayout,
ptr: Pointer<Tag>,
size: Size,
) -> EvalResult<'tcx, &[u8]> {
self.get_bytes_internal(cx, ptr, size, true)
}
/// It is the caller's responsibility to handle undefined and pointer bytes.
/// However, this still checks that there are no relocations on the *edges*.
#[inline]
pub fn get_bytes_with_undef_and_ptr(
&self,
cx: &impl HasDataLayout,
ptr: Pointer<Tag>,
size: Size,
) -> EvalResult<'tcx, &[u8]> {
self.get_bytes_internal(cx, ptr, size, false)
}
/// Just calling this already marks everything as defined and removes relocations,
/// so be sure to actually put data there!
pub fn get_bytes_mut(
&mut self,
cx: &impl HasDataLayout,
ptr: Pointer<Tag>,
size: Size,
) -> EvalResult<'tcx, &mut [u8]> {
assert_ne!(size.bytes(), 0, "0-sized accesses should never even get a `Pointer`");
self.check_bounds(cx, ptr, size)?;
self.mark_definedness(ptr, size, true)?;
self.clear_relocations(cx, ptr, size)?;
AllocationExtra::memory_written(self, ptr, size)?;
assert_eq!(ptr.offset.bytes() as usize as u64, ptr.offset.bytes());
assert_eq!(size.bytes() as usize as u64, size.bytes());
let offset = ptr.offset.bytes() as usize;
Ok(&mut self.bytes[offset..offset + size.bytes() as usize])
}
}
/// Reading and writing
impl<'tcx, Tag: Copy, Extra: AllocationExtra<Tag>> Allocation<Tag, Extra> {
/// Reads bytes until a `0` is encountered. Will error if the end of the allocation is reached
/// before a `0` is found.
pub fn read_c_str(
&self,
cx: &impl HasDataLayout,
ptr: Pointer<Tag>,
) -> EvalResult<'tcx, &[u8]> {
assert_eq!(ptr.offset.bytes() as usize as u64, ptr.offset.bytes());
let offset = ptr.offset.bytes() as usize;
match self.bytes[offset..].iter().position(|&c| c == 0) {
Some(size) => {
let p1 = Size::from_bytes((size + 1) as u64);
self.check_relocations(cx, ptr, p1)?;
self.check_defined(ptr, p1)?;
Ok(&self.bytes[offset..offset + size])
}
None => err!(UnterminatedCString(ptr.erase_tag())),
}
}
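The NUL scan above, reduced to a self-contained sketch over a plain byte slice (the helper is hypothetical):

fn c_str_bytes(bytes: &[u8]) -> Option<&[u8]> {
    // The position of the first 0 byte is the string length without the NUL;
    // `None` corresponds to the UnterminatedCString error above.
    bytes.iter().position(|&c| c == 0).map(|size| &bytes[..size])
}

fn main() {
    assert_eq!(c_str_bytes(b"hi\0rest"), Some(&b"hi"[..]));
    assert_eq!(c_str_bytes(b"no nul"), None);
}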
/// Validates that `ptr.offset` and `ptr.offset + size` do not point to the middle of a
/// relocation. If `allow_ptr_and_undef` is `false`, also enforces that the memory in the
/// given range contains neither relocations nor undef bytes.
pub fn check_bytes(
&self,
cx: &impl HasDataLayout,
ptr: Pointer<Tag>,
size: Size,
allow_ptr_and_undef: bool,
) -> EvalResult<'tcx> {
// Check bounds and relocations on the edges
self.get_bytes_with_undef_and_ptr(cx, ptr, size)?;
// Check undef and ptr
if !allow_ptr_and_undef {
self.check_defined(ptr, size)?;
self.check_relocations(cx, ptr, size)?;
}
Ok(())
}
/// Writes `src` to the memory starting at `ptr.offset`.
///
/// Will do bounds checks on the allocation.
pub fn write_bytes(
&mut self,
cx: &impl HasDataLayout,
ptr: Pointer<Tag>,
src: &[u8],
) -> EvalResult<'tcx> {
let bytes = self.get_bytes_mut(cx, ptr, Size::from_bytes(src.len() as u64))?;
bytes.clone_from_slice(src);
Ok(())
}
/// Sets `count` bytes starting at `ptr.offset` with `val`. Basically `memset`.
pub fn write_repeat(
&mut self,
cx: &impl HasDataLayout,
ptr: Pointer<Tag>,
val: u8,
count: Size
) -> EvalResult<'tcx> {
let bytes = self.get_bytes_mut(cx, ptr, count)?;
for b in bytes {
*b = val;
}
Ok(())
}
/// Read a *non-ZST* scalar
///
/// ZSTs can't be read for two reasons:
/// * byteorder cannot work with zero-element buffers
/// * in order to obtain a `Pointer` we need to check for ZSTness anyway due to integer pointers
/// being valid for ZSTs
///
/// Note: This function does not do *any* alignment checks; you need to do these before calling.
pub fn read_scalar(
&self,
cx: &impl HasDataLayout,
ptr: Pointer<Tag>,
size: Size
) -> EvalResult<'tcx, ScalarMaybeUndef<Tag>> {
// get_bytes_with_undef_and_ptr tests relocation edges
let bytes = self.get_bytes_with_undef_and_ptr(cx, ptr, size)?;
// Undef check happens *after* we established that the access is in-bounds.
// We must not return Ok() for out-of-bounds pointers!
if self.check_defined(ptr, size).is_err() {
// this inflates undefined bytes to the entire scalar, even if only a few
// bytes are undefined
return Ok(ScalarMaybeUndef::Undef);
}
// Now we do the actual reading
let bits = read_target_uint(cx.data_layout().endian, bytes).unwrap();
// See if we got a pointer
if size != cx.data_layout().pointer_size {
// *Now* better make sure that the inside also is free of relocations.
self.check_relocations(cx, ptr, size)?;
} else {
match self.relocations.get(&ptr.offset) {
Some(&(tag, alloc_id)) => {
let ptr = Pointer::new_with_tag(alloc_id, Size::from_bytes(bits as u64), tag);
return Ok(ScalarMaybeUndef::Scalar(ptr.into()))
}
None => {},
}
}
// We don't. Just return the bits.
Ok(ScalarMaybeUndef::Scalar(Scalar::from_uint(bits, size)))
}
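A hedged sketch of the pointer-vs-bits decision in `read_scalar`; the enum and names are illustrative stand-ins for `Scalar` and `Pointer`:

#[derive(Debug, PartialEq)]
enum DecodedScalar {
    Bits(u128),
    Ptr { alloc_id: u64, offset: u64 },
}

// A pointer-sized read that starts exactly on a relocation reinterprets the
// stored bits as the offset into the relocation's target allocation;
// everything else comes back as raw bits.
fn decode(bits: u128, relocation: Option<u64>, ptr_sized: bool) -> DecodedScalar {
    match (ptr_sized, relocation) {
        (true, Some(alloc_id)) => DecodedScalar::Ptr { alloc_id, offset: bits as u64 },
        _ => DecodedScalar::Bits(bits),
    }
}

fn main() {
    assert_eq!(decode(8, Some(42), true), DecodedScalar::Ptr { alloc_id: 42, offset: 8 });
    assert_eq!(decode(8, None, true), DecodedScalar::Bits(8));
}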
/// Note: This function does not do *any* alignment checks; you need to do these before calling.
pub fn read_ptr_sized(
&self,
cx: &impl HasDataLayout,
ptr: Pointer<Tag>,
) -> EvalResult<'tcx, ScalarMaybeUndef<Tag>> {
self.read_scalar(cx, ptr, cx.data_layout().pointer_size)
}
/// Write a *non-ZST* scalar
///
/// ZSTs can't be written for two reasons:
/// * byteorder cannot work with zero-element buffers
/// * in order to obtain a `Pointer` we need to check for ZSTness anyway due to integer pointers
/// being valid for ZSTs
///
/// Note: This function does not do *any* alignment checks; you need to do these before calling.
pub fn write_scalar(
&mut self,
cx: &impl HasDataLayout,
ptr: Pointer<Tag>,
val: ScalarMaybeUndef<Tag>,
type_size: Size,
) -> EvalResult<'tcx> {
let val = match val {
ScalarMaybeUndef::Scalar(scalar) => scalar,
ScalarMaybeUndef::Undef => return self.mark_definedness(ptr, type_size, false),
};
let bytes = match val {
Scalar::Ptr(val) => {
assert_eq!(type_size, cx.data_layout().pointer_size);
val.offset.bytes() as u128
}
Scalar::Bits { bits, size } => {
assert_eq!(size as u64, type_size.bytes());
debug_assert_eq!(truncate(bits, Size::from_bytes(size.into())), bits,
"Unexpected value of size {} when writing to memory", size);
bits
},
};
{
let endian = cx.data_layout().endian;
let dst = self.get_bytes_mut(cx, ptr, type_size)?;
write_target_uint(endian, dst, bytes).unwrap();
}
// See if we have to also write a relocation
match val {
Scalar::Ptr(val) => {
self.relocations.insert(
ptr.offset,
(val.tag, val.alloc_id),
);
}
_ => {}
}
Ok(())
}
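The `truncate` in the `debug_assert_eq!` above keeps only the low `size * 8` bits of the value; a minimal sketch of that check, assuming `size` is the scalar's width in bytes:

fn truncate(bits: u128, size_bytes: u64) -> u128 {
    if size_bytes >= 16 {
        return bits; // nothing to mask off for 128-bit values
    }
    bits & ((1u128 << (size_bytes * 8)) - 1)
}

fn main() {
    // A 1-byte write of 0x1FF would trip the assert: truncation changes it.
    assert_ne!(truncate(0x1FF, 1), 0x1FF);
    assert_eq!(truncate(0xFF, 1), 0xFF);
}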
/// Note: This function does not do *any* alignment checks; you need to do these before calling.
pub fn write_ptr_sized(
&mut self,
cx: &impl HasDataLayout,
ptr: Pointer<Tag>,
val: ScalarMaybeUndef<Tag>
) -> EvalResult<'tcx> {
let ptr_size = cx.data_layout().pointer_size;
self.write_scalar(cx, ptr.into(), val, ptr_size)
}
}
/// Relocations
impl<'tcx, Tag: Copy, Extra> Allocation<Tag, Extra> {
/// Return all relocations overlapping with the given ptr-offset pair.
pub fn relocations(
&self,
cx: &impl HasDataLayout,
ptr: Pointer<Tag>,
size: Size,
) -> &[(Size, (Tag, AllocId))] {
// We have to go back `pointer_size - 1` bytes, as that one would still overlap with
// the beginning of this range.
let start = ptr.offset.bytes().saturating_sub(cx.data_layout().pointer_size.bytes() - 1);
let end = ptr.offset + size; // this does overflow checking
self.relocations.range(Size::from_bytes(start)..end)
}
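The overlap window above as plain arithmetic: a relocation covers `pointer_size` bytes, so one starting up to `pointer_size - 1` bytes before the range still overlaps its first byte (a sketch, not the real API):

fn relocation_window(offset: u64, size: u64, pointer_size: u64) -> (u64, u64) {
    // Half-open range [start, end) of relocation start offsets to consider.
    (offset.saturating_sub(pointer_size - 1), offset + size)
}

fn main() {
    // On a 64-bit target, a 4-byte access at offset 10 must consider
    // relocations starting anywhere in [3, 14).
    assert_eq!(relocation_window(10, 4, 8), (3, 14));
}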
/// Check that there are no relocations overlapping with the given range.
#[inline(always)]
fn check_relocations(
&self,
cx: &impl HasDataLayout,
ptr: Pointer<Tag>,
size: Size,
) -> EvalResult<'tcx> {
if self.relocations(cx, ptr, size).is_empty() {
Ok(())
} else {
err!(ReadPointerAsBytes)
}
}
/// Remove all relocations inside the given range.
/// If there are relocations overlapping with the edges, they
/// are removed as well *and* the bytes they cover are marked as
/// uninitialized. This is a somewhat odd "spooky action at a distance",
/// but it allows strictly more code to run than if we would just error
/// immediately in that case.
fn clear_relocations(
&mut self,
cx: &impl HasDataLayout,
ptr: Pointer<Tag>,
size: Size,
) -> EvalResult<'tcx> {
// Find the start and end of the given range and its outermost relocations.
let (first, last) = {
// Find all relocations overlapping the given range.
let relocations = self.relocations(cx, ptr, size);
if relocations.is_empty() {
return Ok(());
}
(relocations.first().unwrap().0,
relocations.last().unwrap().0 + cx.data_layout().pointer_size)
};
let start = ptr.offset;
let end = start + size;
// Mark parts of the outermost relocations as undefined if they partially fall outside the
// given range.
if first < start {
self.undef_mask.set_range(first, start, false);
}
if last > end {
self.undef_mask.set_range(end, last, false);
}
// Forget all the relocations.
self.relocations.remove_range(first..last);
Ok(())
}
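The "spooky action at a distance" in isolation: a hypothetical helper reporting which byte ranges would be marked undefined when edge relocations stick out of the cleared range:

fn edge_undef_ranges(
    first: u64, last: u64, // span covered by the overlapping relocations
    start: u64, end: u64,  // range actually being cleared
) -> Vec<(u64, u64)> {
    let mut ranges = Vec::new();
    if first < start {
        ranges.push((first, start)); // left overhang of the first relocation
    }
    if last > end {
        ranges.push((end, last)); // right overhang of the last relocation
    }
    ranges
}

fn main() {
    // Clearing [4, 12) while relocations span [0, 16) un-defines both edges.
    assert_eq!(edge_undef_ranges(0, 16, 4, 12), vec![(0, 4), (12, 16)]);
}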
/// Error if there are relocations overlapping with the edges of the
/// given memory range.
#[inline]
fn check_relocation_edges(
&self,
cx: &impl HasDataLayout,
ptr: Pointer<Tag>,
size: Size,
) -> EvalResult<'tcx> {
self.check_relocations(cx, ptr, Size::ZERO)?;
self.check_relocations(cx, ptr.offset(size, cx)?, Size::ZERO)?;
Ok(())
}
}
/// Undefined bytes
impl<'tcx, Tag, Extra> Allocation<Tag, Extra> {
/// Checks that a range of bytes is defined. If not, returns the `ReadUndefBytes`
/// error which will report the first byte which is undefined.
#[inline]
fn check_defined(&self, ptr: Pointer<Tag>, size: Size) -> EvalResult<'tcx> {
self.undef_mask.is_range_defined(
ptr.offset,
ptr.offset + size,
).or_else(|idx| err!(ReadUndefBytes(idx)))
}
pub fn mark_definedness(
&mut self,
ptr: Pointer<Tag>,
size: Size,
new_state: bool,
) -> EvalResult<'tcx> {
if size.bytes() == 0 {
return Ok(());
}
self.undef_mask.set_range(
ptr.offset,
ptr.offset + size,
new_state,
);
Ok(())
}
}
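A toy model of the undef mask these two methods drive, assuming one definedness flag per byte (the real `UndefMask` is a packed bitset, but the semantics match the calls above):

struct ToyUndefMask {
    defined: Vec<bool>, // one flag per byte of the allocation
}

impl ToyUndefMask {
    fn set_range(&mut self, start: usize, end: usize, new_state: bool) {
        for flag in &mut self.defined[start..end] {
            *flag = new_state;
        }
    }

    // Err carries the index of the first undefined byte, mirroring ReadUndefBytes.
    fn is_range_defined(&self, start: usize, end: usize) -> Result<(), usize> {
        match self.defined[start..end].iter().position(|&d| !d) {
            Some(i) => Err(start + i),
            None => Ok(()),
        }
    }
}

fn main() {
    let mut mask = ToyUndefMask { defined: vec![false; 8] };
    mask.set_range(0, 4, true);
    assert_eq!(mask.is_range_defined(0, 4), Ok(()));
    assert_eq!(mask.is_range_defined(2, 6), Err(4));
}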
pub trait AllocationExtra<Tag>: ::std::fmt::Debug + Default + Clone {
/// Hook for performing extra checks on a memory read access.
///

@ -2,7 +2,7 @@ use mir;
use ty::layout::{self, HasDataLayout, Size};
use super::{
AllocId, EvalResult,
AllocId, EvalResult, InboundsCheck,
};
////////////////////////////////////////////////////////////////////////////////
@@ -148,4 +148,21 @@ impl<'tcx, Tag> Pointer<Tag> {
pub fn erase_tag(self) -> Pointer {
Pointer { alloc_id: self.alloc_id, offset: self.offset, tag: () }
}
#[inline(always)]
pub fn check_in_alloc(
self,
allocation_size: Size,
check: InboundsCheck,
) -> EvalResult<'tcx, ()> {
if self.offset > allocation_size {
err!(PointerOutOfBounds {
ptr: self.erase_tag(),
check,
allocation_size,
})
} else {
Ok(())
}
}
}

@@ -21,16 +21,16 @@ use std::ptr;
use std::borrow::Cow;
use rustc::ty::{self, Instance, ParamEnv, query::TyCtxtAt};
use rustc::ty::layout::{self, Align, TargetDataLayout, Size, HasDataLayout};
use rustc::ty::layout::{Align, TargetDataLayout, Size, HasDataLayout};
pub use rustc::mir::interpret::{truncate, write_target_uint, read_target_uint};
use rustc_data_structures::fx::{FxHashSet, FxHashMap};
use syntax::ast::Mutability;
use super::{
Pointer, AllocId, Allocation, GlobalId, AllocationExtra, InboundsCheck,
Pointer, AllocId, Allocation, GlobalId, AllocationExtra,
EvalResult, Scalar, EvalErrorKind, AllocType, PointerArithmetic,
Machine, AllocMap, MayLeak, ScalarMaybeUndef, ErrorHandled,
Machine, AllocMap, MayLeak, ErrorHandled, InboundsCheck,
};
#[derive(Debug, PartialEq, Eq, Copy, Clone, Hash)]
@@ -251,9 +251,7 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> Memory<'a, 'mir, 'tcx, M> {
Scalar::Ptr(ptr) => {
// check this is not NULL -- which we can ensure only if this is in-bounds
// of some (potentially dead) allocation.
self.check_bounds_ptr(ptr, InboundsCheck::MaybeDead)?;
// data required for alignment check
let (_, align) = self.get_size_and_align(ptr.alloc_id);
let align = self.check_bounds_ptr_maybe_dead(ptr)?;
(ptr.offset.bytes(), align)
}
Scalar::Bits { bits, size } => {
@@ -287,44 +285,19 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> Memory<'a, 'mir, 'tcx, M> {
/// Check if the pointer is "in-bounds". Notice that a pointer pointing at the end
/// of an allocation (i.e., at the first *inaccessible* location) *is* considered
/// in-bounds! This follows C's/LLVM's rules. `check` indicates whether we
/// additionally require the pointer to be pointing to a *live* (still allocated)
/// allocation.
/// If you want to check bounds before doing a memory access, better use `check_bounds`.
pub fn check_bounds_ptr(
/// in-bounds! This follows C's/LLVM's rules.
/// This function also works for deallocated allocations.
/// Use `.get(ptr.alloc_id)?.check_bounds_ptr(ptr)` if you want to force the allocation
/// to still be live.
/// If you want to check bounds before doing a memory access, better first obtain
/// an `Allocation` and call `check_bounds`.
pub fn check_bounds_ptr_maybe_dead(
&self,
ptr: Pointer<M::PointerTag>,
check: InboundsCheck,
) -> EvalResult<'tcx> {
let allocation_size = match check {
InboundsCheck::Live => {
let alloc = self.get(ptr.alloc_id)?;
alloc.bytes.len() as u64
}
InboundsCheck::MaybeDead => {
self.get_size_and_align(ptr.alloc_id).0.bytes()
}
};
if ptr.offset.bytes() > allocation_size {
return err!(PointerOutOfBounds {
ptr: ptr.erase_tag(),
check,
allocation_size: Size::from_bytes(allocation_size),
});
}
Ok(())
}
/// Check if the memory range beginning at `ptr` and of size `size` is "in-bounds".
#[inline(always)]
pub fn check_bounds(
&self,
ptr: Pointer<M::PointerTag>,
size: Size,
check: InboundsCheck,
) -> EvalResult<'tcx> {
// if ptr.offset is in bounds, then so is ptr (because offset checks for overflow)
self.check_bounds_ptr(ptr.offset(size, &*self)?, check)
) -> EvalResult<'tcx, Align> {
let (allocation_size, align) = self.get_size_and_align(ptr.alloc_id);
ptr.check_in_alloc(allocation_size, InboundsCheck::MaybeDead)?;
Ok(align)
}
}
@@ -609,87 +582,19 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> Memory<'a, 'mir, 'tcx, M> {
}
}
/// Byte accessors
/// Byte Accessors
impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> Memory<'a, 'mir, 'tcx, M> {
/// The last argument controls whether we error out when there are undefined
/// or pointer bytes. You should never call this directly; call `get_bytes` or
/// `get_bytes_with_undef_and_ptr` instead.
///
/// This function also guarantees that the resulting pointer will remain stable
/// even when new allocations are pushed to the `HashMap`. `copy_repeatedly` relies
/// on that.
fn get_bytes_internal(
pub fn read_bytes(
&self,
ptr: Pointer<M::PointerTag>,
ptr: Scalar<M::PointerTag>,
size: Size,
align: Align,
check_defined_and_ptr: bool,
) -> EvalResult<'tcx, &[u8]> {
assert_ne!(size.bytes(), 0, "0-sized accesses should never even get a `Pointer`");
self.check_align(ptr.into(), align)?;
self.check_bounds(ptr, size, InboundsCheck::Live)?;
if check_defined_and_ptr {
self.check_defined(ptr, size)?;
self.check_relocations(ptr, size)?;
if size.bytes() == 0 {
Ok(&[])
} else {
// We still don't want relocations on the *edges*
self.check_relocation_edges(ptr, size)?;
let ptr = ptr.to_ptr()?;
self.get(ptr.alloc_id)?.get_bytes(self, ptr, size)
}
let alloc = self.get(ptr.alloc_id)?;
AllocationExtra::memory_read(alloc, ptr, size)?;
assert_eq!(ptr.offset.bytes() as usize as u64, ptr.offset.bytes());
assert_eq!(size.bytes() as usize as u64, size.bytes());
let offset = ptr.offset.bytes() as usize;
Ok(&alloc.bytes[offset..offset + size.bytes() as usize])
}
#[inline]
fn get_bytes(
&self,
ptr: Pointer<M::PointerTag>,
size: Size,
align: Align
) -> EvalResult<'tcx, &[u8]> {
self.get_bytes_internal(ptr, size, align, true)
}
/// It is the caller's responsibility to handle undefined and pointer bytes.
/// However, this still checks that there are no relocations on the *edges*.
#[inline]
fn get_bytes_with_undef_and_ptr(
&self,
ptr: Pointer<M::PointerTag>,
size: Size,
align: Align
) -> EvalResult<'tcx, &[u8]> {
self.get_bytes_internal(ptr, size, align, false)
}
/// Just calling this already marks everything as defined and removes relocations,
/// so be sure to actually put data there!
fn get_bytes_mut(
&mut self,
ptr: Pointer<M::PointerTag>,
size: Size,
align: Align,
) -> EvalResult<'tcx, &mut [u8]> {
assert_ne!(size.bytes(), 0, "0-sized accesses should never even get a `Pointer`");
self.check_align(ptr.into(), align)?;
self.check_bounds(ptr, size, InboundsCheck::Live)?;
self.mark_definedness(ptr, size, true)?;
self.clear_relocations(ptr, size)?;
let alloc = self.get_mut(ptr.alloc_id)?;
AllocationExtra::memory_written(alloc, ptr, size)?;
assert_eq!(ptr.offset.bytes() as usize as u64, ptr.offset.bytes());
assert_eq!(size.bytes() as usize as u64, size.bytes());
let offset = ptr.offset.bytes() as usize;
Ok(&mut alloc.bytes[offset..offset + size.bytes() as usize])
}
}
@@ -766,10 +671,11 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> Memory<'a, 'mir, 'tcx, M> {
length: u64,
nonoverlapping: bool,
) -> EvalResult<'tcx> {
self.check_align(src, src_align)?;
self.check_align(dest, dest_align)?;
if size.bytes() == 0 {
// Nothing to do for ZST, other than checking alignment and non-NULLness.
self.check_align(src, src_align)?;
self.check_align(dest, dest_align)?;
// Nothing to do for ZST, other than checking alignment and
// non-NULLness which already happened.
return Ok(());
}
let src = src.to_ptr()?;
@@ -781,7 +687,7 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> Memory<'a, 'mir, 'tcx, M> {
// (`get_bytes_with_undef_and_ptr` below checks that there are no
// relocations overlapping the edges; those would not be handled correctly).
let relocations = {
let relocations = self.relocations(src, size)?;
let relocations = self.get(src.alloc_id)?.relocations(self, src, size);
let mut new_relocations = Vec::with_capacity(relocations.len() * (length as usize));
for i in 0..length {
new_relocations.extend(
@@ -797,9 +703,15 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> Memory<'a, 'mir, 'tcx, M> {
new_relocations
};
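What the loop above computes, reduced to plain numbers; the `(offset, id)` pairs stand in for the real `(Size, (Tag, AllocId))` entries:

fn shifted_relocations(
    relocations: &[(u64, u32)], // (offset, alloc_id) entries in the source range
    src: u64,
    dest: u64,
    size: u64,
    length: u64,
) -> Vec<(u64, u32)> {
    let mut new_relocations = Vec::with_capacity(relocations.len() * length as usize);
    for i in 0..length {
        // Rebase each entry from the source range onto the i-th copy.
        new_relocations.extend(relocations.iter().map(|&(offset, id)| {
            (offset + dest - src + i * size, id)
        }));
    }
    new_relocations
}

fn main() {
    // One relocation at source offset 2, copied twice to dest offset 10, size 8.
    assert_eq!(shifted_relocations(&[(2, 7)], 0, 10, 8, 2), vec![(12, 7), (20, 7)]);
}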
// This also checks alignment, and relocation edges on the src.
let src_bytes = self.get_bytes_with_undef_and_ptr(src, size, src_align)?.as_ptr();
let dest_bytes = self.get_bytes_mut(dest, size * length, dest_align)?.as_mut_ptr();
let tcx = self.tcx.tcx;
// This checks relocation edges on the src.
let src_bytes = self.get(src.alloc_id)?
.get_bytes_with_undef_and_ptr(&tcx, src, size)?
.as_ptr();
let dest_bytes = self.get_mut(dest.alloc_id)?
.get_bytes_mut(&tcx, dest, size * length)?
.as_mut_ptr();
// SAFE: The above indexing would have panicked if there weren't at least `size` bytes
// behind `src` and `dest`. Also, we use the overlapping-safe `ptr::copy` if `src` and
@@ -840,276 +752,6 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> Memory<'a, 'mir, 'tcx, M> {
Ok(())
}
pub fn read_c_str(&self, ptr: Pointer<M::PointerTag>) -> EvalResult<'tcx, &[u8]> {
let alloc = self.get(ptr.alloc_id)?;
assert_eq!(ptr.offset.bytes() as usize as u64, ptr.offset.bytes());
let offset = ptr.offset.bytes() as usize;
match alloc.bytes[offset..].iter().position(|&c| c == 0) {
Some(size) => {
let p1 = Size::from_bytes((size + 1) as u64);
self.check_relocations(ptr, p1)?;
self.check_defined(ptr, p1)?;
Ok(&alloc.bytes[offset..offset + size])
}
None => err!(UnterminatedCString(ptr.erase_tag())),
}
}
pub fn check_bytes(
&self,
ptr: Scalar<M::PointerTag>,
size: Size,
allow_ptr_and_undef: bool,
) -> EvalResult<'tcx> {
// Empty accesses don't need to be valid pointers, but they should still be non-NULL
let align = Align::from_bytes(1).unwrap();
if size.bytes() == 0 {
self.check_align(ptr, align)?;
return Ok(());
}
let ptr = ptr.to_ptr()?;
// Check bounds, align and relocations on the edges
self.get_bytes_with_undef_and_ptr(ptr, size, align)?;
// Check undef and ptr
if !allow_ptr_and_undef {
self.check_defined(ptr, size)?;
self.check_relocations(ptr, size)?;
}
Ok(())
}
pub fn read_bytes(&self, ptr: Scalar<M::PointerTag>, size: Size) -> EvalResult<'tcx, &[u8]> {
// Empty accesses don't need to be valid pointers, but they should still be non-NULL
let align = Align::from_bytes(1).unwrap();
if size.bytes() == 0 {
self.check_align(ptr, align)?;
return Ok(&[]);
}
self.get_bytes(ptr.to_ptr()?, size, align)
}
pub fn write_bytes(&mut self, ptr: Scalar<M::PointerTag>, src: &[u8]) -> EvalResult<'tcx> {
// Empty accesses don't need to be valid pointers, but they should still be non-NULL
let align = Align::from_bytes(1).unwrap();
if src.is_empty() {
self.check_align(ptr, align)?;
return Ok(());
}
let bytes = self.get_bytes_mut(ptr.to_ptr()?, Size::from_bytes(src.len() as u64), align)?;
bytes.clone_from_slice(src);
Ok(())
}
pub fn write_repeat(
&mut self,
ptr: Scalar<M::PointerTag>,
val: u8,
count: Size
) -> EvalResult<'tcx> {
// Empty accesses don't need to be valid pointers, but they should still be non-NULL
let align = Align::from_bytes(1).unwrap();
if count.bytes() == 0 {
self.check_align(ptr, align)?;
return Ok(());
}
let bytes = self.get_bytes_mut(ptr.to_ptr()?, count, align)?;
for b in bytes {
*b = val;
}
Ok(())
}
/// Read a *non-ZST* scalar
pub fn read_scalar(
&self,
ptr: Pointer<M::PointerTag>,
ptr_align: Align,
size: Size
) -> EvalResult<'tcx, ScalarMaybeUndef<M::PointerTag>> {
// get_bytes_with_undef_and_ptr tests alignment and relocation edges
let bytes = self.get_bytes_with_undef_and_ptr(
ptr, size, ptr_align.min(self.int_align(size))
)?;
// Undef check happens *after* we established that the alignment is correct.
// We must not return Ok() for unaligned pointers!
if self.check_defined(ptr, size).is_err() {
// this inflates undefined bytes to the entire scalar, even if only a few
// bytes are undefined
return Ok(ScalarMaybeUndef::Undef);
}
// Now we do the actual reading
let bits = read_target_uint(self.tcx.data_layout.endian, bytes).unwrap();
// See if we got a pointer
if size != self.pointer_size() {
// *Now* better make sure that the inside also is free of relocations.
self.check_relocations(ptr, size)?;
} else {
let alloc = self.get(ptr.alloc_id)?;
match alloc.relocations.get(&ptr.offset) {
Some(&(tag, alloc_id)) => {
let ptr = Pointer::new_with_tag(alloc_id, Size::from_bytes(bits as u64), tag);
return Ok(ScalarMaybeUndef::Scalar(ptr.into()))
}
None => {},
}
}
// We don't. Just return the bits.
Ok(ScalarMaybeUndef::Scalar(Scalar::from_uint(bits, size)))
}
pub fn read_ptr_sized(
&self,
ptr: Pointer<M::PointerTag>,
ptr_align: Align
) -> EvalResult<'tcx, ScalarMaybeUndef<M::PointerTag>> {
self.read_scalar(ptr, ptr_align, self.pointer_size())
}
/// Write a *non-ZST* scalar
pub fn write_scalar(
&mut self,
ptr: Pointer<M::PointerTag>,
ptr_align: Align,
val: ScalarMaybeUndef<M::PointerTag>,
type_size: Size,
) -> EvalResult<'tcx> {
let val = match val {
ScalarMaybeUndef::Scalar(scalar) => scalar,
ScalarMaybeUndef::Undef => return self.mark_definedness(ptr, type_size, false),
};
let bytes = match val {
Scalar::Ptr(val) => {
assert_eq!(type_size, self.pointer_size());
val.offset.bytes() as u128
}
Scalar::Bits { bits, size } => {
assert_eq!(size as u64, type_size.bytes());
debug_assert_eq!(truncate(bits, Size::from_bytes(size.into())), bits,
"Unexpected value of size {} when writing to memory", size);
bits
},
};
{
// get_bytes_mut checks alignment
let endian = self.tcx.data_layout.endian;
let dst = self.get_bytes_mut(ptr, type_size, ptr_align)?;
write_target_uint(endian, dst, bytes).unwrap();
}
// See if we have to also write a relocation
match val {
Scalar::Ptr(val) => {
self.get_mut(ptr.alloc_id)?.relocations.insert(
ptr.offset,
(val.tag, val.alloc_id),
);
}
_ => {}
}
Ok(())
}
pub fn write_ptr_sized(
&mut self,
ptr: Pointer<M::PointerTag>,
ptr_align: Align,
val: ScalarMaybeUndef<M::PointerTag>
) -> EvalResult<'tcx> {
let ptr_size = self.pointer_size();
self.write_scalar(ptr.into(), ptr_align, val, ptr_size)
}
fn int_align(&self, size: Size) -> Align {
// We assume pointer-sized integers have the same alignment as pointers.
// We also assume signed and unsigned integers of the same size have the same alignment.
let ity = match size.bytes() {
1 => layout::I8,
2 => layout::I16,
4 => layout::I32,
8 => layout::I64,
16 => layout::I128,
_ => bug!("bad integer size: {}", size.bytes()),
};
ity.align(self).abi
}
}
/// Relocations
impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> Memory<'a, 'mir, 'tcx, M> {
/// Return all relocations overlapping with the given ptr-offset pair.
fn relocations(
&self,
ptr: Pointer<M::PointerTag>,
size: Size,
) -> EvalResult<'tcx, &[(Size, (M::PointerTag, AllocId))]> {
// We have to go back `pointer_size - 1` bytes, as that one would still overlap with
// the beginning of this range.
let start = ptr.offset.bytes().saturating_sub(self.pointer_size().bytes() - 1);
let end = ptr.offset + size; // this does overflow checking
Ok(self.get(ptr.alloc_id)?.relocations.range(Size::from_bytes(start)..end))
}
/// Check that there are no relocations overlapping with the given range.
#[inline(always)]
fn check_relocations(&self, ptr: Pointer<M::PointerTag>, size: Size) -> EvalResult<'tcx> {
if self.relocations(ptr, size)?.len() != 0 {
err!(ReadPointerAsBytes)
} else {
Ok(())
}
}
/// Remove all relocations inside the given range.
/// If there are relocations overlapping with the edges, they
/// are removed as well *and* the bytes they cover are marked as
/// uninitialized. This is a somewhat odd "spooky action at a distance",
/// but it allows strictly more code to run than if we would just error
/// immediately in that case.
fn clear_relocations(&mut self, ptr: Pointer<M::PointerTag>, size: Size) -> EvalResult<'tcx> {
// Find the start and end of the given range and its outermost relocations.
let (first, last) = {
// Find all relocations overlapping the given range.
let relocations = self.relocations(ptr, size)?;
if relocations.is_empty() {
return Ok(());
}
(relocations.first().unwrap().0,
relocations.last().unwrap().0 + self.pointer_size())
};
let start = ptr.offset;
let end = start + size;
let alloc = self.get_mut(ptr.alloc_id)?;
// Mark parts of the outermost relocations as undefined if they partially fall outside the
// given range.
if first < start {
alloc.undef_mask.set_range(first, start, false);
}
if last > end {
alloc.undef_mask.set_range(end, last, false);
}
// Forget all the relocations.
alloc.relocations.remove_range(first..last);
Ok(())
}
/// Error if there are relocations overlapping with the edges of the
/// given memory range.
#[inline]
fn check_relocation_edges(&self, ptr: Pointer<M::PointerTag>, size: Size) -> EvalResult<'tcx> {
self.check_relocations(ptr, Size::ZERO)?;
self.check_relocations(ptr.offset(size, self)?, Size::ZERO)?;
Ok(())
}
}
/// Undefined bytes
@@ -1141,33 +783,4 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> Memory<'a, 'mir, 'tcx, M> {
Ok(())
}
/// Checks that a range of bytes is defined. If not, returns the `ReadUndefBytes`
/// error which will report the first byte which is undefined.
#[inline]
fn check_defined(&self, ptr: Pointer<M::PointerTag>, size: Size) -> EvalResult<'tcx> {
let alloc = self.get(ptr.alloc_id)?;
alloc.undef_mask.is_range_defined(
ptr.offset,
ptr.offset + size,
).or_else(|idx| err!(ReadUndefBytes(idx)))
}
pub fn mark_definedness(
&mut self,
ptr: Pointer<M::PointerTag>,
size: Size,
new_state: bool,
) -> EvalResult<'tcx> {
if size.bytes() == 0 {
return Ok(());
}
let alloc = self.get_mut(ptr.alloc_id)?;
alloc.undef_mask.set_range(
ptr.offset,
ptr.offset + size,
new_state,
);
Ok(())
}
}

@@ -19,7 +19,7 @@ use rustc::ty::layout::{self, Size, LayoutOf, TyLayout, HasDataLayout, IntegerEx
use rustc::mir::interpret::{
GlobalId, AllocId,
ConstValue, Pointer, Scalar,
EvalResult, EvalErrorKind, InboundsCheck,
EvalResult, EvalErrorKind,
};
use super::{EvalContext, Machine, MemPlace, MPlaceTy, MemoryKind};
pub use rustc::mir::interpret::ScalarMaybeUndef;
@@ -275,10 +275,14 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M>
return Ok(Some(Immediate::Scalar(Scalar::zst().into())));
}
// check for integer pointers before alignment to report better errors
let ptr = ptr.to_ptr()?;
self.memory.check_align(ptr.into(), ptr_align)?;
match mplace.layout.abi {
layout::Abi::Scalar(..) => {
let scalar = self.memory.read_scalar(ptr, ptr_align, mplace.layout.size)?;
let scalar = self.memory
.get(ptr.alloc_id)?
.read_scalar(self, ptr, mplace.layout.size)?;
Ok(Some(Immediate::Scalar(scalar)))
}
layout::Abi::ScalarPair(ref a, ref b) => {
@@ -287,9 +291,15 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M>
let a_ptr = ptr;
let b_offset = a_size.align_to(b.align(self).abi);
assert!(b_offset.bytes() > 0); // we later use the offset to test which field to use
let b_ptr = ptr.offset(b_offset, self)?.into();
let a_val = self.memory.read_scalar(a_ptr, ptr_align, a_size)?;
let b_val = self.memory.read_scalar(b_ptr, ptr_align, b_size)?;
let b_ptr = ptr.offset(b_offset, self)?;
let a_val = self.memory
.get(ptr.alloc_id)?
.read_scalar(self, a_ptr, a_size)?;
let b_align = ptr_align.restrict_for_offset(b_offset);
self.memory.check_align(b_ptr.into(), b_align)?;
let b_val = self.memory
.get(ptr.alloc_id)?
.read_scalar(self, b_ptr, b_size)?;
Ok(Some(Immediate::ScalarPair(a_val, b_val)))
}
_ => Ok(None),
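The `b_offset` computation above in plain arithmetic: field `a`'s size rounded up to field `b`'s alignment (the assumed `Size::align_to` semantics):

fn b_offset(a_size: u64, b_align: u64) -> u64 {
    // Round a_size up to the next multiple of b_align.
    (a_size + b_align - 1) / b_align * b_align
}

fn main() {
    assert_eq!(b_offset(1, 4), 4); // e.g. (u8, u32): b lives at byte 4
    assert_eq!(b_offset(4, 4), 4); // already aligned: no padding
}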
@@ -637,7 +647,7 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M>
ScalarMaybeUndef::Scalar(Scalar::Ptr(ptr)) => {
// The niche must be just 0 (which an inbounds pointer value never is)
let ptr_valid = niche_start == 0 && variants_start == variants_end &&
self.memory.check_bounds_ptr(ptr, InboundsCheck::MaybeDead).is_ok();
self.memory.check_bounds_ptr_maybe_dead(ptr).is_ok();
if !ptr_valid {
return err!(InvalidDiscriminant(raw_discr.erase_tag()));
}

@@ -159,6 +159,19 @@ impl<Tag> MemPlace<Tag> {
Some(meta) => Immediate::ScalarPair(self.ptr.into(), meta.into()),
}
}
pub fn offset(
self,
offset: Size,
meta: Option<Scalar<Tag>>,
cx: &impl HasDataLayout,
) -> EvalResult<'tcx, Self> {
Ok(MemPlace {
ptr: self.ptr.ptr_offset(offset, cx)?,
align: self.align.restrict_for_offset(offset),
meta,
})
}
}
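A sketch of the `restrict_for_offset` semantics assumed by `MemPlace::offset` above: the restricted alignment must divide both the base alignment and the offset:

fn restrict_for_offset(align: u64, offset: u64) -> u64 {
    if offset == 0 {
        return align; // offset 0 preserves the full alignment
    }
    // Largest power of two dividing the offset, capped by the base alignment.
    align.min(1u64 << offset.trailing_zeros())
}

fn main() {
    assert_eq!(restrict_for_offset(8, 4), 4); // 8-aligned base + 4 is only 4-aligned
    assert_eq!(restrict_for_offset(8, 16), 8); // multiples of 8 keep 8-alignment
}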
impl<'tcx, Tag> MPlaceTy<'tcx, Tag> {
@@ -174,6 +187,19 @@ impl<'tcx, Tag> MPlaceTy<'tcx, Tag> {
}
}
pub fn offset(
self,
offset: Size,
meta: Option<Scalar<Tag>>,
layout: TyLayout<'tcx>,
cx: &impl HasDataLayout,
) -> EvalResult<'tcx, Self> {
Ok(MPlaceTy {
mplace: self.mplace.offset(offset, meta, cx)?,
layout,
})
}
#[inline]
fn from_aligned_ptr(ptr: Pointer<Tag>, layout: TyLayout<'tcx>) -> Self {
MPlaceTy { mplace: MemPlace::from_ptr(ptr, layout.align.abi), layout }
@@ -367,13 +393,9 @@ where
(None, offset)
};
let ptr = base.ptr.ptr_offset(offset, self)?;
let align = base.align
// We do not look at `base.layout.align` nor `field_layout.align`, unlike
// codegen -- mostly to see if we can get away with that
.restrict_for_offset(offset); // must be last thing that happens
Ok(MPlaceTy { mplace: MemPlace { ptr, align, meta }, layout: field_layout })
// We do not look at `base.layout.align` nor `field_layout.align`, unlike
// codegen -- mostly to see if we can get away with that
base.offset(offset, meta, field_layout, self)
}
// Iterates over all fields of an array. Much more efficient than doing the
@@ -391,13 +413,7 @@ where
};
let layout = base.layout.field(self, 0)?;
let dl = &self.tcx.data_layout;
Ok((0..len).map(move |i| {
let ptr = base.ptr.ptr_offset(i * stride, dl)?;
Ok(MPlaceTy {
mplace: MemPlace { ptr, align: base.align, meta: None },
layout
})
}))
Ok((0..len).map(move |i| base.offset(i * stride, None, layout, dl)))
}
pub fn mplace_subslice(
@@ -416,7 +432,6 @@ where
stride * from,
_ => bug!("Unexpected layout of index access: {:#?}", base.layout),
};
let ptr = base.ptr.ptr_offset(from_offset, self)?;
// Compute meta and new layout
let inner_len = len - to - from;
@@ -433,11 +448,7 @@ where
bug!("cannot subslice non-array type: `{:?}`", base.layout.ty),
};
let layout = self.layout_of(ty)?;
Ok(MPlaceTy {
mplace: MemPlace { ptr, align: base.align, meta },
layout
})
base.offset(from_offset, meta, layout, self)
}
pub fn mplace_downcast(
@@ -713,11 +724,13 @@ where
// Nothing to do for ZSTs, other than checking alignment
if dest.layout.is_zst() {
self.memory.check_align(ptr, ptr_align)?;
return Ok(());
return self.memory.check_align(ptr, ptr_align);
}
// check for integer pointers before alignment to report better errors
let ptr = ptr.to_ptr()?;
self.memory.check_align(ptr.into(), ptr_align)?;
let tcx = &*self.tcx;
// FIXME: We should check that there are dest.layout.size many bytes available in
// memory. The code below is not sufficient, with enough padding it might not
// cover all the bytes!
@@ -728,9 +741,8 @@ where
_ => bug!("write_immediate_to_mplace: invalid Scalar layout: {:#?}",
dest.layout)
}
self.memory.write_scalar(
ptr, ptr_align.min(dest.layout.align.abi), scalar, dest.layout.size
self.memory.get_mut(ptr.alloc_id)?.write_scalar(
tcx, ptr, scalar, dest.layout.size
)
}
Immediate::ScalarPair(a_val, b_val) => {
@@ -740,16 +752,22 @@ where
dest.layout)
};
let (a_size, b_size) = (a.size(self), b.size(self));
let (a_align, b_align) = (a.align(self).abi, b.align(self).abi);
let b_offset = a_size.align_to(b_align);
let b_ptr = ptr.offset(b_offset, self)?.into();
let b_offset = a_size.align_to(b.align(self).abi);
let b_align = ptr_align.restrict_for_offset(b_offset);
let b_ptr = ptr.offset(b_offset, self)?;
self.memory.check_align(b_ptr.into(), b_align)?;
// It is tempting to verify `b_offset` against `layout.fields.offset(1)`,
// but that does not work: We could be a newtype around a pair, then the
// fields do not match the `ScalarPair` components.
self.memory.write_scalar(ptr, ptr_align.min(a_align), a_val, a_size)?;
self.memory.write_scalar(b_ptr, ptr_align.min(b_align), b_val, b_size)
self.memory
.get_mut(ptr.alloc_id)?
.write_scalar(tcx, ptr, a_val, a_size)?;
self.memory
.get_mut(b_ptr.alloc_id)?
.write_scalar(tcx, b_ptr, b_val, b_size)
}
}
}

@@ -401,12 +401,12 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M>
// cannot use the shim here, because that will only result in infinite recursion
ty::InstanceDef::Virtual(_, idx) => {
let ptr_size = self.pointer_size();
let ptr_align = self.tcx.data_layout.pointer_align.abi;
let ptr = self.deref_operand(args[0])?;
let vtable = ptr.vtable()?;
let fn_ptr = self.memory.read_ptr_sized(
self.memory.check_align(vtable.into(), self.tcx.data_layout.pointer_align.abi)?;
let fn_ptr = self.memory.get(vtable.alloc_id)?.read_ptr_sized(
self,
vtable.offset(ptr_size * (idx as u64 + 3), self)?,
ptr_align
)?.to_ptr()?;
let instance = self.memory.get_fn(fn_ptr)?;

@@ -55,23 +55,34 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M>
ptr_align,
MemoryKind::Vtable,
)?.with_default_tag();
let tcx = &*self.tcx;
let drop = ::monomorphize::resolve_drop_in_place(*self.tcx, ty);
let drop = ::monomorphize::resolve_drop_in_place(*tcx, ty);
let drop = self.memory.create_fn_alloc(drop).with_default_tag();
self.memory.write_ptr_sized(vtable, ptr_align, Scalar::Ptr(drop).into())?;
// no need to do any alignment checks on the memory accesses below, because we know the
// allocation is correctly aligned as we created it above. Also we're only offsetting by
// multiples of `ptr_align`, which means that it will stay aligned to `ptr_align`.
self.memory
.get_mut(vtable.alloc_id)?
.write_ptr_sized(tcx, vtable, Scalar::Ptr(drop).into())?;
let size_ptr = vtable.offset(ptr_size, self)?;
self.memory.write_ptr_sized(size_ptr, ptr_align, Scalar::from_uint(size, ptr_size).into())?;
self.memory
.get_mut(size_ptr.alloc_id)?
.write_ptr_sized(tcx, size_ptr, Scalar::from_uint(size, ptr_size).into())?;
let align_ptr = vtable.offset(ptr_size * 2, self)?;
self.memory.write_ptr_sized(align_ptr, ptr_align,
Scalar::from_uint(align, ptr_size).into())?;
self.memory
.get_mut(align_ptr.alloc_id)?
.write_ptr_sized(tcx, align_ptr, Scalar::from_uint(align, ptr_size).into())?;
for (i, method) in methods.iter().enumerate() {
if let Some((def_id, substs)) = *method {
let instance = self.resolve(def_id, substs)?;
let fn_ptr = self.memory.create_fn_alloc(instance).with_default_tag();
let method_ptr = vtable.offset(ptr_size * (3 + i as u64), self)?;
self.memory.write_ptr_sized(method_ptr, ptr_align, Scalar::Ptr(fn_ptr).into())?;
self.memory
.get_mut(method_ptr.alloc_id)?
.write_ptr_sized(tcx, method_ptr, Scalar::Ptr(fn_ptr).into())?;
}
}
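The vtable layout written above, summarized as slot arithmetic; this matches the `ptr_size * (3 + i)` offsets here and the `idx + 3` lookup in the `Virtual` call path (a sketch, assuming a 64-bit target in the example):

// Slot layout: [drop_in_place, size, align, method 0, method 1, ...],
// each slot one pointer wide.
fn vtable_slot_offset(ptr_size: u64, slot: u64) -> u64 {
    ptr_size * slot
}

fn method_slot(index: u64) -> u64 {
    3 + index // methods start after the drop, size, and align slots
}

fn main() {
    let ptr_size = 8; // 64-bit target assumed
    assert_eq!(vtable_slot_offset(ptr_size, method_slot(0)), 24);
    assert_eq!(vtable_slot_offset(ptr_size, 1), 8); // the size slot
}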
@@ -87,8 +98,11 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M>
vtable: Pointer<M::PointerTag>,
) -> EvalResult<'tcx, (ty::Instance<'tcx>, ty::Ty<'tcx>)> {
// we don't care about the pointee type, we just want a pointer
let pointer_align = self.tcx.data_layout.pointer_align.abi;
let drop_fn = self.memory.read_ptr_sized(vtable, pointer_align)?.to_ptr()?;
self.memory.check_align(vtable.into(), self.tcx.data_layout.pointer_align.abi)?;
let drop_fn = self.memory
.get(vtable.alloc_id)?
.read_ptr_sized(self, vtable)?
.to_ptr()?;
let drop_instance = self.memory.get_fn(drop_fn)?;
trace!("Found drop fn: {:?}", drop_instance);
let fn_sig = drop_instance.ty(*self.tcx).fn_sig(*self.tcx);
@@ -103,12 +117,13 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M>
vtable: Pointer<M::PointerTag>,
) -> EvalResult<'tcx, (Size, Align)> {
let pointer_size = self.pointer_size();
let pointer_align = self.tcx.data_layout.pointer_align.abi;
let size = self.memory.read_ptr_sized(vtable.offset(pointer_size, self)?,pointer_align)?
self.memory.check_align(vtable.into(), self.tcx.data_layout.pointer_align.abi)?;
let alloc = self.memory.get(vtable.alloc_id)?;
let size = alloc.read_ptr_sized(self, vtable.offset(pointer_size, self)?)?
.to_bits(pointer_size)? as u64;
let align = self.memory.read_ptr_sized(
let align = alloc.read_ptr_sized(
self,
vtable.offset(pointer_size * 2, self)?,
pointer_align
)?.to_bits(pointer_size)? as u64;
Ok((Size::from_bytes(size), Align::from_bytes(align).unwrap()))
}

@@ -17,11 +17,11 @@ use rustc::ty::layout::{self, Size, Align, TyLayout, LayoutOf, VariantIdx};
use rustc::ty;
use rustc_data_structures::fx::FxHashSet;
use rustc::mir::interpret::{
Scalar, AllocType, EvalResult, EvalErrorKind, InboundsCheck,
Scalar, AllocType, EvalResult, EvalErrorKind,
};
use super::{
OpTy, MPlaceTy, Machine, EvalContext, ValueVisitor
OpTy, Machine, EvalContext, ValueVisitor,
};
macro_rules! validation_failure {
@@ -396,7 +396,9 @@ impl<'rt, 'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>>
// Maintain the invariant that the place we are checking is
// already verified to be in-bounds.
try_validation!(
self.ecx.memory.check_bounds(ptr, size, InboundsCheck::Live),
self.ecx.memory
.get(ptr.alloc_id)?
.check_bounds(self.ecx, ptr, size),
"dangling (not entirely in bounds) reference", self.path);
}
// Check if we have encountered this pointer+layout combination
@@ -520,20 +522,25 @@ impl<'rt, 'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>>
_ => false,
}
} => {
let mplace = if op.layout.is_zst() {
// it's a ZST, the memory content cannot matter
MPlaceTy::dangling(op.layout, self.ecx)
} else {
// non-ZST array/slice/str cannot be immediate
op.to_mem_place()
};
// bailing out for ZSTs is ok, since the array element type can only be int/float
if op.layout.is_zst() {
return Ok(());
}
// non-ZST array cannot be immediate, slices are never immediate
let mplace = op.to_mem_place();
// This is the length of the array/slice.
let len = mplace.len(self.ecx)?;
// zero length slices have nothing to be checked
if len == 0 {
return Ok(());
}
// This is the element type size.
let ty_size = self.ecx.layout_of(tys)?.size;
// This is the size in bytes of the whole array.
let size = ty_size * len;
let ptr = mplace.ptr.to_ptr()?;
// NOTE: Keep this in sync with the handling of integer and float
// types above, in `visit_primitive`.
// In run-time mode, we accept pointers in here. This is actually more
@@ -543,8 +550,9 @@ impl<'rt, 'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>>
// to reject those pointers, we just do not have the machinery to
// talk about parts of a pointer.
// We also accept undef, for consistency with the type-based checks.
match self.ecx.memory.check_bytes(
mplace.ptr,
match self.ecx.memory.get(ptr.alloc_id)?.check_bytes(
self.ecx,
ptr,
size,
/*allow_ptr_and_undef*/!self.const_mode,
) {

@@ -0,0 +1,7 @@
// compile-pass
#![feature(const_raw_ptr_deref)]
const FOO: &str = unsafe { &*(1_usize as *const [u8; 0] as *const [u8] as *const str) };
fn main() {}

@@ -0,0 +1,5 @@
#![feature(const_raw_ptr_deref, never_type)]
const FOO: &[!; 1] = unsafe { &*(1_usize as *const [!; 1]) }; //~ ERROR undefined behavior
fn main() {}

@@ -0,0 +1,11 @@
error[E0080]: it is undefined behavior to use this value
--> $DIR/validate_never_arrays.rs:3:1
|
LL | const FOO: &[!; 1] = unsafe { &*(1_usize as *const [!; 1]) }; //~ ERROR undefined behavior
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ type validation failed: encountered a value of an uninhabited type at .<deref>[0]
|
= note: The rules on what exactly is undefined behavior aren't clear, so this check might be overzealous. Please open an issue on the rust compiler repository if you believe it should not be considered undefined behavior
error: aborting due to previous error
For more information about this error, try `rustc --explain E0080`.

@@ -1 +1 @@
Subproject commit bbb1d80703f272a5592ceeb3832a489776512251
Subproject commit 32e93ed7762e5aa1a721636096848fc3c7bc7218