Rollup merge of #134050 - RalfJung:miri-sync, r=RalfJung

Miri subtree update

r? `@ghost`
This commit is contained in:
Matthias Krüger 2024-12-09 01:56:35 +01:00 committed by GitHub
commit e01cb62c91
60 changed files with 1811 additions and 1220 deletions

View file

@ -2274,7 +2274,6 @@ dependencies = [
"chrono",
"chrono-tz",
"colored",
"ctrlc",
"directories",
"getrandom",
"libc",

View file

@ -1,6 +1,6 @@
# This file is automatically @generated by Cargo.
# It is not intended for manual editing.
version = 3
version = 4
[[package]]
name = "addr2line"
@ -150,12 +150,6 @@ version = "1.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd"
[[package]]
name = "cfg_aliases"
version = "0.1.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "fd16c4719339c4530435d38e511904438d07cce7950afa3718a84ac36c10e89e"
[[package]]
name = "chrono"
version = "0.4.38"
@ -286,16 +280,6 @@ dependencies = [
"typenum",
]
[[package]]
name = "ctrlc"
version = "3.4.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "672465ae37dc1bc6380a6547a8883d5dd397b0f1faaad4f265726cc7042a5345"
dependencies = [
"nix",
"windows-sys 0.52.0",
]
[[package]]
name = "directories"
version = "5.0.1"
@ -419,16 +403,6 @@ version = "1.0.11"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "49f1f14873335454500d59611f1cf4a4b0f786f9ac11f4312a78e4cf2566695b"
[[package]]
name = "jemalloc-sys"
version = "0.5.4+5.3.0-patched"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ac6c1946e1cea1788cbfde01c993b52a10e2da07f4bac608228d1bed20bfebf2"
dependencies = [
"cc",
"libc",
]
[[package]]
name = "lazy_static"
version = "1.5.0"
@ -554,10 +528,8 @@ dependencies = [
"chrono",
"chrono-tz",
"colored",
"ctrlc",
"directories",
"getrandom",
"jemalloc-sys",
"libc",
"libffi",
"libloading",
@ -567,22 +539,11 @@ dependencies = [
"rustc_version",
"smallvec",
"tempfile",
"tikv-jemalloc-sys",
"ui_test",
"windows-sys 0.52.0",
]
[[package]]
name = "nix"
version = "0.28.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ab2156c4fce2f8df6c499cc1c763e4394b7482525bf2a9701c9d79d215f519e4"
dependencies = [
"bitflags",
"cfg-if",
"cfg_aliases",
"libc",
]
[[package]]
name = "num-traits"
version = "0.2.19"
@ -1031,6 +992,16 @@ dependencies = [
"once_cell",
]
[[package]]
name = "tikv-jemalloc-sys"
version = "0.6.0+5.3.0-1-ge13ca993e8ccb9ba9847cc330696e02839f328f7"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "cd3c60906412afa9c2b5b5a48ca6a5abe5736aec9eb48ad05037a677e52e4e2d"
dependencies = [
"cc",
"libc",
]
[[package]]
name = "tracing"
version = "0.1.40"

View file

@ -23,7 +23,6 @@ rand = "0.8"
smallvec = { version = "1.7", features = ["drain_filter"] }
aes = { version = "0.8.3", features = ["hazmat"] }
measureme = "11"
ctrlc = "3.2.5"
chrono = { version = "0.4.38", default-features = false }
chrono-tz = "0.10"
directories = "5"

View file

@ -68,9 +68,10 @@ Further caveats that Miri users should be aware of:
not support networking. System API support varies between targets; if you run
on Windows it is a good idea to use `--target x86_64-unknown-linux-gnu` to get
better support.
* Weak memory emulation may [produce weak behaviors](https://github.com/rust-lang/miri/issues/2301)
when `SeqCst` fences are used that are not actually permitted by the Rust memory model, and it
cannot produce all behaviors possibly observable on real hardware.
* Weak memory emulation is not complete: there are legal behaviors that Miri will never produce.
However, Miri produces many behaviors that are hard to observe on real hardware, so it can help
quite a bit in finding weak memory concurrency bugs. To be really sure about complicated atomic
code, use specialized tools such as [loom](https://github.com/tokio-rs/loom).
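As a minimal illustration of such weak behaviors (a sketch, not part of the Miri docs): in the classic store-buffering litmus test below, both loads returning 0 is impossible under sequential consistency but permitted with `Relaxed` orderings, and Miri's weak memory emulation can actually produce it.

```rust
use std::sync::atomic::AtomicU32;
use std::sync::atomic::Ordering::Relaxed;
use std::thread;

static X: AtomicU32 = AtomicU32::new(0);
static Y: AtomicU32 = AtomicU32::new(0);

fn main() {
    let t1 = thread::spawn(|| {
        X.store(1, Relaxed);
        Y.load(Relaxed)
    });
    let t2 = thread::spawn(|| {
        Y.store(1, Relaxed);
        X.load(Relaxed)
    });
    let results = (t1.join().unwrap(), t2.join().unwrap());
    // Under `cargo miri run` with varying seeds, (0, 0) can show up here,
    // a weak behavior no interleaving of the two threads can explain.
    println!("{results:?}");
}
```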
Moreover, Miri fundamentally cannot ensure that your code is *sound*. [Soundness] is the property of
never causing undefined behavior when invoked from arbitrary safe code, even in combination with

View file

@ -0,0 +1,7 @@
# This file is automatically @generated by Cargo.
# It is not intended for manual editing.
version = 3
[[package]]
name = "string-replace"
version = "0.1.0"

View file

@ -0,0 +1,8 @@
[package]
name = "string-replace"
version = "0.1.0"
edition = "2021"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]

File diff suppressed because one or more lines are too long

View file

@ -0,0 +1,7 @@
const TCB_INFO_JSON: &str = include_str!("../data.json");
fn main() {
let tcb_json = TCB_INFO_JSON;
let bad_tcb_json = tcb_json.replace("female", "male");
std::hint::black_box(bad_tcb_json);
}

View file

@ -348,14 +348,17 @@ pub fn phase_rustc(mut args: impl Iterator<Item = String>, phase: RustcPhase) {
// Create a stub .d file to stop Cargo from "rebuilding" the crate:
// https://github.com/rust-lang/miri/issues/1724#issuecomment-787115693
// As we store a JSON file instead of building the crate here, an empty file is fine.
let dep_info_name = format!(
"{}/{}{}.d",
get_arg_flag_value("--out-dir").unwrap(),
let mut dep_info_name = PathBuf::from(get_arg_flag_value("--out-dir").unwrap());
dep_info_name.push(format!(
"{}{}.d",
get_arg_flag_value("--crate-name").unwrap(),
get_arg_flag_value("extra-filename").unwrap_or_default(),
);
));
if verbose > 0 {
eprintln!("[cargo-miri rustc] writing stub dep-info to `{dep_info_name}`");
eprintln!(
"[cargo-miri rustc] writing stub dep-info to `{}`",
dep_info_name.display()
);
}
File::create(dep_info_name).expect("failed to create fake .d file");
}
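For context on this hunk: the change swaps string formatting for `PathBuf::push`, which composes paths with the platform's separator and handles non-UTF-8 paths via `display()`. A standalone sketch with hypothetical stand-in values (the real values come from `--out-dir`, `--crate-name`, and `extra-filename`):

```rust
use std::path::PathBuf;

fn main() {
    // Hypothetical stand-ins for the get_arg_flag_value(...) results.
    let out_dir = "target/miri/debug";
    let crate_name = "mycrate";
    let extra_filename = "-0123abcd";

    let mut dep_info_name = PathBuf::from(out_dir);
    dep_info_name.push(format!("{crate_name}{extra_filename}.d"));

    // Paths are not guaranteed to be valid UTF-8, hence `display()` rather
    // than interpolating the PathBuf directly.
    println!("[cargo-miri rustc] writing stub dep-info to `{}`", dep_info_name.display());
}
```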

View file

@ -152,9 +152,9 @@ case $HOST_TARGET in
UNIX="hello panic/panic panic/unwind concurrency/simple atomic libc-mem libc-misc libc-random env num_cpus" # the things that are very similar across all Unixes, and hence easily supported there
TEST_TARGET=x86_64-unknown-freebsd run_tests_minimal $BASIC $UNIX time hashmap random threadname pthread fs libc-pipe
TEST_TARGET=i686-unknown-freebsd run_tests_minimal $BASIC $UNIX time hashmap random threadname pthread fs libc-pipe
TEST_TARGET=x86_64-unknown-illumos run_tests_minimal $BASIC $UNIX time hashmap random thread sync available-parallelism tls libc-pipe
TEST_TARGET=x86_64-pc-solaris run_tests_minimal $BASIC $UNIX time hashmap random thread sync available-parallelism tls libc-pipe
TEST_TARGET=aarch64-linux-android run_tests_minimal $BASIC $UNIX time hashmap random sync threadname pthread
TEST_TARGET=x86_64-unknown-illumos run_tests_minimal $BASIC $UNIX time hashmap random thread sync available-parallelism tls libc-pipe fs
TEST_TARGET=x86_64-pc-solaris run_tests_minimal $BASIC $UNIX time hashmap random thread sync available-parallelism tls libc-pipe fs
TEST_TARGET=aarch64-linux-android run_tests_minimal $BASIC $UNIX time hashmap random sync threadname pthread epoll eventfd
TEST_TARGET=wasm32-wasip2 run_tests_minimal $BASIC wasm
TEST_TARGET=wasm32-unknown-unknown run_tests_minimal no_std empty_main wasm # this target doesn't really have std
TEST_TARGET=thumbv7em-none-eabihf run_tests_minimal no_std

View file

@ -1 +1 @@
2d0ea7956c45de6e421fd579e2ded27be405dec6
728f2daab42ba8f1b3d5caab62495798d1eabfa1

View file

@ -3,6 +3,7 @@
clippy::manual_range_contains,
clippy::useless_format,
clippy::field_reassign_with_default,
clippy::needless_lifetimes,
rustc::diagnostic_outside_of_impl,
rustc::untranslatable_diagnostic
)]

View file

@ -0,0 +1,108 @@
use super::AccessKind;
use super::tree::AccessRelatedness;
/// To speed up tree traversals, we want to skip traversing subtrees when we know the traversal will have no effect.
/// This is often the case for foreign accesses, since foreign accesses usually happen several times in a row and repeated
/// foreign accesses are idempotent. In particular, see tests `foreign_read_is_noop_after_foreign_write` and `all_transitions_idempotent`.
/// Thus, for each node we keep track of the "strongest idempotent foreign access" (SIFA), i.e. which foreign access can be skipped.
/// Note that for correctness, it is not required that this is the strongest access, just any access it is idempotent under. In particular, setting
/// it to `None` is always correct, but the point of this optimization is to have it be as strong as possible so that more accesses can be skipped.
/// This enum represents the kinds of values we store:
/// - `None` means that the node (and its subtrees) are not (guaranteed to be) idempotent under any foreign access.
/// - `Read` means that the node (and its subtrees) are idempotent under foreign reads, but not (yet / necessarily) under foreign writes.
/// - `Write` means that the node (and its subtrees) are idempotent under foreign writes. This also implies that it is idempotent under foreign
/// reads, since reads are weaker than writes (see test `foreign_read_is_noop_after_foreign_write`). In other words, this node can be skipped
/// for all foreign accesses.
///
/// Since a traversal does not just visit a node, but instead the entire subtree, the SIFA field for a given node indicates that the access to
/// *the entire subtree* rooted at that node can be skipped. In order for this to work, we maintain the global invariant that at
/// each location, the SIFA at each child must be stronger than that at the parent. For normal reads and writes, this is easily accomplished by
/// tracking each foreign access as it occurs, so that the next access can be skipped. This also obviously maintains the invariant, because
/// if a node undergoes a foreign access, then all its children also see this as a foreign access. However, the invariant is broken during retags,
/// because retags act across the entire allocation, but only emit a read event across a specific range. This means that for all nodes outside that
/// range, the invariant is potentially broken, since a new child with a weaker SIFA is inserted. Thus, during retags, special care is taken to
/// "manually" reset the parent's SIFA to be at least as strong as the new child's. This is accomplished with the `ensure_no_stronger_than` method.
///
/// Note that we derive Ord and PartialOrd, so the order in which variants are listed below matters:
/// None < Read < Write. Do not change that order. See the `test_order` test.
#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Debug, Default)]
pub enum IdempotentForeignAccess {
#[default]
None,
Read,
Write,
}
impl IdempotentForeignAccess {
/// Returns true if a node where the strongest idempotent foreign access is `self`
/// can skip the access `happening_next`. Note that if this returns
/// `true`, then the entire subtree will be skipped.
pub fn can_skip_foreign_access(self, happening_next: IdempotentForeignAccess) -> bool {
debug_assert!(happening_next.is_foreign());
// This ordering is correct. Intuitively, if the last access here was
// a foreign write, everything can be skipped, since after a foreign write,
// all further foreign accesses are idempotent
happening_next <= self
}
/// Updates `self` to account for a foreign access.
pub fn record_new(&mut self, just_happened: IdempotentForeignAccess) {
if just_happened.is_local() {
// If the access is local, reset it.
*self = IdempotentForeignAccess::None;
} else {
// Otherwise, keep it or strengthen it.
*self = just_happened.max(*self);
}
}
/// Returns true if this access is local.
pub fn is_local(self) -> bool {
matches!(self, IdempotentForeignAccess::None)
}
/// Returns true if this access is foreign, i.e. not local.
pub fn is_foreign(self) -> bool {
!self.is_local()
}
/// Constructs a foreign access from an `AccessKind`
pub fn from_foreign(acc: AccessKind) -> IdempotentForeignAccess {
match acc {
AccessKind::Read => Self::Read,
AccessKind::Write => Self::Write,
}
}
/// Usually, tree traversals have an `AccessKind` and an `AccessRelatedness`.
/// This method converts these into the corresponding `IdempotentForeignAccess`, to be used
/// to e.g. invoke `can_skip_foreign_access`.
pub fn from_acc_and_rel(acc: AccessKind, rel: AccessRelatedness) -> IdempotentForeignAccess {
if rel.is_foreign() { Self::from_foreign(acc) } else { Self::None }
}
/// During retags, the SIFA needs to be weakened to account for children with weaker SIFAs being inserted.
/// Thus, this method is called from the bottom up on each parent, until it returns false, which means the
/// "children have stronger SIFAs" invariant is restored.
pub fn ensure_no_stronger_than(&mut self, strongest_allowed: IdempotentForeignAccess) -> bool {
if *self > strongest_allowed {
*self = strongest_allowed;
true
} else {
false
}
}
}
#[cfg(test)]
mod tests {
use super::IdempotentForeignAccess;
#[test]
fn test_order() {
// The internal logic relies on this order.
// Do not change.
assert!(IdempotentForeignAccess::None < IdempotentForeignAccess::Read);
assert!(IdempotentForeignAccess::Read < IdempotentForeignAccess::Write);
}
}
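To make the skip rule concrete, here is a minimal standalone model of the ordering logic above (`Sifa` mirrors `IdempotentForeignAccess`; this is a sketch, not Miri's API):

```rust
// Minimal model of the SIFA ordering: None < Read < Write, as derived above.
#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Debug)]
enum Sifa {
    None,
    Read,
    Write,
}

// A foreign access can be skipped iff it is no stronger than what the
// subtree is already known to be idempotent under.
fn can_skip(recorded: Sifa, happening_next: Sifa) -> bool {
    happening_next <= recorded
}

fn main() {
    assert!(can_skip(Sifa::Write, Sifa::Read)); // write idempotence implies read idempotence
    assert!(can_skip(Sifa::Read, Sifa::Read)); // repeated reads are idempotent
    assert!(!can_skip(Sifa::Read, Sifa::Write)); // the write must still be traversed
    assert!(!can_skip(Sifa::None, Sifa::Read)); // nothing recorded, nothing skippable
}
```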

View file

@ -9,6 +9,7 @@ use crate::concurrency::data_race::NaReadType;
use crate::*;
pub mod diagnostics;
mod foreign_access_skipping;
mod perms;
mod tree;
mod unimap;
@ -296,7 +297,14 @@ trait EvalContextPrivExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
this.machine.current_span(),
)?;
// Record the parent-child pair in the tree.
tree_borrows.new_child(orig_tag, new_tag, new_perm.initial_state, range, span)?;
tree_borrows.new_child(
orig_tag,
new_tag,
new_perm.initial_state,
range,
span,
new_perm.protector.is_some(),
)?;
drop(tree_borrows);
// Also inform the data race model (but only if any bytes are actually affected).

View file

@ -48,6 +48,7 @@ enum PermissionPriv {
Disabled,
}
use self::PermissionPriv::*;
use super::foreign_access_skipping::IdempotentForeignAccess;
impl PartialOrd for PermissionPriv {
/// PermissionPriv is ordered by the reflexive transitive closure of
@ -87,6 +88,28 @@ impl PermissionPriv {
fn compatible_with_protector(&self) -> bool {
!matches!(self, ReservedIM)
}
/// See `foreign_access_skipping.rs`. Computes the SIFA of a permission.
fn strongest_idempotent_foreign_access(&self, prot: bool) -> IdempotentForeignAccess {
match self {
// A protected non-conflicted Reserved will become conflicted under a foreign read,
// and is hence not idempotent under it.
ReservedFrz { conflicted } if prot && !conflicted => IdempotentForeignAccess::None,
// Otherwise, foreign reads do not affect Reserved
ReservedFrz { .. } => IdempotentForeignAccess::Read,
// Famously, ReservedIM survives foreign writes. It is never protected.
ReservedIM if prot => unreachable!("Protected ReservedIM should not exist!"),
ReservedIM => IdempotentForeignAccess::Write,
// Active changes on any foreign access (becomes Frozen/Disabled).
Active => IdempotentForeignAccess::None,
// Frozen survives foreign reads, but not writes.
Frozen => IdempotentForeignAccess::Read,
// Disabled survives foreign reads and writes. It survives them
// even if protected, because a protected `Disabled` is not initialized
// and therefore does not trigger UB.
Disabled => IdempotentForeignAccess::Write,
}
}
}
/// This module controls how each permission individually reacts to an access.
@ -305,6 +328,13 @@ impl Permission {
(Disabled, _) => false,
}
}
/// Returns the strongest foreign action this node survives (without change),
/// where `prot` indicates if it is protected.
/// See `foreign_access_skipping.rs`.
pub fn strongest_idempotent_foreign_access(&self, prot: bool) -> IdempotentForeignAccess {
self.inner.strongest_idempotent_foreign_access(prot)
}
}
impl PermTransition {
@ -575,7 +605,7 @@ mod propagation_optimization_checks {
impl Exhaustive for AccessRelatedness {
fn exhaustive() -> Box<dyn Iterator<Item = Self>> {
use AccessRelatedness::*;
Box::new(vec![This, StrictChildAccess, AncestorAccess, DistantAccess].into_iter())
Box::new(vec![This, StrictChildAccess, AncestorAccess, CousinAccess].into_iter())
}
}
@ -634,6 +664,35 @@ mod propagation_optimization_checks {
}
}
#[test]
#[rustfmt::skip]
fn permission_sifa_is_correct() {
// Tests that `strongest_idempotent_foreign_access` is correct. See `foreign_access_skipping.rs`.
for perm in PermissionPriv::exhaustive() {
// Assert that adding a protector makes it less idempotent.
if perm.compatible_with_protector() {
assert!(perm.strongest_idempotent_foreign_access(true) <= perm.strongest_idempotent_foreign_access(false));
}
for prot in bool::exhaustive() {
if prot {
precondition!(perm.compatible_with_protector());
}
let access = perm.strongest_idempotent_foreign_access(prot);
// We now assert it is idempotent, and never causes UB.
// First, if the SIFA includes foreign reads, assert it is idempotent under foreign reads.
if access >= IdempotentForeignAccess::Read {
// We use `CousinAccess` here. We could also use `AncestorAccess`, since `transition::perform_access` treats these the same.
// The only place they are treated differently is in protector end accesses, but these are not handled here.
assert_eq!(perm, transition::perform_access(AccessKind::Read, AccessRelatedness::CousinAccess, perm, prot).unwrap());
}
// Then, if the SIFA includes foreign writes, assert it is idempotent under foreign writes.
if access >= IdempotentForeignAccess::Write {
assert_eq!(perm, transition::perform_access(AccessKind::Write, AccessRelatedness::CousinAccess, perm, prot).unwrap());
}
}
}
}
#[test]
// Check that all transitions are consistent with the order on PermissionPriv,
// i.e. Reserved -> Active -> Frozen -> Disabled

View file

@ -21,6 +21,7 @@ use crate::borrow_tracker::tree_borrows::Permission;
use crate::borrow_tracker::tree_borrows::diagnostics::{
self, NodeDebugInfo, TbError, TransitionError,
};
use crate::borrow_tracker::tree_borrows::foreign_access_skipping::IdempotentForeignAccess;
use crate::borrow_tracker::tree_borrows::perms::PermTransition;
use crate::borrow_tracker::tree_borrows::unimap::{UniEntry, UniIndex, UniKeyMap, UniValMap};
use crate::borrow_tracker::{GlobalState, ProtectorKind};
@ -45,28 +46,28 @@ pub(super) struct LocationState {
initialized: bool,
/// This pointer's current permission / future initial permission.
permission: Permission,
/// Strongest foreign access whose effects have already been applied to
/// this node and all its children since the last child access.
/// This is `None` if the most recent access is a child access,
/// `Some(Write)` if at least one foreign write access has been applied
/// since the previous child access, and `Some(Read)` if at least one
/// foreign read and no foreign write have occurred since the last child access.
latest_foreign_access: Option<AccessKind>,
/// See `foreign_access_skipping.rs`.
/// Stores an idempotent foreign access for this location and its children.
/// For correctness, this must not be too strong, and the recorded idempotent foreign access
/// of all children must be at least as strong as this. For performance, it should be as strong as possible.
idempotent_foreign_access: IdempotentForeignAccess,
}
impl LocationState {
/// Constructs a new initial state. It has neither been accessed, nor been subjected
/// to any foreign access yet.
/// The permission is not allowed to be `Active`.
fn new_uninit(permission: Permission) -> Self {
/// `sifa` is the (strongest) idempotent foreign access, see `foreign_access_skipping.rs`
fn new_uninit(permission: Permission, sifa: IdempotentForeignAccess) -> Self {
assert!(permission.is_initial() || permission.is_disabled());
Self { permission, initialized: false, latest_foreign_access: None }
Self { permission, initialized: false, idempotent_foreign_access: sifa }
}
/// Constructs a new initial state. It has not yet been subjected
/// to any foreign access. However, it is already marked as having been accessed.
fn new_init(permission: Permission) -> Self {
Self { permission, initialized: true, latest_foreign_access: None }
/// `sifa` is the (strongest) idempotent foreign access, see `foreign_access_skipping.rs`
fn new_init(permission: Permission, sifa: IdempotentForeignAccess) -> Self {
Self { permission, initialized: true, idempotent_foreign_access: sifa }
}
/// Check if the location has been initialized, i.e. if it has
@ -143,52 +144,21 @@ impl LocationState {
}
}
// Helper to optimize the tree traversal.
// The optimization here consists of observing thanks to the tests
// `foreign_read_is_noop_after_foreign_write` and `all_transitions_idempotent`,
// that there are actually just three possible sequences of events that can occur
// in between two child accesses that produce different results.
//
// Indeed,
// - applying any number of foreign read accesses is the same as applying
// exactly one foreign read,
// - applying any number of foreign read or write accesses is the same
// as applying exactly one foreign write.
// therefore the three sequences of events that can produce different
// outcomes are
// - an empty sequence (`self.latest_foreign_access = None`)
// - a nonempty read-only sequence (`self.latest_foreign_access = Some(Read)`)
// - a nonempty sequence with at least one write (`self.latest_foreign_access = Some(Write)`)
//
// This function not only determines if skipping the propagation right now
// is possible, it also updates the internal state to keep track of whether
// the propagation can be skipped next time.
// It is a performance loss not to call this function when a foreign access occurs.
// FIXME: This optimization is wrong, and is currently disabled (by ignoring the
// result returned here). Since we presumably want an optimization like this,
// we should add it back. See #3864 for more information.
/// Tree traversal optimization. See `foreign_access_skipping.rs`.
/// This checks whether a given foreign access can be skipped.
fn skip_if_known_noop(
&self,
access_kind: AccessKind,
rel_pos: AccessRelatedness,
) -> ContinueTraversal {
if rel_pos.is_foreign() {
let new_access_noop = match (self.latest_foreign_access, access_kind) {
// Previously applied transition makes the new one a guaranteed
// noop in the two following cases:
// (1) justified by `foreign_read_is_noop_after_foreign_write`
(Some(AccessKind::Write), AccessKind::Read) => true,
// (2) justified by `all_transitions_idempotent`
(Some(old), new) if old == new => true,
// In all other cases there has been a recent enough
// child access that the effects of the new foreign access
// need to be applied to this subtree.
_ => false,
};
let happening_now = IdempotentForeignAccess::from_foreign(access_kind);
let new_access_noop =
self.idempotent_foreign_access.can_skip_foreign_access(happening_now);
if new_access_noop {
// Abort traversal if the new transition is indeed guaranteed
// Abort traversal if the new access is indeed guaranteed
// to be noop.
// No need to update `self.latest_foreign_access`,
// No need to update `self.idempotent_foreign_access`,
// since the skipped access is idempotent for this node and
// the recorded state therefore does not change.
ContinueTraversal::SkipSelfAndChildren
@ -207,20 +177,17 @@ impl LocationState {
}
/// Records a new access, so that future access can potentially be skipped
/// by `skip_if_known_noop`.
/// The invariants for this function are closely coupled to the function above:
/// It MUST be called on child accesses, and on foreign accesses MUST be called
/// when `skip_if_know_noop` returns `Recurse`, and MUST NOT be called otherwise.
/// FIXME: This optimization is wrong, and is currently disabled (by ignoring the
/// result returned here). Since we presumably want an optimization like this,
/// we should add it back. See #3864 for more information.
#[allow(unused)]
/// by `skip_if_known_noop`. This must be called on child accesses, and should
/// otherwise be called on foreign accesses for increased performance. It should not be called
/// when `skip_if_known_noop` indicated skipping, since it would then have no effect.
/// See `foreign_access_skipping.rs`.
fn record_new_access(&mut self, access_kind: AccessKind, rel_pos: AccessRelatedness) {
if rel_pos.is_foreign() {
self.latest_foreign_access = Some(access_kind);
} else {
self.latest_foreign_access = None;
}
debug_assert!(matches!(
self.skip_if_known_noop(access_kind, rel_pos),
ContinueTraversal::Recurse
));
self.idempotent_foreign_access
.record_new(IdempotentForeignAccess::from_acc_and_rel(access_kind, rel_pos));
}
}
@ -277,6 +244,11 @@ pub(super) struct Node {
/// It is only ever `Disabled` for a tree root, since the root is initialized to `Active` by
/// its own separate mechanism.
default_initial_perm: Permission,
/// The default initial (strongest) idempotent foreign access.
/// This participates in the invariant for `LocationState::idempotent_foreign_access`
/// in cases where there is no location state yet. See `foreign_access_skipping.rs`
/// and `LocationState::idempotent_foreign_access` for more information.
default_initial_idempotent_foreign_access: IdempotentForeignAccess,
/// Some extra information useful only for debugging purposes
pub debug_info: NodeDebugInfo,
}
@ -430,7 +402,7 @@ where
}
self.stack.push((
child,
AccessRelatedness::DistantAccess,
AccessRelatedness::CousinAccess,
RecursionState::BeforeChildren,
));
}
@ -591,6 +563,8 @@ impl Tree {
parent: None,
children: SmallVec::default(),
default_initial_perm: root_default_perm,
// The root may never be skipped, since for it all accesses are local.
default_initial_idempotent_foreign_access: IdempotentForeignAccess::None,
debug_info,
});
nodes
@ -601,7 +575,10 @@ impl Tree {
// We also ensure that it is initialized, so that no `Active` but
// not yet initialized nodes exist. Essentially, we pretend there
// was a write that initialized these to `Active`.
perms.insert(root_idx, LocationState::new_init(Permission::new_active()));
perms.insert(
root_idx,
LocationState::new_init(Permission::new_active(), IdempotentForeignAccess::None),
);
RangeMap::new(size, perms)
};
Self { root: root_idx, nodes, rperms, tag_mapping }
@ -617,29 +594,79 @@ impl<'tcx> Tree {
default_initial_perm: Permission,
reborrow_range: AllocRange,
span: Span,
prot: bool,
) -> InterpResult<'tcx> {
assert!(!self.tag_mapping.contains_key(&new_tag));
let idx = self.tag_mapping.insert(new_tag);
let parent_idx = self.tag_mapping.get(&parent_tag).unwrap();
let strongest_idempotent = default_initial_perm.strongest_idempotent_foreign_access(prot);
// Create the node
self.nodes.insert(idx, Node {
tag: new_tag,
parent: Some(parent_idx),
children: SmallVec::default(),
default_initial_perm,
default_initial_idempotent_foreign_access: strongest_idempotent,
debug_info: NodeDebugInfo::new(new_tag, default_initial_perm, span),
});
// Register new_tag as a child of parent_tag
self.nodes.get_mut(parent_idx).unwrap().children.push(idx);
// Initialize perms
let perm = LocationState::new_init(default_initial_perm);
let perm = LocationState::new_init(default_initial_perm, strongest_idempotent);
for (_perms_range, perms) in self.rperms.iter_mut(reborrow_range.start, reborrow_range.size)
{
perms.insert(idx, perm);
}
// Inserting the new perms might have broken the SIFA invariant (see `foreign_access_skipping.rs`).
// We now weaken the recorded SIFA for our parents, until the invariant is restored.
// We could weaken them all to `IdempotentForeignAccess::None`, but it is more efficient to compute the SIFA
// for the new permission statically, and use that.
self.update_last_accessed_after_retag(parent_idx, strongest_idempotent);
interp_ok(())
}
/// Restores the SIFA "children are stronger" invariant after a retag.
/// See `foreign_access_skipping` and `new_child`.
fn update_last_accessed_after_retag(
&mut self,
mut current: UniIndex,
strongest_allowed: IdempotentForeignAccess,
) {
// We walk the tree upwards, until the invariant is restored
loop {
let current_node = self.nodes.get_mut(current).unwrap();
// Call `ensure_no_stronger_than` on all SIFAs for this node: the per-location SIFA, as well
// as the default SIFA for not-yet-initialized locations.
// Record whether we did any change; if not, the invariant is restored and we can stop the traversal.
let mut any_change = false;
for (_, map) in self.rperms.iter_mut_all() {
// Check if this node has a state for this location (or range of locations).
if let Some(perm) = map.get_mut(current) {
// Update the per-location SIFA, recording if it changed.
any_change |=
perm.idempotent_foreign_access.ensure_no_stronger_than(strongest_allowed);
}
}
// Now update `default_initial_idempotent_foreign_access`, which stores the default SIFA for not-yet-initialized locations.
any_change |= current_node
.default_initial_idempotent_foreign_access
.ensure_no_stronger_than(strongest_allowed);
if any_change {
let Some(next) = self.nodes.get(current).unwrap().parent else {
// We have arrived at the root.
break;
};
current = next;
continue;
} else {
break;
}
}
}
/// Deallocation requires
/// - a pointer that permits write accesses
/// - the absence of Strong Protectors anywhere in the allocation
@ -731,13 +758,8 @@ impl<'tcx> Tree {
let node_skipper = |access_kind: AccessKind, args: &NodeAppArgs<'_>| -> ContinueTraversal {
let NodeAppArgs { node, perm, rel_pos } = args;
let old_state = perm
.get()
.copied()
.unwrap_or_else(|| LocationState::new_uninit(node.default_initial_perm));
// FIXME: See #3684
let _would_skip_if_not_for_fixme = old_state.skip_if_known_noop(access_kind, *rel_pos);
ContinueTraversal::Recurse
let old_state = perm.get().copied().unwrap_or_else(|| node.default_location_state());
old_state.skip_if_known_noop(access_kind, *rel_pos)
};
let node_app = |perms_range: Range<u64>,
access_kind: AccessKind,
@ -746,10 +768,12 @@ impl<'tcx> Tree {
-> Result<(), TransitionError> {
let NodeAppArgs { node, mut perm, rel_pos } = args;
let old_state = perm.or_insert(LocationState::new_uninit(node.default_initial_perm));
let old_state = perm.or_insert(node.default_location_state());
// FIXME: See #3684
// old_state.record_new_access(access_kind, rel_pos);
// Record the access now; the contract of
// `traverse_this_parents_children_other` ensures this is only reached when
// `skip_if_known_noop` returned `Recurse`.
old_state.record_new_access(access_kind, rel_pos);
let protected = global.borrow().protected_tags.contains_key(&node.tag);
let transition = old_state.perform_access(access_kind, rel_pos, protected)?;
@ -976,6 +1000,15 @@ impl Tree {
}
}
impl Node {
pub fn default_location_state(&self) -> LocationState {
LocationState::new_uninit(
self.default_initial_perm,
self.default_initial_idempotent_foreign_access,
)
}
}
impl VisitProvenance for Tree {
fn visit_provenance(&self, visit: &mut VisitWith<'_>) {
// To ensure that the root never gets removed, we visit it
@ -998,15 +1031,14 @@ pub enum AccessRelatedness {
AncestorAccess,
/// The accessed pointer is neither of the above.
// It's a cousin/uncle/etc., something in a side branch.
// FIXME: find a better name ?
DistantAccess,
CousinAccess,
}
impl AccessRelatedness {
/// Check that the access is either Ancestor or Cousin, i.e. not
/// a transitive child (initial pointer included).
pub fn is_foreign(self) -> bool {
matches!(self, AccessRelatedness::AncestorAccess | AccessRelatedness::DistantAccess)
matches!(self, AccessRelatedness::AncestorAccess | AccessRelatedness::CousinAccess)
}
/// Given the AccessRelatedness for the parent node, compute the AccessRelatedness
@ -1016,7 +1048,7 @@ impl AccessRelatedness {
use AccessRelatedness::*;
match self {
AncestorAccess | This => AncestorAccess,
StrictChildAccess | DistantAccess => DistantAccess,
StrictChildAccess | CousinAccess => CousinAccess,
}
}
}
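The `update_last_accessed_after_retag` walk above can be pictured with a toy model: weaken each ancestor's SIFA until one is already weak enough, then stop. A sketch under assumed simplifications (a plain parent array instead of `UniValMap`, a single location instead of a range map):

```rust
#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Debug)]
enum Sifa {
    None,
    Read,
    Write,
}

// Weaken `sifa` to at most `cap`; report whether anything changed.
fn ensure_no_stronger_than(sifa: &mut Sifa, cap: Sifa) -> bool {
    if *sifa > cap {
        *sifa = cap;
        true
    } else {
        false
    }
}

fn main() {
    // parent[i] is the parent of node i; node 0 is the root.
    let parent = [usize::MAX, 0, 1];
    let mut sifa = [Sifa::None, Sifa::Write, Sifa::Write];

    // A new child with SIFA `Read` was just inserted under node 2: walk
    // upwards, weakening ancestors, until one is already weak enough.
    let (mut current, cap) = (2, Sifa::Read);
    while ensure_no_stronger_than(&mut sifa[current], cap) {
        if parent[current] == usize::MAX {
            break; // reached the root
        }
        current = parent[current];
    }
    assert_eq!(sifa, [Sifa::None, Sifa::Read, Sifa::Read]);
}
```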

View file

@ -10,7 +10,11 @@ impl Exhaustive for LocationState {
fn exhaustive() -> Box<dyn Iterator<Item = Self>> {
// We keep `idempotent_foreign_access` at its default, as that's just a cache.
Box::new(<(Permission, bool)>::exhaustive().map(|(permission, initialized)| {
Self { permission, initialized, latest_foreign_access: None }
Self {
permission,
initialized,
idempotent_foreign_access: IdempotentForeignAccess::default(),
}
}))
}
}
@ -260,15 +264,15 @@ mod spurious_read {
match xy_rel {
RelPosXY::MutuallyForeign =>
match self {
PtrSelector::X => (This, DistantAccess),
PtrSelector::Y => (DistantAccess, This),
PtrSelector::Other => (DistantAccess, DistantAccess),
PtrSelector::X => (This, CousinAccess),
PtrSelector::Y => (CousinAccess, This),
PtrSelector::Other => (CousinAccess, CousinAccess),
},
RelPosXY::XChildY =>
match self {
PtrSelector::X => (This, StrictChildAccess),
PtrSelector::Y => (AncestorAccess, This),
PtrSelector::Other => (DistantAccess, DistantAccess),
PtrSelector::Other => (CousinAccess, CousinAccess),
},
}
}
@ -598,13 +602,18 @@ mod spurious_read {
let source = LocStateProtPair {
xy_rel: RelPosXY::MutuallyForeign,
x: LocStateProt {
state: LocationState::new_init(Permission::new_frozen()),
// For the tests, the strongest idempotent foreign access does not matter, so we use `Default::default`
state: LocationState::new_init(
Permission::new_frozen(),
IdempotentForeignAccess::default(),
),
prot: true,
},
y: LocStateProt {
state: LocationState::new_uninit(Permission::new_reserved(
/* freeze */ true, /* protected */ true,
)),
state: LocationState::new_uninit(
Permission::new_reserved(/* freeze */ true, /* protected */ true),
IdempotentForeignAccess::default(),
),
prot: true,
},
};

View file

@ -108,19 +108,19 @@ pub(super) struct ThreadClockSet {
fence_acquire: VClock,
/// The last timestamp of happens-before relations that
/// have been released by this thread by a fence.
/// have been released by this thread by a release fence.
fence_release: VClock,
/// Timestamps of the last SC fence performed by each
/// thread, updated when this thread performs an SC fence
pub(super) fence_seqcst: VClock,
/// Timestamps of the last SC write performed by each
/// thread, updated when this thread performs an SC fence
/// thread, updated when this thread performs an SC fence.
/// This is never acquired into the thread's clock, it
/// just limits which old writes can be seen in weak memory emulation.
pub(super) write_seqcst: VClock,
/// Timestamps of the last SC fence performed by each
/// thread, updated when this thread performs an SC read
/// thread, updated when this thread performs an SC read.
/// This is never acquired into the thread's clock, it
/// just limits which old writes can be seen in weak memory emulation.
pub(super) read_seqcst: VClock,
}
@ -256,6 +256,106 @@ enum AccessType {
AtomicRmw,
}
/// Per-byte vector clock metadata for data-race detection.
#[derive(Clone, PartialEq, Eq, Debug)]
struct MemoryCellClocks {
/// The vector-clock timestamp and the thread that did the last non-atomic write. We don't need
/// a full `VClock` here, it's always a single thread and nothing synchronizes, so the effective
/// clock is all-0 except for the thread that did the write.
write: (VectorIdx, VTimestamp),
/// The type of operation that the write index represents,
/// either newly allocated memory, a non-atomic write or
/// a deallocation of memory.
write_type: NaWriteType,
/// The vector-clock of all non-atomic reads that happened since the last non-atomic write
/// (i.e., we join together the "singleton" clocks corresponding to each read). It is reset to
/// zero on each write operation.
read: VClock,
/// Atomic access, acquire, release sequence tracking clocks.
/// For non-atomic memory this value is set to None.
/// For atomic memory, each byte carries this information.
atomic_ops: Option<Box<AtomicMemoryCellClocks>>,
}
/// Extra metadata associated with a thread.
#[derive(Debug, Clone, Default)]
struct ThreadExtraState {
/// The current vector index in use by the
/// thread; this is set to `None`
/// after the vector index has been re-used,
/// and hence the value will never need to be
/// read during data-race reporting.
vector_index: Option<VectorIdx>,
/// Thread termination vector clock; this
/// is set on thread termination and is used
/// for joining on threads, since the vector_index
/// may be re-used when the join operation occurs.
termination_vector_clock: Option<VClock>,
}
/// Global data-race detection state, contains the currently
/// executing thread as well as the vector-clocks associated
/// with each of the threads.
// FIXME: it is probably better to have one large RefCell, than to have so many small ones.
#[derive(Debug, Clone)]
pub struct GlobalState {
/// Set to true once the first additional
/// thread has launched, due to the dependency
/// between before and after a thread launch.
/// Any data-races must be recorded after this
/// so concurrent execution can ignore recording
/// any data-races.
multi_threaded: Cell<bool>,
/// A flag to mark that we are currently performing
/// a data-race-free action (such as an atomic access),
/// used to suppress the race detector.
ongoing_action_data_race_free: Cell<bool>,
/// Mapping of a vector index to a known set of thread
/// clocks; this is not directly a mapping from a thread id,
/// since one index may refer to multiple threads.
vector_clocks: RefCell<IndexVec<VectorIdx, ThreadClockSet>>,
/// Mapping of a given vector index to the current thread
/// that the execution is representing; this may change
/// if a vector index is re-assigned to a new thread.
vector_info: RefCell<IndexVec<VectorIdx, ThreadId>>,
/// The mapping of a given thread to associated thread metadata.
thread_info: RefCell<IndexVec<ThreadId, ThreadExtraState>>,
/// Potential vector indices that could be re-used on thread creation;
/// values are inserted here after the thread has terminated and
/// been joined with, and hence may potentially become free
/// for use as the index for a new thread.
/// Elements in this set may still require the vector index to
/// report data-races, and can only be re-used after all
/// active vector-clocks catch up with the thread's timestamp.
reuse_candidates: RefCell<FxHashSet<VectorIdx>>,
/// We make SC fences act like RMWs on a global location.
/// To implement that, they all release and acquire into this clock.
last_sc_fence: RefCell<VClock>,
/// The timestamp of last SC write performed by each thread.
/// Threads only update their own index here!
last_sc_write_per_thread: RefCell<VClock>,
/// Track when an outdated (weak memory) load happens.
pub track_outdated_loads: bool,
}
impl VisitProvenance for GlobalState {
fn visit_provenance(&self, _visit: &mut VisitWith<'_>) {
// We don't have any tags.
}
}
impl AccessType {
fn description(self, ty: Option<Ty<'_>>, size: Option<Size>) -> String {
let mut msg = String::new();
@ -309,30 +409,6 @@ impl AccessType {
}
}
/// Per-byte vector clock metadata for data-race detection.
#[derive(Clone, PartialEq, Eq, Debug)]
struct MemoryCellClocks {
/// The vector-clock timestamp and the thread that did the last non-atomic write. We don't need
/// a full `VClock` here, it's always a single thread and nothing synchronizes, so the effective
/// clock is all-0 except for the thread that did the write.
write: (VectorIdx, VTimestamp),
/// The type of operation that the write index represents,
/// either newly allocated memory, a non-atomic write or
/// a deallocation of memory.
write_type: NaWriteType,
/// The vector-clock of all non-atomic reads that happened since the last non-atomic write
/// (i.e., we join together the "singleton" clocks corresponding to each read). It is reset to
/// zero on each write operation.
read: VClock,
/// Atomic access, acquire, release sequence tracking clocks.
/// For non-atomic memory this value is set to None.
/// For atomic memory, each byte carries this information.
atomic_ops: Option<Box<AtomicMemoryCellClocks>>,
}
impl AtomicMemoryCellClocks {
fn new(size: Size) -> Self {
AtomicMemoryCellClocks {
@ -798,15 +874,27 @@ pub trait EvalContextExt<'tcx>: MiriInterpCxExt<'tcx> {
// Either Acquire | AcqRel | SeqCst
clocks.apply_acquire_fence();
}
if atomic == AtomicFenceOrd::SeqCst {
// Behave like an RMW on the global fence location. This takes full care of
// all the SC fence requirements, including C++17 §32.4 [atomics.order]
// paragraph 6 (which would limit what future reads can see). It also rules
// out many legal behaviors, but we don't currently have a model that would
// be more precise.
// Also see the second bullet on page 10 of
// <https://www.cs.tau.ac.il/~orilahav/papers/popl21_robustness.pdf>.
let mut sc_fence_clock = data_race.last_sc_fence.borrow_mut();
sc_fence_clock.join(&clocks.clock);
clocks.clock.join(&sc_fence_clock);
// Also establish some sort of order with the last SC write that happened, globally
// (but this is only respected by future reads).
clocks.write_seqcst.join(&data_race.last_sc_write_per_thread.borrow());
}
// The release fence is last, since both of the above could alter our clock,
// which should be part of what is being released.
if atomic != AtomicFenceOrd::Acquire {
// Either Release | AcqRel | SeqCst
clocks.apply_release_fence();
}
if atomic == AtomicFenceOrd::SeqCst {
data_race.last_sc_fence.borrow_mut().set_at_index(&clocks.clock, index);
clocks.fence_seqcst.join(&data_race.last_sc_fence.borrow());
clocks.write_seqcst.join(&data_race.last_sc_write.borrow());
}
// Increment timestamp in case of release semantics.
interp_ok(atomic != AtomicFenceOrd::Acquire)
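A hedged sketch of the "SC fence acts like an RMW on a global location" rule from the comment above, with vector clocks modeled as plain `Vec<u32>` and join as the pointwise maximum (a simplification of `VClock::join`):

```rust
// Pointwise join (least upper bound) of two vector clocks.
fn join(into: &mut [u32], other: &[u32]) {
    for (a, b) in into.iter_mut().zip(other) {
        *a = (*a).max(*b);
    }
}

// SC fence: release the thread's clock into the global fence clock, then
// acquire it back, exactly like an acquire-release RMW on one location.
fn sc_fence(thread_clock: &mut Vec<u32>, last_sc_fence: &mut Vec<u32>) {
    let snapshot = thread_clock.clone();
    join(last_sc_fence, &snapshot);
    join(thread_clock, last_sc_fence);
}

fn main() {
    let mut global = vec![0, 3, 0]; // left behind by an earlier SC fence of thread 1
    let mut t0 = vec![5, 0, 0];
    sc_fence(&mut t0, &mut global);
    assert_eq!(t0, vec![5, 3, 0]); // thread 0 acquired thread 1's fence
    assert_eq!(global, vec![5, 3, 0]); // and released its own clock into the global one
}
```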
@ -1463,80 +1551,6 @@ trait EvalContextPrivExt<'tcx>: MiriInterpCxExt<'tcx> {
}
}
/// Extra metadata associated with a thread.
#[derive(Debug, Clone, Default)]
struct ThreadExtraState {
/// The current vector index in use by the
/// thread currently, this is set to None
/// after the vector index has been re-used
/// and hence the value will never need to be
/// read during data-race reporting.
vector_index: Option<VectorIdx>,
/// Thread termination vector clock, this
/// is set on thread termination and is used
/// for joining on threads since the vector_index
/// may be re-used when the join operation occurs.
termination_vector_clock: Option<VClock>,
}
/// Global data-race detection state, contains the currently
/// executing thread as well as the vector-clocks associated
/// with each of the threads.
// FIXME: it is probably better to have one large RefCell, than to have so many small ones.
#[derive(Debug, Clone)]
pub struct GlobalState {
/// Set to true once the first additional
/// thread has launched, due to the dependency
/// between before and after a thread launch.
/// Any data-races must be recorded after this
/// so concurrent execution can ignore recording
/// any data-races.
multi_threaded: Cell<bool>,
/// A flag to mark we are currently performing
/// a data race free action (such as atomic access)
/// to suppress the race detector
ongoing_action_data_race_free: Cell<bool>,
/// Mapping of a vector index to a known set of thread
/// clocks, this is not directly mapping from a thread id
/// since it may refer to multiple threads.
vector_clocks: RefCell<IndexVec<VectorIdx, ThreadClockSet>>,
/// Mapping of a given vector index to the current thread
/// that the execution is representing, this may change
/// if a vector index is re-assigned to a new thread.
vector_info: RefCell<IndexVec<VectorIdx, ThreadId>>,
/// The mapping of a given thread to associated thread metadata.
thread_info: RefCell<IndexVec<ThreadId, ThreadExtraState>>,
/// Potential vector indices that could be re-used on thread creation
/// values are inserted here on after the thread has terminated and
/// been joined with, and hence may potentially become free
/// for use as the index for a new thread.
/// Elements in this set may still require the vector index to
/// report data-races, and can only be re-used after all
/// active vector-clocks catch up with the threads timestamp.
reuse_candidates: RefCell<FxHashSet<VectorIdx>>,
/// The timestamp of last SC fence performed by each thread
last_sc_fence: RefCell<VClock>,
/// The timestamp of last SC write performed by each thread
last_sc_write: RefCell<VClock>,
/// Track when an outdated (weak memory) load happens.
pub track_outdated_loads: bool,
}
impl VisitProvenance for GlobalState {
fn visit_provenance(&self, _visit: &mut VisitWith<'_>) {
// We don't have any tags.
}
}
impl GlobalState {
/// Create a new global state, setup with just thread-id=0
/// advanced to timestamp = 1.
@ -1549,7 +1563,7 @@ impl GlobalState {
thread_info: RefCell::new(IndexVec::new()),
reuse_candidates: RefCell::new(FxHashSet::default()),
last_sc_fence: RefCell::new(VClock::default()),
last_sc_write: RefCell::new(VClock::default()),
last_sc_write_per_thread: RefCell::new(VClock::default()),
track_outdated_loads: config.track_outdated_loads,
};
@ -1851,7 +1865,7 @@ impl GlobalState {
// SC ATOMIC STORE rule in the paper.
pub(super) fn sc_write(&self, thread_mgr: &ThreadManager<'_>) {
let (index, clocks) = self.active_thread_state(thread_mgr);
self.last_sc_write.borrow_mut().set_at_index(&clocks.clock, index);
self.last_sc_write_per_thread.borrow_mut().set_at_index(&clocks.clock, index);
}
// SC ATOMIC READ rule in the paper.

View file

@ -11,10 +11,17 @@
//! This implementation is not fully correct under the revised C++20 model and may generate behaviours C++20
//! disallows (<https://github.com/rust-lang/miri/issues/2301>).
//!
//! A modification is made to the paper's model to partially address C++20 changes.
//! Specifically, if an SC load reads from an atomic store of any ordering, then a later SC load cannot read from
//! an earlier store in the location's modification order. This is to prevent creating a backwards S edge from the second
//! load to the first, as a result of C++20's coherence-ordered before rules.
//! Modifications are made to the paper's model to address C++20 changes:
//! - If an SC load reads from an atomic store of any ordering, then a later SC load cannot read
//! from an earlier store in the location's modification order. This is to prevent creating a
//! backwards S edge from the second load to the first, as a result of C++20's coherence-ordered
//! before rules. (This seems to rule out behaviors that were actually permitted by the RC11 model
//! that C++20 intended to copy (<https://plv.mpi-sws.org/scfix/paper.pdf>); a change was
//! introduced when translating the math to English. According to Viktor Vafeiadis, this
//! difference is harmless. So we stick to what the standard says, and allow fewer behaviors.)
//! - SC fences are treated like AcqRel RMWs to a global clock, to ensure they induce enough
//! synchronization with the surrounding accesses. This rules out legal behavior, but it is really
//! hard to be more precise here.
//!
//! Rust follows the C++20 memory model (except for the Consume ordering and some operations not performable through C++'s
//! `std::atomic<T>` API). It is therefore possible for this implementation to generate behaviours never observable when the
@ -138,6 +145,7 @@ struct StoreElement {
/// The timestamp of the storing thread when it performed the store
timestamp: VTimestamp,
/// The value of this store. `None` means uninitialized.
// FIXME: Currently, we cannot represent partial initialization.
val: Option<Scalar>,
@ -335,11 +343,6 @@ impl<'tcx> StoreBuffer {
// then we cannot read-from anything earlier in modification order.
// C++20 §6.9.2.2 [intro.races] paragraph 16
false
} else if store_elem.timestamp <= clocks.fence_seqcst[store_elem.store_index] {
// The current load, which may be sequenced-after an SC fence, cannot read-before
// the last store sequenced-before an SC fence in another thread.
// C++17 §32.4 [atomics.order] paragraph 6
false
} else if store_elem.timestamp <= clocks.write_seqcst[store_elem.store_index]
&& store_elem.is_seqcst
{
@ -356,9 +359,9 @@ impl<'tcx> StoreBuffer {
false
} else if is_seqcst && store_elem.load_info.borrow().sc_loaded {
// The current SC load cannot read-before a store that an earlier SC load has observed.
// See https://github.com/rust-lang/miri/issues/2301#issuecomment-1222720427
// See https://github.com/rust-lang/miri/issues/2301#issuecomment-1222720427.
// Consequences of C++20 §31.4 [atomics.order] paragraph 3.1, 3.3 (coherence-ordered before)
// and 4.1 (coherence-ordered before between SC makes global total order S)
// and 4.1 (coherence-ordered before between SC makes global total order S).
false
} else {
true
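Tying the module docs above to observable behavior (a sketch, not a Miri test): with `SeqCst` fences between the store and the load, the store-buffering outcome (0, 0) is forbidden, and treating SC fences as AcqRel RMWs on a global clock is how Miri enforces this.

```rust
use std::sync::atomic::Ordering::{Relaxed, SeqCst};
use std::sync::atomic::{fence, AtomicU32};
use std::thread;

static X: AtomicU32 = AtomicU32::new(0);
static Y: AtomicU32 = AtomicU32::new(0);

fn main() {
    let t1 = thread::spawn(|| {
        X.store(1, Relaxed);
        fence(SeqCst);
        Y.load(Relaxed)
    });
    let t2 = thread::spawn(|| {
        Y.store(1, Relaxed);
        fence(SeqCst);
        X.load(Relaxed)
    });
    let results = (t1.join().unwrap(), t2.join().unwrap());
    // Both loads returning 0 would require each fence to precede the other
    // in the total order on SC fences, which is impossible.
    assert_ne!(results, (0, 0));
}
```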

View file

@ -1,4 +1,5 @@
use either::Either;
use rand::Rng;
use rustc_abi::{Endian, HasDataLayout};
use rustc_apfloat::{Float, Round};
use rustc_middle::ty::FloatTy;
@ -286,7 +287,7 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
this.write_scalar(val, &dest)?;
}
}
"fma" => {
"fma" | "relaxed_fma" => {
let [a, b, c] = check_arg_count(args)?;
let (a, a_len) = this.project_to_simd(a)?;
let (b, b_len) = this.project_to_simd(b)?;
@ -303,6 +304,8 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
let c = this.read_scalar(&this.project_index(&c, i)?)?;
let dest = this.project_index(&dest, i)?;
let fuse: bool = intrinsic_name == "fma" || this.machine.rng.get_mut().gen();
// Works for f32 and f64.
// FIXME: using host floats to work around https://github.com/rust-lang/miri/issues/2468.
let ty::Float(float_ty) = dest.layout.ty.kind() else {
@ -314,7 +317,11 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
let a = a.to_f32()?;
let b = b.to_f32()?;
let c = c.to_f32()?;
let res = a.to_host().mul_add(b.to_host(), c.to_host()).to_soft();
let res = if fuse {
a.to_host().mul_add(b.to_host(), c.to_host()).to_soft()
} else {
((a * b).value + c).value
};
let res = this.adjust_nan(res, &[a, b, c]);
Scalar::from(res)
}
@ -322,7 +329,11 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
let a = a.to_f64()?;
let b = b.to_f64()?;
let c = c.to_f64()?;
let res = a.to_host().mul_add(b.to_host(), c.to_host()).to_soft();
let res = if fuse {
a.to_host().mul_add(b.to_host(), c.to_host()).to_soft()
} else {
((a * b).value + c).value
};
let res = this.adjust_nan(res, &[a, b, c]);
Scalar::from(res)
}
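For context on the `fuse` coin flip above: a fused multiply-add rounds once, while `(a * b) + c` rounds twice, so the two can legitimately differ. A standalone demonstration (host floats, independent of Miri):

```rust
fn main() {
    let (a, b, c) = (0.1_f64, 10.0_f64, -1.0_f64);
    // a is the f64 nearest 0.1, so the exact product a * b is 1 + 2^-54.
    let fused = a.mul_add(b, c); // one rounding: keeps the 2^-54 residue
    let unfused = a * b + c; // a * b first rounds to exactly 1.0, so this is 0.0
    assert_eq!(unfused, 0.0);
    assert!(fused > 0.0);
    assert_ne!(fused, unfused);
    println!("fused = {fused:e}, unfused = {unfused:e}");
}
```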

View file

@ -1245,7 +1245,10 @@ impl<'tcx> Machine<'tcx> for MiriMachine<'tcx> {
/// Called on `ptr as usize` casts.
/// (Actually computing the resulting `usize` doesn't need machine help,
/// that's just `Scalar::try_to_int`.)
fn expose_provenance(ecx: &InterpCx<'tcx, Self>, provenance: Self::Provenance) -> InterpResult<'tcx> {
fn expose_provenance(
ecx: &InterpCx<'tcx, Self>,
provenance: Self::Provenance,
) -> InterpResult<'tcx> {
match provenance {
Provenance::Concrete { alloc_id, tag } => ecx.expose_ptr(alloc_id, tag),
Provenance::Wildcard => {

View file

@ -0,0 +1,411 @@
use std::any::Any;
use std::collections::BTreeMap;
use std::io::{IsTerminal, Read, SeekFrom, Write};
use std::ops::Deref;
use std::rc::{Rc, Weak};
use std::{fs, io};
use rustc_abi::Size;
use crate::shims::unix::UnixFileDescription;
use crate::*;
/// Represents an open file description.
pub trait FileDescription: std::fmt::Debug + Any {
fn name(&self) -> &'static str;
/// Reads as much as possible into the given buffer `ptr`.
/// `len` indicates how many bytes we should try to read.
/// `dest` is where the return value should be stored: number of bytes read, or `-1` in case of error.
fn read<'tcx>(
&self,
_self_ref: &FileDescriptionRef,
_communicate_allowed: bool,
_ptr: Pointer,
_len: usize,
_dest: &MPlaceTy<'tcx>,
_ecx: &mut MiriInterpCx<'tcx>,
) -> InterpResult<'tcx> {
throw_unsup_format!("cannot read from {}", self.name());
}
/// Writes as much as possible from the given buffer `ptr`.
/// `len` indicates how many bytes we should try to write.
/// `dest` is where the return value should be stored: number of bytes written, or `-1` in case of error.
fn write<'tcx>(
&self,
_self_ref: &FileDescriptionRef,
_communicate_allowed: bool,
_ptr: Pointer,
_len: usize,
_dest: &MPlaceTy<'tcx>,
_ecx: &mut MiriInterpCx<'tcx>,
) -> InterpResult<'tcx> {
throw_unsup_format!("cannot write to {}", self.name());
}
/// Seeks to the given offset (which can be relative to the beginning, end, or current position).
/// Returns the new position from the start of the stream.
fn seek<'tcx>(
&self,
_communicate_allowed: bool,
_offset: SeekFrom,
) -> InterpResult<'tcx, io::Result<u64>> {
throw_unsup_format!("cannot seek on {}", self.name());
}
fn close<'tcx>(
self: Box<Self>,
_communicate_allowed: bool,
_ecx: &mut MiriInterpCx<'tcx>,
) -> InterpResult<'tcx, io::Result<()>> {
throw_unsup_format!("cannot close {}", self.name());
}
fn metadata<'tcx>(&self) -> InterpResult<'tcx, io::Result<fs::Metadata>> {
throw_unsup_format!("obtaining metadata is only supported on file-backed file descriptors");
}
fn is_tty(&self, _communicate_allowed: bool) -> bool {
// Most FDs are not ttys, and the consequences of a wrong `false` are minor,
// so we use a default impl here.
false
}
fn as_unix(&self) -> &dyn UnixFileDescription {
panic!("Not a unix file descriptor: {}", self.name());
}
}
impl dyn FileDescription {
#[inline(always)]
pub fn downcast<T: Any>(&self) -> Option<&T> {
(self as &dyn Any).downcast_ref()
}
}
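The `downcast` helper above leans on upcasting `&dyn FileDescription` to `&dyn Any` (trait-object upcasting, stable in recent Rust). A self-contained sketch of the same pattern with hypothetical types:

```rust
use std::any::Any;

trait FileLike: Any {
    fn name(&self) -> &'static str;
}

struct Socket;
impl FileLike for Socket {
    fn name(&self) -> &'static str {
        "socket"
    }
}

impl dyn FileLike {
    // Upcast the trait object to `dyn Any`, then try the concrete type.
    fn downcast<T: Any>(&self) -> Option<&T> {
        (self as &dyn Any).downcast_ref()
    }
}

fn main() {
    let fd: Box<dyn FileLike> = Box::new(Socket);
    assert!(fd.downcast::<Socket>().is_some()); // right concrete type
    assert!(fd.downcast::<String>().is_none()); // wrong type: None, no panic
    println!("{}", fd.name());
}
```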
impl FileDescription for io::Stdin {
fn name(&self) -> &'static str {
"stdin"
}
fn read<'tcx>(
&self,
_self_ref: &FileDescriptionRef,
communicate_allowed: bool,
ptr: Pointer,
len: usize,
dest: &MPlaceTy<'tcx>,
ecx: &mut MiriInterpCx<'tcx>,
) -> InterpResult<'tcx> {
let mut bytes = vec![0; len];
if !communicate_allowed {
// We want isolation mode to be deterministic, so we have to disallow all reads, even stdin.
helpers::isolation_abort_error("`read` from stdin")?;
}
let result = Read::read(&mut { self }, &mut bytes);
match result {
Ok(read_size) => ecx.return_read_success(ptr, &bytes, read_size, dest),
Err(e) => ecx.set_last_error_and_return(e, dest),
}
}
fn is_tty(&self, communicate_allowed: bool) -> bool {
communicate_allowed && self.is_terminal()
}
}
impl FileDescription for io::Stdout {
fn name(&self) -> &'static str {
"stdout"
}
fn write<'tcx>(
&self,
_self_ref: &FileDescriptionRef,
_communicate_allowed: bool,
ptr: Pointer,
len: usize,
dest: &MPlaceTy<'tcx>,
ecx: &mut MiriInterpCx<'tcx>,
) -> InterpResult<'tcx> {
let bytes = ecx.read_bytes_ptr_strip_provenance(ptr, Size::from_bytes(len))?;
// We allow writing to stdout even with isolation enabled.
let result = Write::write(&mut { self }, bytes);
// Stdout is buffered; flush to make sure it appears on the
// screen. This is the write() syscall of the interpreted
// program, and we want it to correspond to a write() syscall on
// the host -- there is no point in adding extra buffering
// here.
io::stdout().flush().unwrap();
match result {
Ok(write_size) => ecx.return_write_success(write_size, dest),
Err(e) => ecx.set_last_error_and_return(e, dest),
}
}
fn is_tty(&self, communicate_allowed: bool) -> bool {
communicate_allowed && self.is_terminal()
}
}
impl FileDescription for io::Stderr {
fn name(&self) -> &'static str {
"stderr"
}
fn write<'tcx>(
&self,
_self_ref: &FileDescriptionRef,
_communicate_allowed: bool,
ptr: Pointer,
len: usize,
dest: &MPlaceTy<'tcx>,
ecx: &mut MiriInterpCx<'tcx>,
) -> InterpResult<'tcx> {
let bytes = ecx.read_bytes_ptr_strip_provenance(ptr, Size::from_bytes(len))?;
// We allow writing to stderr even with isolation enabled.
// No need to flush, stderr is not buffered.
let result = Write::write(&mut { self }, bytes);
match result {
Ok(write_size) => ecx.return_write_success(write_size, dest),
Err(e) => ecx.set_last_error_and_return(e, dest),
}
}
fn is_tty(&self, communicate_allowed: bool) -> bool {
communicate_allowed && self.is_terminal()
}
}
/// Like /dev/null
#[derive(Debug)]
pub struct NullOutput;
impl FileDescription for NullOutput {
fn name(&self) -> &'static str {
"stderr and stdout"
}
fn write<'tcx>(
&self,
_self_ref: &FileDescriptionRef,
_communicate_allowed: bool,
_ptr: Pointer,
len: usize,
dest: &MPlaceTy<'tcx>,
ecx: &mut MiriInterpCx<'tcx>,
) -> InterpResult<'tcx> {
// We just don't write anything, but report to the user that we did.
ecx.return_write_success(len, dest)
}
}
/// A structure containing both the file description and its unique identifier.
#[derive(Clone, Debug)]
pub struct FileDescWithId<T: FileDescription + ?Sized> {
id: FdId,
file_description: Box<T>,
}
#[derive(Clone, Debug)]
pub struct FileDescriptionRef(Rc<FileDescWithId<dyn FileDescription>>);
impl Deref for FileDescriptionRef {
type Target = dyn FileDescription;
fn deref(&self) -> &Self::Target {
&*self.0.file_description
}
}
impl FileDescriptionRef {
fn new(fd: impl FileDescription, id: FdId) -> Self {
FileDescriptionRef(Rc::new(FileDescWithId { id, file_description: Box::new(fd) }))
}
pub fn close<'tcx>(
self,
communicate_allowed: bool,
ecx: &mut MiriInterpCx<'tcx>,
) -> InterpResult<'tcx, io::Result<()>> {
// Destroy this `Rc` using `into_inner` so we can call `close` instead of
// implicitly running the destructor of the file description.
let id = self.get_id();
match Rc::into_inner(self.0) {
Some(fd) => {
// Remove entry from the global epoll_event_interest table.
ecx.machine.epoll_interests.remove(id);
fd.file_description.close(communicate_allowed, ecx)
}
None => interp_ok(Ok(())),
}
}
pub fn downgrade(&self) -> WeakFileDescriptionRef {
WeakFileDescriptionRef { weak_ref: Rc::downgrade(&self.0) }
}
pub fn get_id(&self) -> FdId {
self.0.id
}
}
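
A note on the `close` path above: `Rc::into_inner` yields the file description only when this was the last strong reference, so of several `dup`'d descriptors sharing one description, only the final `close` runs the real close logic. A minimal standalone sketch of that behavior (the type and names here are illustrative, not Miri's):

use std::rc::Rc;

#[derive(Debug)]
struct Description(&'static str);

// Close one handle; the real close logic runs only for the last duplicate.
fn close(handle: Rc<Description>) -> bool {
    match Rc::into_inner(handle) {
        Some(desc) => {
            println!("actually closing {desc:?}");
            true
        }
        // Another duplicate still holds the description: do nothing.
        None => false,
    }
}

fn main() {
    let fd = Rc::new(Description("file"));
    let dup = Rc::clone(&fd); // e.g. the result of dup(2)
    assert!(!close(fd)); // first close is a no-op
    assert!(close(dup)); // last close runs the real logic
}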
/// Holds a weak reference to the actual file description.
#[derive(Clone, Debug, Default)]
pub struct WeakFileDescriptionRef {
weak_ref: Weak<FileDescWithId<dyn FileDescription>>,
}
impl WeakFileDescriptionRef {
pub fn upgrade(&self) -> Option<FileDescriptionRef> {
if let Some(file_desc_with_id) = self.weak_ref.upgrade() {
return Some(FileDescriptionRef(file_desc_with_id));
}
None
}
}
impl VisitProvenance for WeakFileDescriptionRef {
fn visit_provenance(&self, _visit: &mut VisitWith<'_>) {
// A weak reference can never be the only reference to some pointer or place.
// Since the actual file description is tracked by strong ref somewhere,
// it is ok to make this a NOP operation.
}
}
/// A unique id for file descriptions. While we could use the address, considering that
/// is definitely unique, the address would expose interpreter internal state when used
/// for sorting things. So instead we generate a unique id per file description that is the
/// same for all `dup`licates and is never reused.
#[derive(Debug, Copy, Clone, Default, Eq, PartialEq, Ord, PartialOrd)]
pub struct FdId(usize);
/// The file descriptor table
#[derive(Debug)]
pub struct FdTable {
pub fds: BTreeMap<i32, FileDescriptionRef>,
/// Unique identifier for the file description, used to differentiate between various file descriptions.
next_file_description_id: FdId,
}
impl VisitProvenance for FdTable {
fn visit_provenance(&self, _visit: &mut VisitWith<'_>) {
// All our FileDescription instances do not have any tags.
}
}
impl FdTable {
fn new() -> Self {
FdTable { fds: BTreeMap::new(), next_file_description_id: FdId(0) }
}
pub(crate) fn init(mute_stdout_stderr: bool) -> FdTable {
let mut fds = FdTable::new();
fds.insert_new(io::stdin());
if mute_stdout_stderr {
assert_eq!(fds.insert_new(NullOutput), 1);
assert_eq!(fds.insert_new(NullOutput), 2);
} else {
assert_eq!(fds.insert_new(io::stdout()), 1);
assert_eq!(fds.insert_new(io::stderr()), 2);
}
fds
}
pub fn new_ref(&mut self, fd: impl FileDescription) -> FileDescriptionRef {
let file_handle = FileDescriptionRef::new(fd, self.next_file_description_id);
self.next_file_description_id = FdId(self.next_file_description_id.0.strict_add(1));
file_handle
}
/// Insert a new file description into the FdTable.
pub fn insert_new(&mut self, fd: impl FileDescription) -> i32 {
let fd_ref = self.new_ref(fd);
self.insert(fd_ref)
}
pub fn insert(&mut self, fd_ref: FileDescriptionRef) -> i32 {
self.insert_with_min_num(fd_ref, 0)
}
/// Insert a file description, giving it a file descriptor that is at least `min_fd_num`.
pub fn insert_with_min_num(&mut self, file_handle: FileDescriptionRef, min_fd_num: i32) -> i32 {
// Find the lowest unused FD, starting from min_fd. If the first such unused FD is in
// between used FDs, the find_map combinator will return it. If the first such unused FD
// is after all other used FDs, the find_map combinator will return None, and we will use
// the FD following the greatest FD thus far.
let candidate_new_fd =
self.fds.range(min_fd_num..).zip(min_fd_num..).find_map(|((fd_num, _fd), counter)| {
if *fd_num != counter {
// There was a gap in the fds stored, return the first unused one
// (note that this relies on BTreeMap iterating in key order)
Some(counter)
} else {
// This fd is used, keep going
None
}
});
let new_fd_num = candidate_new_fd.unwrap_or_else(|| {
// find_map ran out of BTreeMap entries before finding a free fd, use one plus the
// maximum fd in the map
self.fds.last_key_value().map(|(fd_num, _)| fd_num.strict_add(1)).unwrap_or(min_fd_num)
});
self.fds.try_insert(new_fd_num, file_handle).unwrap();
new_fd_num
}
pub fn get(&self, fd_num: i32) -> Option<FileDescriptionRef> {
let fd = self.fds.get(&fd_num)?;
Some(fd.clone())
}
pub fn remove(&mut self, fd_num: i32) -> Option<FileDescriptionRef> {
self.fds.remove(&fd_num)
}
pub fn is_fd_num(&self, fd_num: i32) -> bool {
self.fds.contains_key(&fd_num)
}
}
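
The search in `insert_with_min_num` implements the usual POSIX allocation rule (also behind `F_DUPFD`): hand out the lowest unused descriptor number that is at least `min_fd_num`, exploiting that `BTreeMap` iterates keys in sorted order. A self-contained sketch of the same search, assuming plain `i32` keys and string values:

use std::collections::BTreeMap;

// Lowest unused key >= `min`: walk the used fds alongside the expected
// sequence min, min+1, ... and return at the first mismatch (a gap),
// else one past the largest used fd.
fn lowest_free_fd(fds: &BTreeMap<i32, &'static str>, min: i32) -> i32 {
    fds.range(min..)
        .zip(min..)
        .find_map(|((fd, _), expected)| (*fd != expected).then_some(expected))
        .unwrap_or_else(|| fds.last_key_value().map_or(min, |(fd, _)| fd + 1))
}

fn main() {
    let mut fds = BTreeMap::new();
    fds.insert(0, "stdin");
    fds.insert(1, "stdout");
    fds.insert(3, "file");
    assert_eq!(lowest_free_fd(&fds, 0), 2); // fills the gap between 1 and 3
    assert_eq!(lowest_free_fd(&fds, 3), 4); // past the largest used fd
}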
impl<'tcx> EvalContextExt<'tcx> for crate::MiriInterpCx<'tcx> {}
pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
/// Helper to implement `FileDescription::read`:
/// This is only used when `read` is successful.
/// `actual_read_size` should be the return value of some underlying `read` call that used
/// `bytes` as its output buffer.
/// The length of `bytes` must not exceed either the host's or the target's `isize`.
/// `bytes` is written to `buf` and the size is written to `dest`.
fn return_read_success(
&mut self,
buf: Pointer,
bytes: &[u8],
actual_read_size: usize,
dest: &MPlaceTy<'tcx>,
) -> InterpResult<'tcx> {
let this = self.eval_context_mut();
// If reading to `bytes` did not fail, we write those bytes to the buffer.
// Crucially, if fewer than `bytes.len()` bytes were read, only write
// that much into the output buffer!
this.write_bytes_ptr(buf, bytes[..actual_read_size].iter().copied())?;
// The actual read size never exceeds what got originally requested, so this cannot fail.
this.write_int(u64::try_from(actual_read_size).unwrap(), dest)?;
interp_ok(())
}
/// Helper to implement `FileDescription::write`:
/// This function is only used when `write` is successful, and writes `actual_write_size` to `dest`
fn return_write_success(
&mut self,
actual_write_size: usize,
dest: &MPlaceTy<'tcx>,
) -> InterpResult<'tcx> {
let this = self.eval_context_mut();
// The actual write size never exceeds what got originally requested, so this cannot fail.
this.write_int(u64::try_from(actual_write_size).unwrap(), dest)?;
interp_ok(())
}
}
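
The key invariant in `return_read_success` is that only the first `actual_read_size` bytes of the host-side scratch buffer are copied back into guest memory; anything past that would leak stale bytes the guest never asked for. A tiny illustration with plain slices standing in for guest memory:

// Copy back only what the host `read` actually produced, never the
// whole scratch buffer.
fn copy_read_result(guest_buf: &mut [u8], scratch: &[u8], actual_read: usize) {
    guest_buf[..actual_read].copy_from_slice(&scratch[..actual_read]);
}

fn main() {
    let mut guest = [0xAA_u8; 8]; // pretend this is guest memory
    let scratch = [1, 2, 3, 0, 0, 0, 0, 0]; // host read returned 3 bytes
    copy_read_result(&mut guest, &scratch, 3);
    assert_eq!(&guest[..3], &[1, 2, 3]);
    assert_eq!(guest[3], 0xAA); // bytes past the read size stay untouched
}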

View file

@ -82,11 +82,72 @@ const UNIX_IO_ERROR_TABLE: &[(&str, std::io::ErrorKind)] = {
// <https://github.com/rust-lang/rust/blob/master/library/std/src/sys/pal/windows/mod.rs>.
const WINDOWS_IO_ERROR_TABLE: &[(&str, std::io::ErrorKind)] = {
use std::io::ErrorKind::*;
// FIXME: this is still incomplete.
// It's common for multiple error codes to map to the same io::ErrorKind. We have them all for the
// forwards mapping; only the first one will be used for the backwards mapping.
// Slightly arbitrarily, we prefer non-WSA and the most generic-sounding variant for backwards
// mapping.
&[
("ERROR_ACCESS_DENIED", PermissionDenied),
("ERROR_FILE_NOT_FOUND", NotFound),
("WSAEADDRINUSE", AddrInUse),
("WSAEADDRNOTAVAIL", AddrNotAvailable),
("ERROR_ALREADY_EXISTS", AlreadyExists),
("ERROR_FILE_EXISTS", AlreadyExists),
("ERROR_NO_DATA", BrokenPipe),
("WSAECONNABORTED", ConnectionAborted),
("WSAECONNREFUSED", ConnectionRefused),
("WSAECONNRESET", ConnectionReset),
("ERROR_NOT_SAME_DEVICE", CrossesDevices),
("ERROR_POSSIBLE_DEADLOCK", Deadlock),
("ERROR_DIR_NOT_EMPTY", DirectoryNotEmpty),
("ERROR_CANT_RESOLVE_FILENAME", FilesystemLoop),
("ERROR_DISK_QUOTA_EXCEEDED", QuotaExceeded),
("WSAEDQUOT", QuotaExceeded),
("ERROR_FILE_TOO_LARGE", FileTooLarge),
("ERROR_HOST_UNREACHABLE", HostUnreachable),
("WSAEHOSTUNREACH", HostUnreachable),
("ERROR_INVALID_NAME", InvalidFilename),
("ERROR_BAD_PATHNAME", InvalidFilename),
("ERROR_FILENAME_EXCED_RANGE", InvalidFilename),
("ERROR_INVALID_PARAMETER", InvalidInput),
("WSAEINVAL", InvalidInput),
("ERROR_DIRECTORY_NOT_SUPPORTED", IsADirectory),
("WSAENETDOWN", NetworkDown),
("ERROR_NETWORK_UNREACHABLE", NetworkUnreachable),
("WSAENETUNREACH", NetworkUnreachable),
("ERROR_DIRECTORY", NotADirectory),
("WSAENOTCONN", NotConnected),
("ERROR_FILE_NOT_FOUND", NotFound),
("ERROR_PATH_NOT_FOUND", NotFound),
("ERROR_INVALID_DRIVE", NotFound),
("ERROR_BAD_NETPATH", NotFound),
("ERROR_BAD_NET_NAME", NotFound),
("ERROR_SEEK_ON_DEVICE", NotSeekable),
("ERROR_NOT_ENOUGH_MEMORY", OutOfMemory),
("ERROR_OUTOFMEMORY", OutOfMemory),
("ERROR_ACCESS_DENIED", PermissionDenied),
("WSAEACCES", PermissionDenied),
("ERROR_WRITE_PROTECT", ReadOnlyFilesystem),
("ERROR_BUSY", ResourceBusy),
("ERROR_DISK_FULL", StorageFull),
("ERROR_HANDLE_DISK_FULL", StorageFull),
("WAIT_TIMEOUT", TimedOut),
("WSAETIMEDOUT", TimedOut),
("ERROR_DRIVER_CANCEL_TIMEOUT", TimedOut),
("ERROR_OPERATION_ABORTED", TimedOut),
("ERROR_SERVICE_REQUEST_TIMEOUT", TimedOut),
("ERROR_COUNTER_TIMEOUT", TimedOut),
("ERROR_TIMEOUT", TimedOut),
("ERROR_RESOURCE_CALL_TIMED_OUT", TimedOut),
("ERROR_CTX_MODEM_RESPONSE_TIMEOUT", TimedOut),
("ERROR_CTX_CLIENT_QUERY_TIMEOUT", TimedOut),
("FRS_ERR_SYSVOL_POPULATE_TIMEOUT", TimedOut),
("ERROR_DS_TIMELIMIT_EXCEEDED", TimedOut),
("DNS_ERROR_RECORD_TIMED_OUT", TimedOut),
("ERROR_IPSEC_IKE_TIMED_OUT", TimedOut),
("ERROR_RUNLEVEL_SWITCH_TIMEOUT", TimedOut),
("ERROR_RUNLEVEL_SWITCH_AGENT_TIMEOUT", TimedOut),
("ERROR_TOO_MANY_LINKS", TooManyLinks),
("ERROR_CALL_NOT_IMPLEMENTED", Unsupported),
("WSAEWOULDBLOCK", WouldBlock),
]
};
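
Since several Windows error names share one `ErrorKind`, the table is only unambiguous in the forwards direction; the backwards direction takes the first matching row, which is why the preferred name must come first. A sketch of both lookups over the same table shape (the helper names here are hypothetical):

use std::io::ErrorKind;

// Same shape as the table above, shortened: two names, one kind.
const TABLE: &[(&str, ErrorKind)] = &[
    ("ERROR_ACCESS_DENIED", ErrorKind::PermissionDenied),
    ("WSAEACCES", ErrorKind::PermissionDenied),
];

// Forwards: every listed error name resolves to its ErrorKind.
fn kind_for_name(name: &str) -> Option<ErrorKind> {
    TABLE.iter().find(|&&(n, _)| n == name).map(|&(_, k)| k)
}

// Backwards: the first matching row wins, so table order matters.
fn name_for_kind(kind: ErrorKind) -> Option<&'static str> {
    TABLE.iter().find(|&&(_, k)| k == kind).map(|&(n, _)| n)
}

fn main() {
    assert_eq!(kind_for_name("WSAEACCES"), Some(ErrorKind::PermissionDenied));
    // Both names map forwards, but only the preferred one maps back.
    assert_eq!(name_for_kind(ErrorKind::PermissionDenied), Some("ERROR_ACCESS_DENIED"));
}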

View file

@ -2,6 +2,7 @@
mod alloc;
mod backtrace;
mod files;
#[cfg(unix)]
mod native_lib;
mod unix;
@ -18,7 +19,8 @@ pub mod panic;
pub mod time;
pub mod tls;
pub use self::unix::{DirTable, EpollInterestTable, FdTable};
pub use self::files::FdTable;
pub use self::unix::{DirTable, EpollInterestTable};
/// What needs to be done after emulating an item (a shim or an intrinsic) is done.
pub enum EmulateItemResult {

View file

@ -4,10 +4,8 @@ use std::ops::Deref;
use libffi::high::call as ffi;
use libffi::low::CodePtr;
use rustc_abi::{BackendRepr, HasDataLayout, Size};
use rustc_middle::{
mir::interpret::Pointer,
ty::{self as ty, IntTy, UintTy},
};
use rustc_middle::mir::interpret::Pointer;
use rustc_middle::ty::{self as ty, IntTy, UintTy};
use rustc_span::Symbol;
use crate::*;
@ -177,7 +175,7 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
this.prepare_for_native_call(alloc_id, prov)?;
}
}
// FIXME: In the future, we should also call `prepare_for_native_call` on all previously
// exposed allocations, since C may access any of them.

View file

@ -2,7 +2,9 @@ use rustc_abi::ExternAbi;
use rustc_span::Symbol;
use crate::shims::unix::android::thread::prctl;
use crate::shims::unix::linux::syscall::syscall;
use crate::shims::unix::linux_like::epoll::EvalContextExt as _;
use crate::shims::unix::linux_like::eventfd::EvalContextExt as _;
use crate::shims::unix::linux_like::syscall::syscall;
use crate::*;
pub fn is_dyn_sym(_name: &str) -> bool {
@ -20,6 +22,31 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
) -> InterpResult<'tcx, EmulateItemResult> {
let this = self.eval_context_mut();
match link_name.as_str() {
// epoll, eventfd
"epoll_create1" => {
let [flag] =
this.check_shim(abi, ExternAbi::C { unwind: false }, link_name, args)?;
let result = this.epoll_create1(flag)?;
this.write_scalar(result, dest)?;
}
"epoll_ctl" => {
let [epfd, op, fd, event] =
this.check_shim(abi, ExternAbi::C { unwind: false }, link_name, args)?;
let result = this.epoll_ctl(epfd, op, fd, event)?;
this.write_scalar(result, dest)?;
}
"epoll_wait" => {
let [epfd, events, maxevents, timeout] =
this.check_shim(abi, ExternAbi::C { unwind: false }, link_name, args)?;
this.epoll_wait(epfd, events, maxevents, timeout, dest)?;
}
"eventfd" => {
let [val, flag] =
this.check_shim(abi, ExternAbi::C { unwind: false }, link_name, args)?;
let result = this.eventfd(val, flag)?;
this.write_scalar(result, dest)?;
}
// Miscellaneous
"__errno" => {
let [] = this.check_shim(abi, ExternAbi::C { unwind: false }, link_name, args)?;

View file

@ -1,16 +1,14 @@
//! General management of file descriptors, and support for
//! standard file descriptors (stdin/stdout/stderr).
use std::any::Any;
use std::collections::BTreeMap;
use std::io::{self, ErrorKind, IsTerminal, Read, SeekFrom, Write};
use std::ops::Deref;
use std::rc::{Rc, Weak};
use std::io;
use std::io::ErrorKind;
use rustc_abi::Size;
use crate::helpers::check_min_arg_count;
use crate::shims::unix::linux::epoll::EpollReadyEvents;
use crate::shims::files::FileDescription;
use crate::shims::unix::linux_like::epoll::EpollReadyEvents;
use crate::shims::unix::*;
use crate::*;
@ -21,40 +19,8 @@ pub(crate) enum FlockOp {
Unlock,
}
/// Represents an open file description.
pub trait FileDescription: std::fmt::Debug + Any {
fn name(&self) -> &'static str;
/// Reads as much as possible into the given buffer `ptr`.
/// `len` indicates how many bytes we should try to read.
/// `dest` is where the return value should be stored: number of bytes read, or `-1` in case of error.
fn read<'tcx>(
&self,
_self_ref: &FileDescriptionRef,
_communicate_allowed: bool,
_ptr: Pointer,
_len: usize,
_dest: &MPlaceTy<'tcx>,
_ecx: &mut MiriInterpCx<'tcx>,
) -> InterpResult<'tcx> {
throw_unsup_format!("cannot read from {}", self.name());
}
/// Writes as much as possible from the given buffer `ptr`.
/// `len` indicates how many bytes we should try to write.
/// `dest` is where the return value should be stored: number of bytes written, or `-1` in case of error.
fn write<'tcx>(
&self,
_self_ref: &FileDescriptionRef,
_communicate_allowed: bool,
_ptr: Pointer,
_len: usize,
_dest: &MPlaceTy<'tcx>,
_ecx: &mut MiriInterpCx<'tcx>,
) -> InterpResult<'tcx> {
throw_unsup_format!("cannot write to {}", self.name());
}
/// Represents unix-specific file descriptions.
pub trait UnixFileDescription: FileDescription {
/// Reads as much as possible into the given buffer `ptr` from a given offset.
/// `len` indicates how many bytes we should try to read.
/// `dest` is where the return value should be stored: number of bytes read, or `-1` in case of error.
@ -86,24 +52,6 @@ pub trait FileDescription: std::fmt::Debug + Any {
throw_unsup_format!("cannot pwrite to {}", self.name());
}
/// Seeks to the given offset (which can be relative to the beginning, end, or current position).
/// Returns the new position from the start of the stream.
fn seek<'tcx>(
&self,
_communicate_allowed: bool,
_offset: SeekFrom,
) -> InterpResult<'tcx, io::Result<u64>> {
throw_unsup_format!("cannot seek on {}", self.name());
}
fn close<'tcx>(
self: Box<Self>,
_communicate_allowed: bool,
_ecx: &mut MiriInterpCx<'tcx>,
) -> InterpResult<'tcx, io::Result<()>> {
throw_unsup_format!("cannot close {}", self.name());
}
fn flock<'tcx>(
&self,
_communicate_allowed: bool,
@ -112,311 +60,12 @@ pub trait FileDescription: std::fmt::Debug + Any {
throw_unsup_format!("cannot flock {}", self.name());
}
fn is_tty(&self, _communicate_allowed: bool) -> bool {
// Most FDs are not tty's and the consequence of a wrong `false` are minor,
// so we use a default impl here.
false
}
/// Check the readiness of file description.
fn get_epoll_ready_events<'tcx>(&self) -> InterpResult<'tcx, EpollReadyEvents> {
throw_unsup_format!("{}: epoll does not support this file description", self.name());
}
}
impl dyn FileDescription {
#[inline(always)]
pub fn downcast<T: Any>(&self) -> Option<&T> {
(self as &dyn Any).downcast_ref()
}
}
impl FileDescription for io::Stdin {
fn name(&self) -> &'static str {
"stdin"
}
fn read<'tcx>(
&self,
_self_ref: &FileDescriptionRef,
communicate_allowed: bool,
ptr: Pointer,
len: usize,
dest: &MPlaceTy<'tcx>,
ecx: &mut MiriInterpCx<'tcx>,
) -> InterpResult<'tcx> {
let mut bytes = vec![0; len];
if !communicate_allowed {
// We want isolation mode to be deterministic, so we have to disallow all reads, even stdin.
helpers::isolation_abort_error("`read` from stdin")?;
}
let result = Read::read(&mut { self }, &mut bytes);
match result {
Ok(read_size) => ecx.return_read_success(ptr, &bytes, read_size, dest),
Err(e) => ecx.set_last_error_and_return(e, dest),
}
}
fn is_tty(&self, communicate_allowed: bool) -> bool {
communicate_allowed && self.is_terminal()
}
}
impl FileDescription for io::Stdout {
fn name(&self) -> &'static str {
"stdout"
}
fn write<'tcx>(
&self,
_self_ref: &FileDescriptionRef,
_communicate_allowed: bool,
ptr: Pointer,
len: usize,
dest: &MPlaceTy<'tcx>,
ecx: &mut MiriInterpCx<'tcx>,
) -> InterpResult<'tcx> {
let bytes = ecx.read_bytes_ptr_strip_provenance(ptr, Size::from_bytes(len))?;
// We allow writing to stdout even with isolation enabled.
let result = Write::write(&mut { self }, bytes);
// Stdout is buffered, so flush it to make sure it appears on the
// screen. This is the write() syscall of the interpreted
// program; we want it to correspond to a write() syscall on
// the host -- there is no point in adding extra buffering
// here.
io::stdout().flush().unwrap();
match result {
Ok(write_size) => ecx.return_write_success(write_size, dest),
Err(e) => ecx.set_last_error_and_return(e, dest),
}
}
fn is_tty(&self, communicate_allowed: bool) -> bool {
communicate_allowed && self.is_terminal()
}
}
impl FileDescription for io::Stderr {
fn name(&self) -> &'static str {
"stderr"
}
fn write<'tcx>(
&self,
_self_ref: &FileDescriptionRef,
_communicate_allowed: bool,
ptr: Pointer,
len: usize,
dest: &MPlaceTy<'tcx>,
ecx: &mut MiriInterpCx<'tcx>,
) -> InterpResult<'tcx> {
let bytes = ecx.read_bytes_ptr_strip_provenance(ptr, Size::from_bytes(len))?;
// We allow writing to stderr even with isolation enabled.
// No need to flush, stderr is not buffered.
let result = Write::write(&mut { self }, bytes);
match result {
Ok(write_size) => ecx.return_write_success(write_size, dest),
Err(e) => ecx.set_last_error_and_return(e, dest),
}
}
fn is_tty(&self, communicate_allowed: bool) -> bool {
communicate_allowed && self.is_terminal()
}
}
/// Like /dev/null
#[derive(Debug)]
pub struct NullOutput;
impl FileDescription for NullOutput {
fn name(&self) -> &'static str {
"stderr and stdout"
}
fn write<'tcx>(
&self,
_self_ref: &FileDescriptionRef,
_communicate_allowed: bool,
_ptr: Pointer,
len: usize,
dest: &MPlaceTy<'tcx>,
ecx: &mut MiriInterpCx<'tcx>,
) -> InterpResult<'tcx> {
// We just don't write anything, but report to the user that we did.
ecx.return_write_success(len, dest)
}
}
/// A structure containing both the file description and its unique identifier.
#[derive(Clone, Debug)]
pub struct FileDescWithId<T: FileDescription + ?Sized> {
id: FdId,
file_description: Box<T>,
}
#[derive(Clone, Debug)]
pub struct FileDescriptionRef(Rc<FileDescWithId<dyn FileDescription>>);
impl Deref for FileDescriptionRef {
type Target = dyn FileDescription;
fn deref(&self) -> &Self::Target {
&*self.0.file_description
}
}
impl FileDescriptionRef {
fn new(fd: impl FileDescription, id: FdId) -> Self {
FileDescriptionRef(Rc::new(FileDescWithId { id, file_description: Box::new(fd) }))
}
pub fn close<'tcx>(
self,
communicate_allowed: bool,
ecx: &mut MiriInterpCx<'tcx>,
) -> InterpResult<'tcx, io::Result<()>> {
// Destroy this `Rc` using `into_inner` so we can call `close` instead of
// implicitly running the destructor of the file description.
let id = self.get_id();
match Rc::into_inner(self.0) {
Some(fd) => {
// Remove entry from the global epoll_event_interest table.
ecx.machine.epoll_interests.remove(id);
fd.file_description.close(communicate_allowed, ecx)
}
None => interp_ok(Ok(())),
}
}
pub fn downgrade(&self) -> WeakFileDescriptionRef {
WeakFileDescriptionRef { weak_ref: Rc::downgrade(&self.0) }
}
pub fn get_id(&self) -> FdId {
self.0.id
}
}
/// Holds a weak reference to the actual file description.
#[derive(Clone, Debug, Default)]
pub struct WeakFileDescriptionRef {
weak_ref: Weak<FileDescWithId<dyn FileDescription>>,
}
impl WeakFileDescriptionRef {
pub fn upgrade(&self) -> Option<FileDescriptionRef> {
if let Some(file_desc_with_id) = self.weak_ref.upgrade() {
return Some(FileDescriptionRef(file_desc_with_id));
}
None
}
}
impl VisitProvenance for WeakFileDescriptionRef {
fn visit_provenance(&self, _visit: &mut VisitWith<'_>) {
// A weak reference can never be the only reference to some pointer or place.
// Since the actual file description is tracked by strong ref somewhere,
// it is ok to make this a NOP operation.
}
}
/// A unique id for file descriptions. While we could use the address, considering that
/// is definitely unique, the address would expose interpreter internal state when used
/// for sorting things. So instead we generate a unique id per file description that stays
/// the same even if a file descriptor is duplicated and gets a new integer file descriptor.
#[derive(Debug, Copy, Clone, Default, Eq, PartialEq, Ord, PartialOrd)]
pub struct FdId(usize);
/// The file descriptor table
#[derive(Debug)]
pub struct FdTable {
pub fds: BTreeMap<i32, FileDescriptionRef>,
/// Unique identifier for the file description, used to differentiate between various file descriptions.
next_file_description_id: FdId,
}
impl VisitProvenance for FdTable {
fn visit_provenance(&self, _visit: &mut VisitWith<'_>) {
// All our FileDescription instances do not have any tags.
}
}
impl FdTable {
fn new() -> Self {
FdTable { fds: BTreeMap::new(), next_file_description_id: FdId(0) }
}
pub(crate) fn init(mute_stdout_stderr: bool) -> FdTable {
let mut fds = FdTable::new();
fds.insert_new(io::stdin());
if mute_stdout_stderr {
assert_eq!(fds.insert_new(NullOutput), 1);
assert_eq!(fds.insert_new(NullOutput), 2);
} else {
assert_eq!(fds.insert_new(io::stdout()), 1);
assert_eq!(fds.insert_new(io::stderr()), 2);
}
fds
}
pub fn new_ref(&mut self, fd: impl FileDescription) -> FileDescriptionRef {
let file_handle = FileDescriptionRef::new(fd, self.next_file_description_id);
self.next_file_description_id = FdId(self.next_file_description_id.0.strict_add(1));
file_handle
}
/// Insert a new file description into the FdTable.
pub fn insert_new(&mut self, fd: impl FileDescription) -> i32 {
let fd_ref = self.new_ref(fd);
self.insert(fd_ref)
}
pub fn insert(&mut self, fd_ref: FileDescriptionRef) -> i32 {
self.insert_with_min_num(fd_ref, 0)
}
/// Insert a file description, giving it a file descriptor that is at least `min_fd_num`.
fn insert_with_min_num(&mut self, file_handle: FileDescriptionRef, min_fd_num: i32) -> i32 {
// Find the lowest unused FD, starting from min_fd. If the first such unused FD is in
// between used FDs, the find_map combinator will return it. If the first such unused FD
// is after all other used FDs, the find_map combinator will return None, and we will use
// the FD following the greatest FD thus far.
let candidate_new_fd =
self.fds.range(min_fd_num..).zip(min_fd_num..).find_map(|((fd_num, _fd), counter)| {
if *fd_num != counter {
// There was a gap in the fds stored, return the first unused one
// (note that this relies on BTreeMap iterating in key order)
Some(counter)
} else {
// This fd is used, keep going
None
}
});
let new_fd_num = candidate_new_fd.unwrap_or_else(|| {
// find_map ran out of BTreeMap entries before finding a free fd, use one plus the
// maximum fd in the map
self.fds.last_key_value().map(|(fd_num, _)| fd_num.strict_add(1)).unwrap_or(min_fd_num)
});
self.fds.try_insert(new_fd_num, file_handle).unwrap();
new_fd_num
}
pub fn get(&self, fd_num: i32) -> Option<FileDescriptionRef> {
let fd = self.fds.get(&fd_num)?;
Some(fd.clone())
}
pub fn remove(&mut self, fd_num: i32) -> Option<FileDescriptionRef> {
self.fds.remove(&fd_num)
}
pub fn is_fd_num(&self, fd_num: i32) -> bool {
self.fds.contains_key(&fd_num)
}
}
impl<'tcx> EvalContextExt<'tcx> for crate::MiriInterpCx<'tcx> {}
pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
fn dup(&mut self, old_fd_num: i32) -> InterpResult<'tcx, Scalar> {
@ -472,7 +121,7 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
throw_unsup_format!("unsupported flags {:#x}", op);
};
let result = fd.flock(this.machine.communicate(), parsed_op)?;
let result = fd.as_unix().flock(this.machine.communicate(), parsed_op)?;
drop(fd);
// return `0` if flock is successful
let result = result.map(|()| 0i32);
@ -602,7 +251,7 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
let Ok(offset) = u64::try_from(offset) else {
return this.set_last_error_and_return(LibcError("EINVAL"), dest);
};
fd.pread(communicate, offset, buf, count, dest, this)?
fd.as_unix().pread(communicate, offset, buf, count, dest, this)?
}
};
interp_ok(())
@ -642,46 +291,9 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
let Ok(offset) = u64::try_from(offset) else {
return this.set_last_error_and_return(LibcError("EINVAL"), dest);
};
fd.pwrite(communicate, buf, count, offset, dest, this)?
fd.as_unix().pwrite(communicate, buf, count, offset, dest, this)?
}
};
interp_ok(())
}
/// Helper to implement `FileDescription::read`:
/// This is only used when `read` is successful.
/// `actual_read_size` should be the return value of some underlying `read` call that used
/// `bytes` as its output buffer.
/// The length of `bytes` must not exceed either the host's or the target's `isize`.
/// `bytes` is written to `buf` and the size is written to `dest`.
fn return_read_success(
&mut self,
buf: Pointer,
bytes: &[u8],
actual_read_size: usize,
dest: &MPlaceTy<'tcx>,
) -> InterpResult<'tcx> {
let this = self.eval_context_mut();
// If reading to `bytes` did not fail, we write those bytes to the buffer.
// Crucially, if fewer than `bytes.len()` bytes were read, only write
// that much into the output buffer!
this.write_bytes_ptr(buf, bytes[..actual_read_size].iter().copied())?;
// The actual read size never exceeds what got originally requested, so this cannot fail.
this.write_int(u64::try_from(actual_read_size).unwrap(), dest)?;
interp_ok(())
}
/// Helper to implement `FileDescription::write`:
/// This function is only used when `write` is successful, and writes `actual_write_size` to `dest`
fn return_write_success(
&mut self,
actual_write_size: usize,
dest: &MPlaceTy<'tcx>,
) -> InterpResult<'tcx> {
let this = self.eval_context_mut();
// The actual write size never exceeds what got originally requested, so this cannot fail.
this.write_int(u64::try_from(actual_write_size).unwrap(), dest)?;
interp_ok(())
}
}

View file

@ -39,6 +39,64 @@ pub fn is_dyn_sym(name: &str, target_os: &str) -> bool {
impl<'tcx> EvalContextExt<'tcx> for crate::MiriInterpCx<'tcx> {}
pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
// Querying system information
fn sysconf(&mut self, val: &OpTy<'tcx>) -> InterpResult<'tcx, Scalar> {
let this = self.eval_context_mut();
let name = this.read_scalar(val)?.to_i32()?;
// FIXME: Which of these are POSIX, and which are GNU/Linux?
// At least the names seem to all also exist on macOS.
let sysconfs: &[(&str, fn(&MiriInterpCx<'_>) -> Scalar)] = &[
("_SC_PAGESIZE", |this| Scalar::from_int(this.machine.page_size, this.pointer_size())),
("_SC_PAGE_SIZE", |this| Scalar::from_int(this.machine.page_size, this.pointer_size())),
("_SC_NPROCESSORS_CONF", |this| {
Scalar::from_int(this.machine.num_cpus, this.pointer_size())
}),
("_SC_NPROCESSORS_ONLN", |this| {
Scalar::from_int(this.machine.num_cpus, this.pointer_size())
}),
// 512 seems to be a reasonable default. The value is not critical, in
// the sense that getpwuid_r takes and checks the buffer length.
("_SC_GETPW_R_SIZE_MAX", |this| Scalar::from_int(512, this.pointer_size())),
// Miri doesn't have a fixed limit on FDs, but we may be limited in terms of how
// many *host* FDs we can open. Just use some arbitrary, pretty big value;
// this can be adjusted if it causes problems.
// The spec imposes a minimum of `_POSIX_OPEN_MAX` (20).
("_SC_OPEN_MAX", |this| Scalar::from_int(2_i32.pow(16), this.pointer_size())),
];
for &(sysconf_name, value) in sysconfs {
let sysconf_name = this.eval_libc_i32(sysconf_name);
if sysconf_name == name {
return interp_ok(value(this));
}
}
throw_unsup_format!("unimplemented sysconf name: {}", name)
}
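
The `sysconf` shim is table-driven: each supported name is paired with a closure that computes its value, and the incoming integer is compared against each evaluated libc constant in turn. A reduced sketch of the dispatch, with assumed constant values instead of evaluating libc:

// Table-driven sysconf dispatch; `None` models "unimplemented name".
fn sysconf_sketch(name: i32) -> Option<i64> {
    // Assumed constant values for the sketch; the shim evaluates the
    // real ones from libc at runtime.
    const _SC_PAGESIZE: i32 = 30;
    const _SC_OPEN_MAX: i32 = 4;
    let table: &[(i32, fn() -> i64)] = &[
        (_SC_PAGESIZE, || 4096),
        (_SC_OPEN_MAX, || 1 << 16),
    ];
    table.iter().find(|&&(n, _)| n == name).map(|&(_, f)| f())
}

fn main() {
    assert_eq!(sysconf_sketch(30), Some(4096));
    assert_eq!(sysconf_sketch(999), None); // would throw_unsup_format! in the shim
}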
fn strerror_r(
&mut self,
errnum: &OpTy<'tcx>,
buf: &OpTy<'tcx>,
buflen: &OpTy<'tcx>,
) -> InterpResult<'tcx, Scalar> {
let this = self.eval_context_mut();
let errnum = this.read_scalar(errnum)?;
let buf = this.read_pointer(buf)?;
let buflen = this.read_target_usize(buflen)?;
let error = this.try_errnum_to_io_error(errnum)?;
let formatted = match error {
Some(err) => format!("{err}"),
None => format!("<unknown errnum in strerror_r: {errnum}>"),
};
let (complete, _) = this.write_os_str_to_c_str(OsStr::new(&formatted), buf, buflen)?;
if complete {
interp_ok(Scalar::from_i32(0))
} else {
interp_ok(Scalar::from_i32(this.eval_libc_i32("ERANGE")))
}
}
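
`strerror_r` follows the C contract: write the message into the caller's buffer and return 0, or return `ERANGE` when the NUL-terminated string does not fit completely. A sketch of that contract with ordinary Rust types (the `ERANGE` value is an assumption for the example; it varies by platform):

const ERANGE: i32 = 34; // assumed value for the sketch

// Format an error message into a fixed-size buffer, returning 0 on
// success or ERANGE if the NUL-terminated string does not fit.
fn strerror_r_sketch(msg: &str, buf: &mut [u8]) -> i32 {
    let bytes = msg.as_bytes();
    if bytes.len() + 1 > buf.len() {
        return ERANGE; // message plus NUL terminator does not fit
    }
    buf[..bytes.len()].copy_from_slice(bytes);
    buf[bytes.len()] = 0; // NUL terminator
    0
}

fn main() {
    let mut small = [0u8; 4];
    assert_eq!(strerror_r_sketch("permission denied", &mut small), ERANGE);
    let mut big = [0u8; 64];
    assert_eq!(strerror_r_sketch("permission denied", &mut big), 0);
}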
fn emulate_foreign_item_inner(
&mut self,
link_name: Symbol,
@ -84,6 +142,13 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
this.write_scalar(result, dest)?;
}
"sysconf" => {
let [val] =
this.check_shim(abi, ExternAbi::C { unwind: false }, link_name, args)?;
let result = this.sysconf(val)?;
this.write_scalar(result, dest)?;
}
// File descriptors
"read" => {
let [fd, buf, count] = this.check_shim(abi, ExternAbi::C { unwind: false }, link_name, args)?;
@ -393,35 +458,6 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
}
}
// Querying system information
"sysconf" => {
let [name] = this.check_shim(abi, ExternAbi::C { unwind: false }, link_name, args)?;
let name = this.read_scalar(name)?.to_i32()?;
// FIXME: Which of these are POSIX, and which are GNU/Linux?
// At least the names seem to all also exist on macOS.
let sysconfs: &[(&str, fn(&MiriInterpCx<'_>) -> Scalar)] = &[
("_SC_PAGESIZE", |this| Scalar::from_int(this.machine.page_size, this.pointer_size())),
("_SC_NPROCESSORS_CONF", |this| Scalar::from_int(this.machine.num_cpus, this.pointer_size())),
("_SC_NPROCESSORS_ONLN", |this| Scalar::from_int(this.machine.num_cpus, this.pointer_size())),
// 512 seems to be a reasonable default. The value is not critical, in
// the sense that getpwuid_r takes and checks the buffer length.
("_SC_GETPW_R_SIZE_MAX", |this| Scalar::from_int(512, this.pointer_size()))
];
let mut result = None;
for &(sysconf_name, value) in sysconfs {
let sysconf_name = this.eval_libc_i32(sysconf_name);
if sysconf_name == name {
result = Some(value(this));
break;
}
}
if let Some(result) = result {
this.write_scalar(result, dest)?;
} else {
throw_unsup_format!("unimplemented sysconf name: {}", name)
}
}
// Thread-local storage
"pthread_key_create" => {
let [key, dtor] = this.check_shim(abi, ExternAbi::C { unwind: false }, link_name, args)?;
@ -724,21 +760,6 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
// We do not support forking, so there is nothing to do here.
this.write_null(dest)?;
}
"strerror_r" | "__xpg_strerror_r" => {
let [errnum, buf, buflen] = this.check_shim(abi, ExternAbi::C { unwind: false }, link_name, args)?;
let errnum = this.read_scalar(errnum)?;
let buf = this.read_pointer(buf)?;
let buflen = this.read_target_usize(buflen)?;
let error = this.try_errnum_to_io_error(errnum)?;
let formatted = match error {
Some(err) => format!("{err}"),
None => format!("<unknown errnum in strerror_r: {errnum}>"),
};
let (complete, _) = this.write_os_str_to_c_str(OsStr::new(&formatted), buf, buflen)?;
let ret = if complete { 0 } else { this.eval_libc_i32("ERANGE") };
this.write_int(ret, dest)?;
}
"getentropy" => {
// This function is non-standard but exists with the same signature and behavior on
// Linux, macOS, FreeBSD and Solaris/Illumos.
@ -766,6 +787,14 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
this.write_null(dest)?;
}
}
"strerror_r" => {
let [errnum, buf, buflen] =
this.check_shim(abi, ExternAbi::C { unwind: false }, link_name, args)?;
let result = this.strerror_r(errnum, buf, buflen)?;
this.write_scalar(result, dest)?;
}
"getrandom" => {
// This function is non-standard but exists with the same signature and behavior on
// Linux, FreeBSD and Solaris/Illumos.

View file

@ -53,19 +53,19 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
"stat" | "stat@FBSD_1.0" => {
let [path, buf] =
this.check_shim(abi, ExternAbi::C { unwind: false }, link_name, args)?;
let result = this.macos_fbsd_stat(path, buf)?;
let result = this.macos_fbsd_solaris_stat(path, buf)?;
this.write_scalar(result, dest)?;
}
"lstat" | "lstat@FBSD_1.0" => {
let [path, buf] =
this.check_shim(abi, ExternAbi::C { unwind: false }, link_name, args)?;
let result = this.macos_fbsd_lstat(path, buf)?;
let result = this.macos_fbsd_solaris_lstat(path, buf)?;
this.write_scalar(result, dest)?;
}
"fstat" | "fstat@FBSD_1.0" => {
let [fd, buf] =
this.check_shim(abi, ExternAbi::C { unwind: false }, link_name, args)?;
let result = this.macos_fbsd_fstat(fd, buf)?;
let result = this.macos_fbsd_solaris_fstat(fd, buf)?;
this.write_scalar(result, dest)?;
}
"readdir_r" | "readdir_r@FBSD_1.0" => {

View file

@ -2,7 +2,8 @@
use std::borrow::Cow;
use std::fs::{
DirBuilder, File, FileType, OpenOptions, ReadDir, read_dir, remove_dir, remove_file, rename,
DirBuilder, File, FileType, Metadata, OpenOptions, ReadDir, read_dir, remove_dir, remove_file,
rename,
};
use std::io::{self, ErrorKind, IsTerminal, Read, Seek, SeekFrom, Write};
use std::path::{Path, PathBuf};
@ -11,12 +12,11 @@ use std::time::SystemTime;
use rustc_abi::Size;
use rustc_data_structures::fx::FxHashMap;
use self::fd::FlockOp;
use self::shims::time::system_time_to_duration;
use crate::helpers::check_min_arg_count;
use crate::shims::files::{EvalContextExt as _, FileDescription, FileDescriptionRef};
use crate::shims::os_str::bytes_to_os_str;
use crate::shims::unix::fd::FileDescriptionRef;
use crate::shims::unix::*;
use crate::shims::unix::fd::{FlockOp, UnixFileDescription};
use crate::*;
#[derive(Debug)]
@ -66,6 +66,55 @@ impl FileDescription for FileHandle {
}
}
fn seek<'tcx>(
&self,
communicate_allowed: bool,
offset: SeekFrom,
) -> InterpResult<'tcx, io::Result<u64>> {
assert!(communicate_allowed, "isolation should have prevented even opening a file");
interp_ok((&mut &self.file).seek(offset))
}
fn close<'tcx>(
self: Box<Self>,
communicate_allowed: bool,
_ecx: &mut MiriInterpCx<'tcx>,
) -> InterpResult<'tcx, io::Result<()>> {
assert!(communicate_allowed, "isolation should have prevented even opening a file");
// We sync the file if it was opened in a mode different than read-only.
if self.writable {
// `File::sync_all` does the checks that are done when closing a file. We do this
// to handle possible errors correctly.
let result = self.file.sync_all();
// Now we actually close the file and return the result.
drop(*self);
interp_ok(result)
} else {
// We drop the file; this closes it but ignores any errors
// produced when closing it. This is done because
// `File::sync_all` cannot be done over files like
// `/dev/urandom` which are read-only. Check
// https://github.com/rust-lang/miri/issues/999#issuecomment-568920439
// for a deeper discussion.
drop(*self);
interp_ok(Ok(()))
}
}
fn metadata<'tcx>(&self) -> InterpResult<'tcx, io::Result<Metadata>> {
interp_ok(self.file.metadata())
}
fn is_tty(&self, communicate_allowed: bool) -> bool {
communicate_allowed && self.file.is_terminal()
}
fn as_unix(&self) -> &dyn UnixFileDescription {
self
}
}
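
The `close` logic above exists because dropping a `File` silently discards errors from the underlying close: calling `sync_all` first on writable handles surfaces write-back failures, while read-only handles (such as `/dev/urandom`, which cannot be synced) are simply dropped. A standalone sketch of the pattern, using a hypothetical scratch file:

use std::fs::File;
use std::io;

// Close a file, reporting write-back errors when the handle was writable.
fn close_file(file: File, writable: bool) -> io::Result<()> {
    if writable {
        // Surfaces errors that a plain `drop` would silently swallow.
        let result = file.sync_all();
        drop(file);
        result
    } else {
        // Read-only handles (e.g. /dev/urandom) may not support sync_all.
        drop(file);
        Ok(())
    }
}

fn main() -> io::Result<()> {
    let file = File::create("example.tmp")?; // hypothetical scratch file
    close_file(file, /* writable */ true)?;
    std::fs::remove_file("example.tmp")
}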
impl UnixFileDescription for FileHandle {
fn pread<'tcx>(
&self,
communicate_allowed: bool,
@ -128,41 +177,6 @@ impl FileDescription for FileHandle {
}
}
fn seek<'tcx>(
&self,
communicate_allowed: bool,
offset: SeekFrom,
) -> InterpResult<'tcx, io::Result<u64>> {
assert!(communicate_allowed, "isolation should have prevented even opening a file");
interp_ok((&mut &self.file).seek(offset))
}
fn close<'tcx>(
self: Box<Self>,
communicate_allowed: bool,
_ecx: &mut MiriInterpCx<'tcx>,
) -> InterpResult<'tcx, io::Result<()>> {
assert!(communicate_allowed, "isolation should have prevented even opening a file");
// We sync the file if it was opened in a mode different than read-only.
if self.writable {
// `File::sync_all` does the checks that are done when closing a file. We do this
// to handle possible errors correctly.
let result = self.file.sync_all();
// Now we actually close the file and return the result.
drop(*self);
interp_ok(result)
} else {
// We drop the file; this closes it but ignores any errors
// produced when closing it. This is done because
// `File::sync_all` cannot be done over files like
// `/dev/urandom` which are read-only. Check
// https://github.com/rust-lang/miri/issues/999#issuecomment-568920439
// for a deeper discussion.
drop(*self);
interp_ok(Ok(()))
}
}
fn flock<'tcx>(
&self,
communicate_allowed: bool,
@ -257,55 +271,63 @@ impl FileDescription for FileHandle {
compile_error!("flock is supported only on UNIX and Windows hosts");
}
}
fn is_tty(&self, communicate_allowed: bool) -> bool {
communicate_allowed && self.file.is_terminal()
}
}
impl<'tcx> EvalContextExtPrivate<'tcx> for crate::MiriInterpCx<'tcx> {}
trait EvalContextExtPrivate<'tcx>: crate::MiriInterpCxExt<'tcx> {
fn macos_stat_write_buf(
fn macos_fbsd_solaris_write_buf(
&mut self,
metadata: FileMetadata,
buf_op: &OpTy<'tcx>,
) -> InterpResult<'tcx, i32> {
let this = self.eval_context_mut();
let mode: u16 = metadata.mode.to_u16()?;
let (access_sec, access_nsec) = metadata.accessed.unwrap_or((0, 0));
let (created_sec, created_nsec) = metadata.created.unwrap_or((0, 0));
let (modified_sec, modified_nsec) = metadata.modified.unwrap_or((0, 0));
let mode = metadata.mode.to_uint(this.libc_ty_layout("mode_t").size)?;
let buf = this.deref_pointer_as(buf_op, this.libc_ty_layout("stat"))?;
this.write_int_fields_named(
&[
("st_dev", 0),
("st_mode", mode.into()),
("st_mode", mode.try_into().unwrap()),
("st_nlink", 0),
("st_ino", 0),
("st_uid", 0),
("st_gid", 0),
("st_rdev", 0),
("st_atime", access_sec.into()),
("st_atime_nsec", access_nsec.into()),
("st_mtime", modified_sec.into()),
("st_mtime_nsec", modified_nsec.into()),
("st_ctime", 0),
("st_ctime_nsec", 0),
("st_birthtime", created_sec.into()),
("st_birthtime_nsec", created_nsec.into()),
("st_size", metadata.size.into()),
("st_blocks", 0),
("st_blksize", 0),
("st_flags", 0),
("st_gen", 0),
],
&buf,
)?;
if matches!(&*this.tcx.sess.target.os, "macos" | "freebsd") {
this.write_int_fields_named(
&[
("st_atime_nsec", access_nsec.into()),
("st_mtime_nsec", modified_nsec.into()),
("st_ctime_nsec", 0),
("st_birthtime", created_sec.into()),
("st_birthtime_nsec", created_nsec.into()),
("st_flags", 0),
("st_gen", 0),
],
&buf,
)?;
}
if matches!(&*this.tcx.sess.target.os, "solaris" | "illumos") {
// FIXME: write st_fstype field once libc is updated.
// https://github.com/rust-lang/libc/pull/4145
//this.write_int_fields_named(&[("st_fstype", 0)], &buf)?;
}
interp_ok(0)
}
@ -648,15 +670,15 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
interp_ok(Scalar::from_i32(this.try_unwrap_io_result(result)?))
}
fn macos_fbsd_stat(
fn macos_fbsd_solaris_stat(
&mut self,
path_op: &OpTy<'tcx>,
buf_op: &OpTy<'tcx>,
) -> InterpResult<'tcx, Scalar> {
let this = self.eval_context_mut();
if !matches!(&*this.tcx.sess.target.os, "macos" | "freebsd") {
panic!("`macos_fbsd_stat` should not be called on {}", this.tcx.sess.target.os);
if !matches!(&*this.tcx.sess.target.os, "macos" | "freebsd" | "solaris" | "illumos") {
panic!("`macos_fbsd_solaris_stat` should not be called on {}", this.tcx.sess.target.os);
}
let path_scalar = this.read_pointer(path_op)?;
@ -674,19 +696,22 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
Err(err) => return this.set_last_error_and_return_i32(err),
};
interp_ok(Scalar::from_i32(this.macos_stat_write_buf(metadata, buf_op)?))
interp_ok(Scalar::from_i32(this.macos_fbsd_solaris_write_buf(metadata, buf_op)?))
}
// `lstat` is used to get symlink metadata.
fn macos_fbsd_lstat(
fn macos_fbsd_solaris_lstat(
&mut self,
path_op: &OpTy<'tcx>,
buf_op: &OpTy<'tcx>,
) -> InterpResult<'tcx, Scalar> {
let this = self.eval_context_mut();
if !matches!(&*this.tcx.sess.target.os, "macos" | "freebsd") {
panic!("`macos_fbsd_lstat` should not be called on {}", this.tcx.sess.target.os);
if !matches!(&*this.tcx.sess.target.os, "macos" | "freebsd" | "solaris" | "illumos") {
panic!(
"`macos_fbsd_solaris_lstat` should not be called on {}",
this.tcx.sess.target.os
);
}
let path_scalar = this.read_pointer(path_op)?;
@ -703,18 +728,21 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
Err(err) => return this.set_last_error_and_return_i32(err),
};
interp_ok(Scalar::from_i32(this.macos_stat_write_buf(metadata, buf_op)?))
interp_ok(Scalar::from_i32(this.macos_fbsd_solaris_write_buf(metadata, buf_op)?))
}
fn macos_fbsd_fstat(
fn macos_fbsd_solaris_fstat(
&mut self,
fd_op: &OpTy<'tcx>,
buf_op: &OpTy<'tcx>,
) -> InterpResult<'tcx, Scalar> {
let this = self.eval_context_mut();
if !matches!(&*this.tcx.sess.target.os, "macos" | "freebsd") {
panic!("`macos_fbsd_fstat` should not be called on {}", this.tcx.sess.target.os);
if !matches!(&*this.tcx.sess.target.os, "macos" | "freebsd" | "solaris" | "illumos") {
panic!(
"`macos_fbsd_solaris_fstat` should not be called on {}",
this.tcx.sess.target.os
);
}
let fd = this.read_scalar(fd_op)?.to_i32()?;
@ -730,7 +758,7 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
Ok(metadata) => metadata,
Err(err) => return this.set_last_error_and_return_i32(err),
};
interp_ok(Scalar::from_i32(this.macos_stat_write_buf(metadata, buf_op)?))
interp_ok(Scalar::from_i32(this.macos_fbsd_solaris_write_buf(metadata, buf_op)?))
}
fn linux_statx(
@ -1675,16 +1703,7 @@ impl FileMetadata {
return interp_ok(Err(LibcError("EBADF")));
};
let file = &fd
.downcast::<FileHandle>()
.ok_or_else(|| {
err_unsup_format!(
"obtaining metadata is only supported on file-backed file descriptors"
)
})?
.file;
let metadata = file.metadata();
let metadata = fd.metadata()?;
drop(fd);
FileMetadata::from_meta(ecx, metadata)
}

View file

@ -1,11 +1,12 @@
use rustc_abi::ExternAbi;
use rustc_span::Symbol;
use self::shims::unix::linux::epoll::EvalContextExt as _;
use self::shims::unix::linux::eventfd::EvalContextExt as _;
use self::shims::unix::linux::mem::EvalContextExt as _;
use self::shims::unix::linux::syscall::syscall;
use self::shims::unix::linux_like::epoll::EvalContextExt as _;
use self::shims::unix::linux_like::eventfd::EvalContextExt as _;
use self::shims::unix::linux_like::syscall::syscall;
use crate::machine::{SIGRTMAX, SIGRTMIN};
use crate::shims::unix::foreign_items::EvalContextExt as _;
use crate::shims::unix::*;
use crate::*;
@ -143,6 +144,12 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
let ptr = this.mremap(old_address, old_size, new_size, flags)?;
this.write_scalar(ptr, dest)?;
}
"__xpg_strerror_r" => {
let [errnum, buf, buflen] =
this.check_shim(abi, ExternAbi::C { unwind: false }, link_name, args)?;
let result = this.strerror_r(errnum, buf, buflen)?;
this.write_scalar(result, dest)?;
}
"__errno_location" => {
let [] = this.check_shim(abi, ExternAbi::C { unwind: false }, link_name, args)?;
let errno_place = this.last_error_place()?;

View file

@ -1,6 +1,2 @@
pub mod epoll;
pub mod eventfd;
pub mod foreign_items;
pub mod mem;
pub mod sync;
pub mod syscall;

View file

@ -5,8 +5,8 @@ use std::rc::{Rc, Weak};
use std::time::Duration;
use crate::concurrency::VClock;
use crate::shims::unix::fd::{FdId, FileDescriptionRef, WeakFileDescriptionRef};
use crate::shims::unix::*;
use crate::shims::files::{FdId, FileDescription, FileDescriptionRef, WeakFileDescriptionRef};
use crate::shims::unix::UnixFileDescription;
use crate::*;
/// An `Epoll` file descriptor connects file handles and epoll events
@ -152,8 +152,14 @@ impl FileDescription for Epoll {
) -> InterpResult<'tcx, io::Result<()>> {
interp_ok(Ok(()))
}
fn as_unix(&self) -> &dyn UnixFileDescription {
self
}
}
impl UnixFileDescription for Epoll {}
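
The `FileDescription`/`UnixFileDescription` split hinges on the `as_unix` accessor: descriptions that support unix-only operations (`flock`, `pread`, epoll readiness) return themselves, everything else inherits a default that refuses. A reduced sketch of the two-trait pattern (trait names mirror the diff; the bodies are illustrative only):

trait FileDescription {
    fn name(&self) -> &'static str;
    // Default: most descriptions do not support unix-only operations.
    fn as_unix(&self) -> &dyn UnixFileDescription {
        panic!("{} does not support unix-specific operations", self.name())
    }
}

trait UnixFileDescription: FileDescription {
    fn flock(&self) -> &'static str {
        "flock-ed"
    }
}

struct FileHandle;

impl FileDescription for FileHandle {
    fn name(&self) -> &'static str {
        "file"
    }
    fn as_unix(&self) -> &dyn UnixFileDescription {
        self // opt in to the unix extension trait
    }
}

impl UnixFileDescription for FileHandle {}

fn main() {
    let fd: &dyn FileDescription = &FileHandle;
    // Callers route unix-only calls through the explicit accessor.
    assert_eq!(fd.as_unix().flock(), "flock-ed");
}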
/// The table of all EpollEventInterest.
/// The BTreeMap key is the FdId of an active file description registered with
/// any epoll instance. The value is a list of EpollEventInterest associated
@ -539,7 +545,7 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
)?;
if is_updated {
// Edge-triggered notifications only notify one thread even if there are
// multiple threads block on the same epfd.
// multiple threads blocked on the same epfd.
// This unwrap can never fail because if the current epoll instance were
// closed, the upgrade of weak_epoll_interest
@ -595,7 +601,7 @@ fn check_and_update_one_event_interest<'tcx>(
ecx: &MiriInterpCx<'tcx>,
) -> InterpResult<'tcx, bool> {
// Get the bitmask of ready events for a file description.
let ready_events_bitmask = fd_ref.get_epoll_ready_events()?.get_event_bitmask(ecx);
let ready_events_bitmask = fd_ref.as_unix().get_epoll_ready_events()?.get_event_bitmask(ecx);
let epoll_event_interest = interest.borrow();
// This checks if any of the events specified in epoll_event_interest.events
// match those in ready_events.
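
The readiness check itself reduces to a bitmask intersection: an interest fires iff it asked for at least one event that is currently ready. A sketch with assumed flag values:

// Assumed flag values for the sketch; the shim evaluates the real
// libc constants.
const EPOLLIN: u32 = 0x1;
const EPOLLOUT: u32 = 0x4;

fn interest_is_ready(ready_events: u32, interested_events: u32) -> bool {
    ready_events & interested_events != 0
}

fn main() {
    let ready = EPOLLIN; // e.g. an eventfd with a non-zero counter
    assert!(interest_is_ready(ready, EPOLLIN | EPOLLOUT));
    assert!(!interest_is_ready(ready, EPOLLOUT));
}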

View file

@ -4,9 +4,9 @@ use std::io;
use std::io::ErrorKind;
use crate::concurrency::VClock;
use crate::shims::unix::fd::{FileDescriptionRef, WeakFileDescriptionRef};
use crate::shims::unix::linux::epoll::{EpollReadyEvents, EvalContextExt as _};
use crate::shims::unix::*;
use crate::shims::files::{FileDescription, FileDescriptionRef, WeakFileDescriptionRef};
use crate::shims::unix::UnixFileDescription;
use crate::shims::unix::linux_like::epoll::{EpollReadyEvents, EvalContextExt as _};
use crate::*;
/// Maximum value that the eventfd counter can hold.
@ -37,17 +37,6 @@ impl FileDescription for Event {
"event"
}
fn get_epoll_ready_events<'tcx>(&self) -> InterpResult<'tcx, EpollReadyEvents> {
// We only check the status of EPOLLIN and EPOLLOUT flags for eventfd. If other event flags
// need to be supported in the future, the check should be added here.
interp_ok(EpollReadyEvents {
epollin: self.counter.get() != 0,
epollout: self.counter.get() != MAX_COUNTER,
..EpollReadyEvents::new()
})
}
fn close<'tcx>(
self: Box<Self>,
_communicate_allowed: bool,
@ -121,6 +110,23 @@ impl FileDescription for Event {
let weak_eventfd = self_ref.downgrade();
eventfd_write(num, buf_place, dest, weak_eventfd, ecx)
}
fn as_unix(&self) -> &dyn UnixFileDescription {
self
}
}
impl UnixFileDescription for Event {
fn get_epoll_ready_events<'tcx>(&self) -> InterpResult<'tcx, EpollReadyEvents> {
// We only check the status of EPOLLIN and EPOLLOUT flags for eventfd. If other event flags
// need to be supported in the future, the check should be added here.
interp_ok(EpollReadyEvents {
epollin: self.counter.get() != 0,
epollout: self.counter.get() != MAX_COUNTER,
..EpollReadyEvents::new()
})
}
}
impl<'tcx> EvalContextExt<'tcx> for crate::MiriInterpCx<'tcx> {}
@ -144,9 +150,6 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
fn eventfd(&mut self, val: &OpTy<'tcx>, flags: &OpTy<'tcx>) -> InterpResult<'tcx, Scalar> {
let this = self.eval_context_mut();
// eventfd is Linux specific.
this.assert_target_os("linux", "eventfd");
let val = this.read_scalar(val)?.to_u32()?;
let mut flags = this.read_scalar(flags)?.to_i32()?;
@ -211,10 +214,10 @@ fn eventfd_write<'tcx>(
eventfd.clock.borrow_mut().join(clock);
});
// When this function is called, the addition is guaranteed to not exceed u64::MAX - 1.
// Store new counter value.
eventfd.counter.set(new_count);
// When any of the event happened, we check and update the status of all supported event
// The state changed; we check and update the status of all supported event
// types for current file description.
ecx.check_and_update_readiness(&eventfd_ref)?;
@ -228,10 +231,11 @@ fn eventfd_write<'tcx>(
ecx.unblock_thread(thread_id, BlockReason::Eventfd)?;
}
// Return how many bytes we wrote.
// Return how many bytes we consumed from the user-provided buffer.
return ecx.write_int(buf_place.layout.size.bytes(), dest);
}
None | Some(u64::MAX) => {
// We can't update the state, so we have to block.
if eventfd.is_nonblock {
return ecx.set_last_error_and_return(ErrorKind::WouldBlock, dest);
}
@ -251,6 +255,7 @@ fn eventfd_write<'tcx>(
weak_eventfd: WeakFileDescriptionRef,
}
@unblock = |this| {
// When we get unblocked, try again.
eventfd_write(num, buf_place, &dest, weak_eventfd, this)
}
),
@ -276,9 +281,10 @@ fn eventfd_read<'tcx>(
// an eventfd file description.
let eventfd = eventfd_ref.downcast::<Event>().unwrap();
// Block when counter == 0.
// Set counter to 0, get old value.
let counter = eventfd.counter.replace(0);
// Block when counter == 0.
if counter == 0 {
if eventfd.is_nonblock {
return ecx.set_last_error_and_return(ErrorKind::WouldBlock, dest);
@ -297,6 +303,7 @@ fn eventfd_read<'tcx>(
weak_eventfd: WeakFileDescriptionRef,
}
@unblock = |this| {
// When we get unblocked, try again.
eventfd_read(buf_place, &dest, weak_eventfd, this)
}
),
@ -305,10 +312,10 @@ fn eventfd_read<'tcx>(
// Synchronize with all prior `write` calls to this FD.
ecx.acquire_clock(&eventfd.clock.borrow());
// Give old counter value to userspace, and set counter value to 0.
// Return old counter value into user-space buffer.
ecx.write_int(counter, &buf_place)?;
// When any of the events happened, we check and update the status of all supported event
// The state changed; we check and update the status of all supported event
// types for current file description.
ecx.check_and_update_readiness(&eventfd_ref)?;
@ -322,7 +329,7 @@ fn eventfd_read<'tcx>(
ecx.unblock_thread(thread_id, BlockReason::Eventfd)?;
}
// Tell userspace how many bytes we read.
// Tell userspace how many bytes we put into the buffer.
return ecx.write_int(buf_place.layout.size.bytes(), dest);
}
interp_ok(())

View file

@ -0,0 +1,4 @@
pub mod epoll;
pub mod eventfd;
pub mod sync;
pub mod syscall;

View file

@ -1,9 +1,9 @@
use rustc_abi::ExternAbi;
use rustc_span::Symbol;
use self::shims::unix::linux::eventfd::EvalContextExt as _;
use crate::helpers::check_min_arg_count;
use crate::shims::unix::linux::sync::futex;
use crate::shims::unix::linux_like::eventfd::EvalContextExt as _;
use crate::shims::unix::linux_like::sync::futex;
use crate::*;
pub fn syscall<'tcx>(

View file

@ -40,19 +40,19 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
"stat" | "stat64" | "stat$INODE64" => {
let [path, buf] =
this.check_shim(abi, ExternAbi::C { unwind: false }, link_name, args)?;
let result = this.macos_fbsd_stat(path, buf)?;
let result = this.macos_fbsd_solaris_stat(path, buf)?;
this.write_scalar(result, dest)?;
}
"lstat" | "lstat64" | "lstat$INODE64" => {
let [path, buf] =
this.check_shim(abi, ExternAbi::C { unwind: false }, link_name, args)?;
let result = this.macos_fbsd_lstat(path, buf)?;
let result = this.macos_fbsd_solaris_lstat(path, buf)?;
this.write_scalar(result, dest)?;
}
"fstat" | "fstat64" | "fstat$INODE64" => {
let [fd, buf] =
this.check_shim(abi, ExternAbi::C { unwind: false }, link_name, args)?;
let result = this.macos_fbsd_fstat(fd, buf)?;
let result = this.macos_fbsd_solaris_fstat(fd, buf)?;
this.write_scalar(result, dest)?;
}
"opendir$INODE64" => {

View file

@ -10,15 +10,16 @@ mod unnamed_socket;
mod android;
mod freebsd;
mod linux;
pub mod linux;
mod linux_like;
mod macos;
mod solarish;
// All the Unix-specific extension traits
pub use self::env::{EvalContextExt as _, UnixEnvVars};
pub use self::fd::{EvalContextExt as _, FdTable, FileDescription};
pub use self::fd::{EvalContextExt as _, UnixFileDescription};
pub use self::fs::{DirTable, EvalContextExt as _};
pub use self::linux::epoll::EpollInterestTable;
pub use self::linux_like::epoll::EpollInterestTable;
pub use self::mem::EvalContextExt as _;
pub use self::sync::EvalContextExt as _;
pub use self::thread::{EvalContextExt as _, ThreadNameResult};

View file

@ -1,6 +1,7 @@
use rustc_abi::ExternAbi;
use rustc_span::Symbol;
use crate::shims::unix::foreign_items::EvalContextExt as _;
use crate::shims::unix::*;
use crate::*;
@ -56,6 +57,26 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
this.write_scalar(res, dest)?;
}
// File related shims
"stat" | "stat64" => {
let [path, buf] =
this.check_shim(abi, ExternAbi::C { unwind: false }, link_name, args)?;
let result = this.macos_fbsd_solaris_stat(path, buf)?;
this.write_scalar(result, dest)?;
}
"lstat" | "lstat64" => {
let [path, buf] =
this.check_shim(abi, ExternAbi::C { unwind: false }, link_name, args)?;
let result = this.macos_fbsd_solaris_lstat(path, buf)?;
this.write_scalar(result, dest)?;
}
"fstat" | "fstat64" => {
let [fd, buf] =
this.check_shim(abi, ExternAbi::C { unwind: false }, link_name, args)?;
let result = this.macos_fbsd_solaris_fstat(fd, buf)?;
this.write_scalar(result, dest)?;
}
// Miscellaneous
"___errno" => {
let [] = this.check_shim(abi, ExternAbi::C { unwind: false }, link_name, args)?;
@ -112,6 +133,13 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
this.write_null(dest)?;
}
"__sysconf_xpg7" => {
let [val] =
this.check_shim(abi, ExternAbi::C { unwind: false }, link_name, args)?;
let result = this.sysconf(val)?;
this.write_scalar(result, dest)?;
}
_ => return interp_ok(EmulateItemResult::NotSupported),
}
interp_ok(EmulateItemResult::NeedsReturn)

View file

@ -10,9 +10,11 @@ use std::io::{ErrorKind, Read};
use rustc_abi::Size;
use crate::concurrency::VClock;
use crate::shims::unix::fd::{FileDescriptionRef, WeakFileDescriptionRef};
use crate::shims::unix::linux::epoll::{EpollReadyEvents, EvalContextExt as _};
use crate::shims::unix::*;
use crate::shims::files::{
EvalContextExt as _, FileDescription, FileDescriptionRef, WeakFileDescriptionRef,
};
use crate::shims::unix::UnixFileDescription;
use crate::shims::unix::linux_like::epoll::{EpollReadyEvents, EvalContextExt as _};
use crate::*;
/// The maximum capacity of the socketpair buffer in bytes.
@ -60,6 +62,194 @@ impl FileDescription for AnonSocket {
"socketpair"
}
fn close<'tcx>(
self: Box<Self>,
_communicate_allowed: bool,
ecx: &mut MiriInterpCx<'tcx>,
) -> InterpResult<'tcx, io::Result<()>> {
if let Some(peer_fd) = self.peer_fd().upgrade() {
// If the current readbuf is non-empty when the file description is closed,
// notify the peer that data loss has happened in the current file description.
if let Some(readbuf) = &self.readbuf {
if !readbuf.borrow().buf.is_empty() {
peer_fd.downcast::<AnonSocket>().unwrap().peer_lost_data.set(true);
}
}
// Notify peer fd that close has happened, since that can unblock reads and writes.
ecx.check_and_update_readiness(&peer_fd)?;
}
interp_ok(Ok(()))
}
fn read<'tcx>(
&self,
_self_ref: &FileDescriptionRef,
_communicate_allowed: bool,
ptr: Pointer,
len: usize,
dest: &MPlaceTy<'tcx>,
ecx: &mut MiriInterpCx<'tcx>,
) -> InterpResult<'tcx> {
// Always succeed on read size 0.
if len == 0 {
return ecx.return_read_success(ptr, &[], 0, dest);
}
let Some(readbuf) = &self.readbuf else {
// FIXME: This should return EBADF, but there's no nice way to do that as there's no
// corresponding ErrorKind variant.
throw_unsup_format!("reading from the write end of a pipe");
};
if readbuf.borrow().buf.is_empty() {
if self.peer_fd().upgrade().is_none() {
// Socketpair with no peer and empty buffer.
// 0 bytes successfully read indicates end-of-file.
return ecx.return_read_success(ptr, &[], 0, dest);
} else {
if self.is_nonblock {
// Non-blocking socketpair with writer and empty buffer.
// https://linux.die.net/man/2/read
// EAGAIN or EWOULDBLOCK can be returned for socket,
// POSIX.1-2001 allows either error to be returned for this case.
// Since there is no ErrorKind for EAGAIN, WouldBlock is used.
return ecx.set_last_error_and_return(ErrorKind::WouldBlock, dest);
} else {
// Blocking socketpair with writer and empty buffer.
// FIXME: blocking is currently not supported
throw_unsup_format!("socketpair/pipe/pipe2 read: blocking isn't supported yet");
}
}
}
// TODO: We might need to decide what to do if peer_fd is closed when read is blocked.
anonsocket_read(self, self.peer_fd().upgrade(), len, ptr, dest, ecx)
}
fn write<'tcx>(
&self,
_self_ref: &FileDescriptionRef,
_communicate_allowed: bool,
ptr: Pointer,
len: usize,
dest: &MPlaceTy<'tcx>,
ecx: &mut MiriInterpCx<'tcx>,
) -> InterpResult<'tcx> {
// Always succeed on write size 0.
// ("If count is zero and fd refers to a file other than a regular file, the results are not specified.")
if len == 0 {
return ecx.return_write_success(0, dest);
}
// We are writing to our peer's readbuf.
let Some(peer_fd) = self.peer_fd().upgrade() else {
// If the upgrade from Weak to Rc fails, it indicates that all read ends have been
// closed.
return ecx.set_last_error_and_return(ErrorKind::BrokenPipe, dest);
};
let Some(writebuf) = &peer_fd.downcast::<AnonSocket>().unwrap().readbuf else {
// FIXME: This should return EBADF, but there's no nice way to do that as there's no
// corresponding ErrorKind variant.
throw_unsup_format!("writing to the reading end of a pipe");
};
let available_space =
MAX_SOCKETPAIR_BUFFER_CAPACITY.strict_sub(writebuf.borrow().buf.len());
if available_space == 0 {
if self.is_nonblock {
// Non-blocking socketpair with a full buffer.
return ecx.set_last_error_and_return(ErrorKind::WouldBlock, dest);
} else {
// Blocking socketpair with a full buffer.
throw_unsup_format!("socketpair/pipe/pipe2 write: blocking isn't supported yet");
}
}
anonsocket_write(available_space, &peer_fd, ptr, len, dest, ecx)
}
fn as_unix(&self) -> &dyn UnixFileDescription {
self
}
}
/// Write to AnonSocket based on the space available and return the written byte size.
fn anonsocket_write<'tcx>(
available_space: usize,
peer_fd: &FileDescriptionRef,
ptr: Pointer,
len: usize,
dest: &MPlaceTy<'tcx>,
ecx: &mut MiriInterpCx<'tcx>,
) -> InterpResult<'tcx> {
let Some(writebuf) = &peer_fd.downcast::<AnonSocket>().unwrap().readbuf else {
// FIXME: This should return EBADF, but there's no nice way to do that as there's no
// corresponding ErrorKind variant.
throw_unsup_format!("writing to the reading end of a pipe")
};
let mut writebuf = writebuf.borrow_mut();
// Remember this clock so `read` can synchronize with us.
ecx.release_clock(|clock| {
writebuf.clock.join(clock);
});
// Do full write / partial write based on the space available.
let actual_write_size = len.min(available_space);
let bytes = ecx.read_bytes_ptr_strip_provenance(ptr, Size::from_bytes(len))?;
writebuf.buf.extend(&bytes[..actual_write_size]);
// Need to stop accessing peer_fd so that it can be notified.
drop(writebuf);
// Notification should be provided for peer fd as it became readable.
// The kernel does this even if the fd was already readable before, so we follow suit.
ecx.check_and_update_readiness(peer_fd)?;
ecx.return_write_success(actual_write_size, dest)
}
/// Read from AnonSocket and return the number of bytes read.
fn anonsocket_read<'tcx>(
anonsocket: &AnonSocket,
peer_fd: Option<FileDescriptionRef>,
len: usize,
ptr: Pointer,
dest: &MPlaceTy<'tcx>,
ecx: &mut MiriInterpCx<'tcx>,
) -> InterpResult<'tcx> {
let mut bytes = vec![0; len];
let Some(readbuf) = &anonsocket.readbuf else {
// FIXME: This should return EBADF, but there's no nice way to do that as there's no
// corresponding ErrorKind variant.
throw_unsup_format!("reading from the write end of a pipe")
};
let mut readbuf = readbuf.borrow_mut();
// Synchronize with all previous writes to this buffer.
// FIXME: this over-synchronizes; a more precise approach would be to
// only sync with the writes whose data we will read.
ecx.acquire_clock(&readbuf.clock);
// Do full read / partial read based on the space available.
// Conveniently, `read` exists on `VecDeque` and has exactly the desired behavior.
let actual_read_size = readbuf.buf.read(&mut bytes[..]).unwrap();
// Need to drop before others can access the readbuf again.
drop(readbuf);
// A notification should be provided for the peer file description even when it can
// only write 1 byte. This does not match the actual Linux kernel behavior: for
// optimization reasons, the kernel only marks the file description as "writable"
// when it can write more than a certain number of bytes. Since we don't know what
// that *certain number* is, we provide a notification every time a read succeeds.
// This might result in our epoll emulation providing more notifications than the
// real system.
if let Some(peer_fd) = peer_fd {
ecx.check_and_update_readiness(&peer_fd)?;
}
ecx.return_read_success(ptr, &bytes, actual_read_size, dest)
}
impl UnixFileDescription for AnonSocket {
fn get_epoll_ready_events<'tcx>(&self) -> InterpResult<'tcx, EpollReadyEvents> {
// We only check the status of EPOLLIN, EPOLLOUT, EPOLLHUP and EPOLLRDHUP flags.
// If other event flags need to be supported in the future, the check should be added here.
@ -105,152 +295,6 @@ impl FileDescription for AnonSocket {
}
interp_ok(epoll_ready_events)
}
fn close<'tcx>(
self: Box<Self>,
_communicate_allowed: bool,
ecx: &mut MiriInterpCx<'tcx>,
) -> InterpResult<'tcx, io::Result<()>> {
if let Some(peer_fd) = self.peer_fd().upgrade() {
// If the current readbuf is non-empty when the file description is closed,
// notify the peer that data loss has happened in the current file description.
if let Some(readbuf) = &self.readbuf {
if !readbuf.borrow().buf.is_empty() {
peer_fd.downcast::<AnonSocket>().unwrap().peer_lost_data.set(true);
}
}
// Notify peer fd that close has happened, since that can unblock reads and writes.
ecx.check_and_update_readiness(&peer_fd)?;
}
interp_ok(Ok(()))
}
fn read<'tcx>(
&self,
_self_ref: &FileDescriptionRef,
_communicate_allowed: bool,
ptr: Pointer,
len: usize,
dest: &MPlaceTy<'tcx>,
ecx: &mut MiriInterpCx<'tcx>,
) -> InterpResult<'tcx> {
let mut bytes = vec![0; len];
// Always succeed on read size 0.
if len == 0 {
return ecx.return_read_success(ptr, &bytes, 0, dest);
}
let Some(readbuf) = &self.readbuf else {
// FIXME: This should return EBADF, but there's no nice way to do that as there's no
// corresponding ErrorKind variant.
throw_unsup_format!("reading from the write end of a pipe");
};
let mut readbuf = readbuf.borrow_mut();
if readbuf.buf.is_empty() {
if self.peer_fd().upgrade().is_none() {
// Socketpair with no peer and empty buffer.
// 0 bytes successfully read indicates end-of-file.
return ecx.return_read_success(ptr, &bytes, 0, dest);
} else {
if self.is_nonblock {
// Non-blocking socketpair with writer and empty buffer.
// https://linux.die.net/man/2/read
// EAGAIN or EWOULDBLOCK can be returned for sockets;
// POSIX.1-2001 allows either error to be returned in this case.
// Since there is no ErrorKind for EAGAIN, WouldBlock is used.
return ecx.set_last_error_and_return(ErrorKind::WouldBlock, dest);
} else {
// Blocking socketpair with writer and empty buffer.
// FIXME: blocking is currently not supported
throw_unsup_format!("socketpair/pipe/pipe2 read: blocking isn't supported yet");
}
}
}
// Synchronize with all previous writes to this buffer.
// FIXME: this over-synchronizes; a more precise approach would be to
// only sync with the writes whose data we will read.
ecx.acquire_clock(&readbuf.clock);
// Do full read / partial read based on the space available.
// Conveniently, `read` exists on `VecDeque` and has exactly the desired behavior.
let actual_read_size = readbuf.buf.read(&mut bytes).unwrap();
// Need to drop before others can access the readbuf again.
drop(readbuf);
// A notification should be provided for the peer file description even when it can
// only write 1 byte. This does not match the actual Linux kernel behavior: for
// optimization reasons, the kernel only marks the file description as "writable"
// when it can write more than a certain number of bytes. Since we don't know what
// that *certain number* is, we provide a notification every time a read succeeds.
// This might result in our epoll emulation providing more notifications than the
// real system.
if let Some(peer_fd) = self.peer_fd().upgrade() {
ecx.check_and_update_readiness(&peer_fd)?;
}
ecx.return_read_success(ptr, &bytes, actual_read_size, dest)
}
fn write<'tcx>(
&self,
_self_ref: &FileDescriptionRef,
_communicate_allowed: bool,
ptr: Pointer,
len: usize,
dest: &MPlaceTy<'tcx>,
ecx: &mut MiriInterpCx<'tcx>,
) -> InterpResult<'tcx> {
// Always succeed on write size 0.
// ("If count is zero and fd refers to a file other than a regular file, the results are not specified.")
if len == 0 {
return ecx.return_write_success(0, dest);
}
// We are writing to our peer's readbuf.
let Some(peer_fd) = self.peer_fd().upgrade() else {
// If the upgrade from Weak to Rc fails, it indicates that all read ends have been
// closed.
return ecx.set_last_error_and_return(ErrorKind::BrokenPipe, dest);
};
let Some(writebuf) = &peer_fd.downcast::<AnonSocket>().unwrap().readbuf else {
// FIXME: This should return EBADF, but there's no nice way to do that as there's no
// corresponding ErrorKind variant.
throw_unsup_format!("writing to the reading end of a pipe");
};
let mut writebuf = writebuf.borrow_mut();
let data_size = writebuf.buf.len();
let available_space = MAX_SOCKETPAIR_BUFFER_CAPACITY.strict_sub(data_size);
if available_space == 0 {
if self.is_nonblock {
// Non-blocking socketpair with a full buffer.
return ecx.set_last_error_and_return(ErrorKind::WouldBlock, dest);
} else {
// Blocking socketpair with a full buffer.
throw_unsup_format!("socketpair/pipe/pipe2 write: blocking isn't supported yet");
}
}
// Remember this clock so `read` can synchronize with us.
ecx.release_clock(|clock| {
writebuf.clock.join(clock);
});
// Do full write / partial write based on the space available.
let actual_write_size = len.min(available_space);
let bytes = ecx.read_bytes_ptr_strip_provenance(ptr, Size::from_bytes(len))?;
writebuf.buf.extend(&bytes[..actual_write_size]);
// Need to stop accessing peer_fd so that it can be notified.
drop(writebuf);
// Notification should be provided for peer fd as it became readable.
// The kernel does this even if the fd was already readable before, so we follow suit.
ecx.check_and_update_readiness(&peer_fd)?;
ecx.return_write_success(actual_write_size, dest)
}
}
impl<'tcx> EvalContextExt<'tcx> for crate::MiriInterpCx<'tcx> {}

View file

@ -383,7 +383,8 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
this.write_int(1, dest)?;
}
"TlsFree" => {
let [key] = this.check_shim(abi, ExternAbi::System { unwind: false }, link_name, args)?;
let [key] =
this.check_shim(abi, ExternAbi::System { unwind: false }, link_name, args)?;
let key = u128::from(this.read_scalar(key)?.to_u32()?);
this.machine.tls.delete_tls_key(key)?;

View file

@ -14,7 +14,7 @@ use std::thread;
// 2. Thread 2 blocks.
// 3. Thread 3 unblocks both thread 1 and thread 2.
// 4. Thread 1 reads.
// 5. Thread 2's `read` deadlocked.
// 5. Thread 2's `read` can never complete -> deadlocked.
fn main() {
// eventfd write will block when EFD_NONBLOCK flag is clear
@ -23,7 +23,6 @@ fn main() {
let fd = unsafe { libc::eventfd(0, flags) };
let thread1 = thread::spawn(move || {
thread::park();
let mut buf: [u8; 8] = [0; 8];
// This read will block initially.
let res: i64 = unsafe { libc::read(fd, buf.as_mut_ptr().cast(), 8).try_into().unwrap() };
@ -33,7 +32,6 @@ fn main() {
});
let thread2 = thread::spawn(move || {
thread::park();
let mut buf: [u8; 8] = [0; 8];
// This read will block initially, then get unblocked by thread3, then get blocked again
// because the `read` in thread1 executes first and sets the counter to 0 again.
@ -45,7 +43,6 @@ fn main() {
});
let thread3 = thread::spawn(move || {
thread::park();
let sized_8_data = 1_u64.to_ne_bytes();
// Write 1 to the counter, so both thread1 and thread2 will unblock.
let res: i64 = unsafe {
@ -55,10 +52,6 @@ fn main() {
assert_eq!(res, 8);
});
thread1.thread().unpark();
thread2.thread().unpark();
thread3.thread().unpark();
thread1.join().unwrap();
thread2.join().unwrap();
thread3.join().unwrap();

View file

@ -14,7 +14,7 @@ use std::thread;
// 2. Thread 2 blocks.
// 3. Thread 3 unblocks both thread 1 and thread 2.
// 4. Thread 1 writes u64::MAX.
// 5. Thread 2's `write` deadlocked.
// 5. Thread 2's `write` can never complete -> deadlocked.
fn main() {
// eventfd write will block when EFD_NONBLOCK flag is clear
// and the addition caused counter to exceed u64::MAX - 1.
@ -28,7 +28,6 @@ fn main() {
assert_eq!(res, 8);
let thread1 = thread::spawn(move || {
thread::park();
let sized_8_data = (u64::MAX - 1).to_ne_bytes();
let res: i64 = unsafe {
libc::write(fd, sized_8_data.as_ptr() as *const libc::c_void, 8).try_into().unwrap()
@ -38,7 +37,6 @@ fn main() {
});
let thread2 = thread::spawn(move || {
thread::park();
let sized_8_data = (u64::MAX - 1).to_ne_bytes();
// Write u64::MAX - 1, so all subsequent writes will block.
let res: i64 = unsafe {
@ -52,7 +50,6 @@ fn main() {
});
let thread3 = thread::spawn(move || {
thread::park();
let mut buf: [u8; 8] = [0; 8];
// This will unblock the `write` calls in both thread1 and thread2.
let res: i64 = unsafe { libc::read(fd, buf.as_mut_ptr().cast(), 8).try_into().unwrap() };
@ -61,10 +58,6 @@ fn main() {
assert_eq!(counter, (u64::MAX - 1));
});
thread1.thread().unpark();
thread2.thread().unpark();
thread3.thread().unpark();
thread1.join().unwrap();
thread2.join().unwrap();
thread3.join().unwrap();

View file

@ -2,7 +2,7 @@
//! and we only read one of them, we do not synchronize with the other events
//! and therefore still report a data race for things that need to see the second event
//! to be considered synchronized.
//@only-target: linux
//@only-target: linux android
// ensure deterministic schedule
//@compile-flags: -Zmiri-preemption-rate=0

View file

@ -5,7 +5,6 @@
//@error-in-other-file: deadlock
use std::convert::TryInto;
use std::thread;
use std::thread::spawn;
// Using `as` cast since `EPOLLET` wraps around
@ -41,10 +40,10 @@ fn check_epoll_wait<const N: usize>(
// Test that only one thread is unblocked if multiple threads are blocked on the same epfd.
// Expected execution:
// 1. Thread 2 blocks.
// 2. Thread 3 blocks.
// 3. Thread 1 unblocks thread 3.
// 4. Thread 2 deadlocks.
// 1. Thread 1 blocks.
// 2. Thread 2 blocks.
// 3. Thread 3 unblocks thread 2.
// 4. Thread 1 deadlocks.
fn main() {
// Create an epoll instance.
let epfd = unsafe { libc::epoll_create1(0) };
@ -65,31 +64,22 @@ fn main() {
let expected_value = fds[0] as u64;
check_epoll_wait::<1>(epfd, &[(expected_event, expected_value)], 0);
let expected_event = u32::try_from(libc::EPOLLIN | libc::EPOLLOUT).unwrap();
let expected_value = fds[0] as u64;
let thread1 = spawn(move || {
thread::park();
check_epoll_wait::<1>(epfd, &[(expected_event, expected_value)], -1);
//~^ERROR: deadlocked
});
let thread2 = spawn(move || {
check_epoll_wait::<1>(epfd, &[(expected_event, expected_value)], -1);
});
let thread3 = spawn(move || {
let data = "abcde".as_bytes().as_ptr();
let res = unsafe { libc::write(fds[1], data as *const libc::c_void, 5) };
assert_eq!(res, 5);
});
let expected_event = u32::try_from(libc::EPOLLIN | libc::EPOLLOUT).unwrap();
let expected_value = fds[0] as u64;
let thread2 = spawn(move || {
thread::park();
check_epoll_wait::<1>(epfd, &[(expected_event, expected_value)], -1);
//~^ERROR: deadlocked
});
let thread3 = spawn(move || {
thread::park();
check_epoll_wait::<1>(epfd, &[(expected_event, expected_value)], -1);
});
thread2.thread().unpark();
thread::yield_now();
thread3.thread().unpark();
thread::yield_now();
thread1.thread().unpark();
thread1.join().unwrap();
thread2.join().unwrap();
thread3.join().unwrap();

View file

@ -1,3 +1,9 @@
error: deadlock: the evaluated program deadlocked
|
= note: the evaluated program deadlocked
= note: (no span available)
= note: BACKTRACE on thread `unnamed-ID`:
error: deadlock: the evaluated program deadlocked
--> RUSTLIB/std/src/sys/pal/PLATFORM/thread.rs:LL:CC
|
@ -11,15 +17,9 @@ LL | let ret = unsafe { libc::pthread_join(id, ptr::null_mut()) };
note: inside `main`
--> tests/fail-dep/libc/libc_epoll_block_two_thread.rs:LL:CC
|
LL | thread2.join().unwrap();
LL | thread1.join().unwrap();
| ^^^^^^^^^^^^^^
error: deadlock: the evaluated program deadlocked
|
= note: the evaluated program deadlocked
= note: (no span available)
= note: BACKTRACE on thread `unnamed-ID`:
error: deadlock: the evaluated program deadlocked
--> tests/fail-dep/libc/libc_epoll_block_two_thread.rs:LL:CC
|

View file

@ -1,87 +0,0 @@
//@compile-flags: -Zmiri-ignore-leaks
// https://plv.mpi-sws.org/scfix/paper.pdf
// 2.2 Second Problem: SC Fences are Too Weak
// This test should pass under the C++20 model Rust is using.
// Unfortunately, Miri's weak memory emulation only follows the C++11 model
// as we don't know how to correctly emulate C++20's revised SC semantics,
// so we have to stick to C++11 emulation from existing research.
use std::sync::atomic::Ordering::*;
use std::sync::atomic::{AtomicUsize, fence};
use std::thread::spawn;
// Spins until it reads the given value
fn reads_value(loc: &AtomicUsize, val: usize) -> usize {
while loc.load(Relaxed) != val {
std::hint::spin_loop();
}
val
}
// We can't create static items because we need to run each test
// multiple times
fn static_atomic(val: usize) -> &'static AtomicUsize {
let ret = Box::leak(Box::new(AtomicUsize::new(val)));
// A workaround to put the initialization value in the store buffer.
// See https://github.com/rust-lang/miri/issues/2164
ret.load(Relaxed);
ret
}
fn test_cpp20_rwc_syncs() {
/*
int main() {
atomic_int x = 0;
atomic_int y = 0;
{{{ x.store(1,mo_relaxed);
||| { r1=x.load(mo_relaxed).readsvalue(1);
fence(mo_seq_cst);
r2=y.load(mo_relaxed); }
||| { y.store(1,mo_relaxed);
fence(mo_seq_cst);
r3=x.load(mo_relaxed); }
}}}
return 0;
}
*/
let x = static_atomic(0);
let y = static_atomic(0);
let j1 = spawn(move || {
x.store(1, Relaxed);
});
let j2 = spawn(move || {
reads_value(&x, 1);
fence(SeqCst);
y.load(Relaxed)
});
let j3 = spawn(move || {
y.store(1, Relaxed);
fence(SeqCst);
x.load(Relaxed)
});
j1.join().unwrap();
let b = j2.join().unwrap();
let c = j3.join().unwrap();
// We cannot write assert_ne!() since ui_test's fail
// tests expect exit status 1, whereas panics produce 101.
// Our ui_test does not yet support overriding failure status codes.
if (b, c) == (0, 0) {
// This *should* be unreachable, but Miri will reach it.
unsafe {
std::hint::unreachable_unchecked(); //~ERROR: unreachable
}
}
}
pub fn main() {
for _ in 0..500 {
test_cpp20_rwc_syncs();
}
}

View file

@ -1,20 +0,0 @@
error: Undefined Behavior: entering unreachable code
--> tests/fail/should-pass/cpp20_rwc_syncs.rs:LL:CC
|
LL | std::hint::unreachable_unchecked();
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ entering unreachable code
|
= help: this indicates a bug in the program: it performed an invalid operation, and caused Undefined Behavior
= help: see https://doc.rust-lang.org/nightly/reference/behavior-considered-undefined.html for further information
= note: BACKTRACE:
= note: inside `test_cpp20_rwc_syncs` at tests/fail/should-pass/cpp20_rwc_syncs.rs:LL:CC
note: inside `main`
--> tests/fail/should-pass/cpp20_rwc_syncs.rs:LL:CC
|
LL | test_cpp20_rwc_syncs();
| ^^^^^^^^^^^^^^^^^^^^^^
note: some details are omitted, run with `MIRIFLAGS=-Zmiri-backtrace=full` for a verbose backtrace
error: aborting due to 1 previous error

View file

@ -3,7 +3,6 @@
//@only-on-host
//@compile-flags: -Zmiri-permissive-provenance
#![feature(box_as_ptr)]
use std::mem::MaybeUninit;
@ -60,7 +59,7 @@ fn test_init_array() {
const LEN: usize = 3;
let mut array = MaybeUninit::<[i32; LEN]>::uninit();
let val = 31;
let array = unsafe {
init_array(array.as_mut_ptr().cast::<i32>(), LEN, val);
array.assume_init()
@ -72,7 +71,7 @@ fn test_init_array() {
fn test_init_static_inner() {
#[repr(C)]
struct SyncPtr {
ptr: *mut i32
ptr: *mut i32,
}
unsafe impl Sync for SyncPtr {}
@ -183,17 +182,12 @@ fn test_swap_ptr_triple_dangling() {
let ptr = Box::as_ptr(&b);
drop(b);
let z = 121;
let triple = Triple {
ptr0: &raw const x,
ptr1: ptr,
ptr2: &raw const z
};
let triple = Triple { ptr0: &raw const x, ptr1: ptr, ptr2: &raw const z };
unsafe { swap_ptr_triple_dangling(&triple) }
assert_eq!(unsafe { *triple.ptr2 }, x);
}
/// Test function that directly returns its pointer argument.
fn test_return_ptr() {
extern "C" {

View file

@ -1,4 +1,4 @@
//@only-target: linux
//@only-target: linux android
// test_epoll_block_then_unblock and test_epoll_race depend on a deterministic schedule.
//@compile-flags: -Zmiri-preemption-rate=0

View file

@ -1,4 +1,4 @@
//@only-target: linux
//@only-target: linux android
use std::convert::TryInto;

View file

@ -1,4 +1,4 @@
//@only-target: linux
//@only-target: linux android
// test_race, test_blocking_read and test_blocking_write depend on a deterministic schedule.
//@compile-flags: -Zmiri-preemption-rate=0
@ -197,7 +197,6 @@ fn test_two_threads_blocked_on_eventfd() {
assert_eq!(res, 8);
let thread1 = thread::spawn(move || {
thread::park();
let sized_8_data = 1_u64.to_ne_bytes();
let res: i64 = unsafe {
libc::write(fd, sized_8_data.as_ptr() as *const libc::c_void, 8).try_into().unwrap()
@ -207,7 +206,6 @@ fn test_two_threads_blocked_on_eventfd() {
});
let thread2 = thread::spawn(move || {
thread::park();
let sized_8_data = 1_u64.to_ne_bytes();
let res: i64 = unsafe {
libc::write(fd, sized_8_data.as_ptr() as *const libc::c_void, 8).try_into().unwrap()
@ -217,7 +215,6 @@ fn test_two_threads_blocked_on_eventfd() {
});
let thread3 = thread::spawn(move || {
thread::park();
let mut buf: [u8; 8] = [0; 8];
// This will unblock the previously blocked eventfd read.
let res = read_bytes(fd, &mut buf);
@ -227,10 +224,6 @@ fn test_two_threads_blocked_on_eventfd() {
assert_eq!(counter, (u64::MAX - 1));
});
thread1.thread().unpark();
thread2.thread().unpark();
thread3.thread().unpark();
thread1.join().unwrap();
thread2.join().unwrap();
thread3.join().unwrap();

View file

@ -48,7 +48,11 @@ fn test_readlink() {
// Test that we report a proper error for a missing path.
let res = unsafe {
libc::readlink(c"MIRI_MISSING_FILE_NAME".as_ptr(), small_buf.as_mut_ptr().cast(), small_buf.len())
libc::readlink(
c"MIRI_MISSING_FILE_NAME".as_ptr(),
small_buf.as_mut_ptr().cast(),
small_buf.len(),
)
};
assert_eq!(res, -1);
assert_eq!(Error::last_os_error().kind(), ErrorKind::NotFound);

View file

@ -0,0 +1,16 @@
//@ignore-target: windows # Supported only on unixes
fn main() {
unsafe {
let mut buf = vec![0u8; 32];
assert_eq!(libc::strerror_r(libc::EPERM, buf.as_mut_ptr().cast(), buf.len()), 0);
let mut buf2 = vec![0u8; 64];
assert_eq!(libc::strerror_r(-1i32, buf2.as_mut_ptr().cast(), buf2.len()), 0);
// This buffer is deliberately too small so this triggers ERANGE.
let mut buf3 = vec![0u8; 2];
assert_eq!(
libc::strerror_r(libc::E2BIG, buf3.as_mut_ptr().cast(), buf3.len()),
libc::ERANGE
);
}
}

View file

@ -0,0 +1,19 @@
//@ignore-target: windows # Supported only on unixes
fn test_sysconfbasic() {
unsafe {
let ncpus = libc::sysconf(libc::_SC_NPROCESSORS_CONF);
assert!(ncpus >= 1);
let psz = libc::sysconf(libc::_SC_PAGESIZE);
assert!(psz % 4096 == 0);
// Note that in reality it can return -1 (no hard limit) on some platforms.
let gwmax = libc::sysconf(libc::_SC_GETPW_R_SIZE_MAX);
assert!(gwmax >= 512);
let omax = libc::sysconf(libc::_SC_OPEN_MAX);
assert_eq!(omax, 65536);
}
}
fn main() {
test_sysconfbasic();
}

View file

@ -1,4 +1,4 @@
//@compile-flags: -Zmiri-ignore-leaks -Zmiri-disable-stacked-borrows -Zmiri-provenance-gc=10000
//@compile-flags: -Zmiri-ignore-leaks -Zmiri-disable-stacked-borrows -Zmiri-disable-validation -Zmiri-provenance-gc=10000
// This test's runtime explodes if the GC interval is set to 1 (which we do in CI), so we
// override it internally back to the default frequency.
@ -22,7 +22,7 @@
// Available: https://ss265.host.cs.st-andrews.ac.uk/papers/n3132.pdf.
use std::sync::atomic::Ordering::*;
use std::sync::atomic::{AtomicBool, AtomicI32, fence};
use std::sync::atomic::{AtomicBool, AtomicI32, Ordering, fence};
use std::thread::spawn;
#[derive(Copy, Clone)]
@ -34,19 +34,15 @@ unsafe impl<T> Sync for EvilSend<T> {}
// We can't create static items because we need to run each test
// multiple times
fn static_atomic(val: i32) -> &'static AtomicI32 {
let ret = Box::leak(Box::new(AtomicI32::new(val)));
ret.store(val, Relaxed); // work around https://github.com/rust-lang/miri/issues/2164
ret
Box::leak(Box::new(AtomicI32::new(val)))
}
fn static_atomic_bool(val: bool) -> &'static AtomicBool {
let ret = Box::leak(Box::new(AtomicBool::new(val)));
ret.store(val, Relaxed); // work around https://github.com/rust-lang/miri/issues/2164
ret
Box::leak(Box::new(AtomicBool::new(val)))
}
// Spins until it loads the pre-determined value.
fn acquires_value(loc: &AtomicI32, val: i32) -> i32 {
while loc.load(Acquire) != val {
fn loads_value(loc: &AtomicI32, ord: Ordering, val: i32) -> i32 {
while loc.load(ord) != val {
std::hint::spin_loop();
}
val
@ -69,7 +65,7 @@ fn test_corr() {
}); // | |
#[rustfmt::skip] // |synchronizes-with |happens-before
let j3 = spawn(move || { // | |
acquires_value(&y, 1); // <------------------+ |
loads_value(&y, Acquire, 1); // <------------+ |
x.load(Relaxed) // <----------------------------------------------+
// The two reads on x are ordered by hb, so they cannot observe values
// differently from the modification order. If the first read observed
@ -94,12 +90,12 @@ fn test_wrc() {
}); // | |
#[rustfmt::skip] // |synchronizes-with |
let j2 = spawn(move || { // | |
acquires_value(&x, 1); // <------------------+ |
loads_value(&x, Acquire, 1); // <------------+ |
y.store(1, Release); // ---------------------+ |happens-before
}); // | |
#[rustfmt::skip] // |synchronizes-with |
let j3 = spawn(move || { // | |
acquires_value(&y, 1); // <------------------+ |
loads_value(&y, Acquire, 1); // <------------+ |
x.load(Relaxed) // <-----------------------------------------------+
});
@ -125,7 +121,7 @@ fn test_message_passing() {
#[rustfmt::skip] // |synchronizes-with | happens-before
let j2 = spawn(move || { // | |
let x = x; // avoid field capturing | |
acquires_value(&y, 1); // <------------------+ |
loads_value(&y, Acquire, 1); // <------------+ |
unsafe { *x.0 } // <---------------------------------------------+
});
@ -268,9 +264,6 @@ fn test_iriw_sc_rlx() {
let x = static_atomic_bool(false);
let y = static_atomic_bool(false);
x.store(false, Relaxed);
y.store(false, Relaxed);
let a = spawn(move || x.store(true, Relaxed));
let b = spawn(move || y.store(true, Relaxed));
let c = spawn(move || {
@ -290,6 +283,152 @@ fn test_iriw_sc_rlx() {
assert!(c || d);
}
// Similar to `test_iriw_sc_rlx` but with fences instead of SC accesses.
fn test_cpp20_sc_fence_fix() {
let x = static_atomic_bool(false);
let y = static_atomic_bool(false);
let thread1 = spawn(|| {
let a = x.load(Relaxed);
fence(SeqCst);
let b = y.load(Relaxed);
(a, b)
});
let thread2 = spawn(|| {
x.store(true, Relaxed);
});
let thread3 = spawn(|| {
y.store(true, Relaxed);
});
let thread4 = spawn(|| {
let c = y.load(Relaxed);
fence(SeqCst);
let d = x.load(Relaxed);
(c, d)
});
let (a, b) = thread1.join().unwrap();
thread2.join().unwrap();
thread3.join().unwrap();
let (c, d) = thread4.join().unwrap();
let bad = a == true && b == false && c == true && d == false;
assert!(!bad);
}
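// Why the `bad` outcome is forbidden under C++20's strengthened SC fence
// semantics (see the scfix paper cited just below): both fences are in the
// single total order S of SC operations. If thread1's fence is first in S,
// then a == true (x was read as true before that fence) forces thread4's
// post-fence load of x to also see true, so d == true. If thread4's fence is
// first, then symmetrically c == true forces b == true. Either way,
// a && !b && c && !d cannot hold.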
// https://plv.mpi-sws.org/scfix/paper.pdf
// 2.2 Second Problem: SC Fences are Too Weak
fn test_cpp20_rwc_syncs() {
/*
int main() {
atomic_int x = 0;
atomic_int y = 0;
{{{ x.store(1,mo_relaxed);
||| { r1=x.load(mo_relaxed).readsvalue(1);
fence(mo_seq_cst);
r2=y.load(mo_relaxed); }
||| { y.store(1,mo_relaxed);
fence(mo_seq_cst);
r3=x.load(mo_relaxed); }
}}}
return 0;
}
*/
let x = static_atomic(0);
let y = static_atomic(0);
let j1 = spawn(move || {
x.store(1, Relaxed);
});
let j2 = spawn(move || {
loads_value(&x, Relaxed, 1);
fence(SeqCst);
y.load(Relaxed)
});
let j3 = spawn(move || {
y.store(1, Relaxed);
fence(SeqCst);
x.load(Relaxed)
});
j1.join().unwrap();
let b = j2.join().unwrap();
let c = j3.join().unwrap();
assert!((b, c) != (0, 0));
}
/// This checks that the *last* thing the SC fence does is act like a release fence.
/// See <https://github.com/rust-lang/miri/pull/4057#issuecomment-2522296601>.
fn test_sc_fence_release() {
let x = static_atomic(0);
let y = static_atomic(0);
let z = static_atomic(0);
let k = static_atomic(0);
let j1 = spawn(move || {
x.store(1, Relaxed);
fence(SeqCst);
k.store(1, Relaxed);
});
let j2 = spawn(move || {
y.store(1, Relaxed);
fence(SeqCst);
z.store(1, Relaxed);
});
let j3 = spawn(move || {
let kval = k.load(Acquire);
let yval = y.load(Relaxed);
(kval, yval)
});
let j4 = spawn(move || {
let zval = z.load(Acquire);
let xval = x.load(Relaxed);
(zval, xval)
});
j1.join().unwrap();
j2.join().unwrap();
let (kval, yval) = j3.join().unwrap();
let (zval, xval) = j4.join().unwrap();
let bad = kval == 1 && yval == 0 && zval == 1 && xval == 0;
assert!(!bad);
}
/// Test that SC fences and accesses sync correctly with each other.
fn test_sc_fence_access() {
/*
Wx1 sc
Ry0 sc
||
Wy1 rlx
SC-fence
Rx0 rlx
*/
let x = static_atomic(0);
let y = static_atomic(0);
let j1 = spawn(move || {
x.store(1, SeqCst);
y.load(SeqCst)
});
let j2 = spawn(move || {
y.store(1, Relaxed);
fence(SeqCst);
x.load(Relaxed)
});
let v1 = j1.join().unwrap();
let v2 = j2.join().unwrap();
let bad = v1 == 0 && v2 == 0;
assert!(!bad);
}
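// Why `bad` is impossible here: j1's SC store to x, j1's SC load of y, and
// j2's SC fence are all in the single total order S, with the store before the
// load (program order). If the fence is before the store in S, it is also
// before the load, and since `y.store(1, Relaxed)` is sequenced before the
// fence, the SC load of y must see 1, so v1 == 1. Otherwise the store is
// before the fence in S, and j2's relaxed load of x after the fence must see
// 1, so v2 == 1. Either way, (v1, v2) != (0, 0).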
pub fn main() {
for _ in 0..50 {
test_single_thread();
@ -301,5 +440,9 @@ pub fn main() {
test_sc_store_buffering();
test_sync_through_rmw_and_fences();
test_iriw_sc_rlx();
test_cpp20_sc_fence_fix();
test_cpp20_rwc_syncs();
test_sc_fence_release();
test_sc_fence_access();
}
}

View file

@ -1,44 +1,75 @@
#![feature(core_intrinsics)]
#![feature(core_intrinsics, portable_simd)]
use std::intrinsics::simd::simd_relaxed_fma;
use std::intrinsics::{fmuladdf32, fmuladdf64};
use std::simd::prelude::*;
fn main() {
let mut saw_zero = false;
let mut saw_nonzero = false;
fn ensure_both_happen(f: impl Fn() -> bool) -> bool {
let mut saw_true = false;
let mut saw_false = false;
for _ in 0..50 {
let a = std::hint::black_box(0.1_f64);
let b = std::hint::black_box(0.2);
let c = std::hint::black_box(-a * b);
// It is unspecified whether the following operation is fused or not. The
// following evaluates to 0.0 if unfused, and nonzero (-1.66e-18) if fused.
let x = unsafe { fmuladdf64(a, b, c) };
if x == 0.0 {
saw_zero = true;
let b = f();
if b {
saw_true = true;
} else {
saw_nonzero = true;
saw_false = true;
}
if saw_true && saw_false {
return true;
}
}
false
}
fn main() {
assert!(
saw_zero && saw_nonzero,
ensure_both_happen(|| {
let a = std::hint::black_box(0.1_f64);
let b = std::hint::black_box(0.2);
let c = std::hint::black_box(-a * b);
// It is unspecified whether the following operation is fused or not. The
// following evaluates to 0.0 if unfused, and nonzero (-1.66e-18) if fused.
let x = unsafe { fmuladdf64(a, b, c) };
x == 0.0
}),
"`fmuladdf64` failed to be evaluated as both fused and unfused"
);
let mut saw_zero = false;
let mut saw_nonzero = false;
for _ in 0..50 {
let a = std::hint::black_box(0.1_f32);
let b = std::hint::black_box(0.2);
let c = std::hint::black_box(-a * b);
// It is unspecified whether the following operation is fused or not. The
// following evaluates to 0.0 if unfused, and nonzero (-8.1956386e-10) if fused.
let x = unsafe { fmuladdf32(a, b, c) };
if x == 0.0 {
saw_zero = true;
} else {
saw_nonzero = true;
}
}
assert!(
saw_zero && saw_nonzero,
ensure_both_happen(|| {
let a = std::hint::black_box(0.1_f32);
let b = std::hint::black_box(0.2);
let c = std::hint::black_box(-a * b);
// It is unspecified whether the following operation is fused or not. The
// following evaluates to 0.0 if unfused, and nonzero (-8.1956386e-10) if fused.
let x = unsafe { fmuladdf32(a, b, c) };
x == 0.0
}),
"`fmuladdf32` failed to be evaluated as both fused and unfused"
);
assert!(
ensure_both_happen(|| {
let a = f32x4::splat(std::hint::black_box(0.1));
let b = f32x4::splat(std::hint::black_box(0.2));
let c = std::hint::black_box(-a * b);
let x = unsafe { simd_relaxed_fma(a, b, c) };
// Whether we fuse or not is a per-element decision, so sometimes these should be
// the same and sometimes not.
x[0] == x[1]
}),
"`simd_relaxed_fma` failed to be evaluated as both fused and unfused"
);
assert!(
ensure_both_happen(|| {
let a = f64x4::splat(std::hint::black_box(0.1));
let b = f64x4::splat(std::hint::black_box(0.2));
let c = std::hint::black_box(-a * b);
let x = unsafe { simd_relaxed_fma(a, b, c) };
// Whether we fuse or not is a per-element decision, so sometimes these should be
// the same and sometimes not.
x[0] == x[1]
}),
"`simd_relaxed_fma` failed to be evaluated as both fused and unfused"
);
}

View file

@ -40,6 +40,17 @@ fn simd_ops_f32() {
f32x4::splat(-3.2).mul_add(b, f32x4::splat(f32::NEG_INFINITY)),
f32x4::splat(f32::NEG_INFINITY)
);
unsafe {
assert_eq!(intrinsics::simd_relaxed_fma(a, b, a), (a * b) + a);
assert_eq!(intrinsics::simd_relaxed_fma(b, b, a), (b * b) + a);
assert_eq!(intrinsics::simd_relaxed_fma(a, b, b), (a * b) + b);
assert_eq!(
intrinsics::simd_relaxed_fma(f32x4::splat(-3.2), b, f32x4::splat(f32::NEG_INFINITY)),
f32x4::splat(f32::NEG_INFINITY)
);
}
assert_eq!((a * a).sqrt(), a);
assert_eq!((b * b).sqrt(), b.abs());
@ -94,6 +105,17 @@ fn simd_ops_f64() {
f64x4::splat(-3.2).mul_add(b, f64x4::splat(f64::NEG_INFINITY)),
f64x4::splat(f64::NEG_INFINITY)
);
unsafe {
assert_eq!(intrinsics::simd_relaxed_fma(a, b, a), (a * b) + a);
assert_eq!(intrinsics::simd_relaxed_fma(b, b, a), (b * b) + a);
assert_eq!(intrinsics::simd_relaxed_fma(a, b, b), (a * b) + b);
assert_eq!(
intrinsics::simd_relaxed_fma(f64x4::splat(-3.2), b, f64x4::splat(f64::NEG_INFINITY)),
f64x4::splat(f64::NEG_INFINITY)
);
}
assert_eq!((a * a).sqrt(), a);
assert_eq!((b * b).sqrt(), b.abs());

View file

@ -27,8 +27,11 @@ fn main() {
test_file_sync();
test_errors();
test_rename();
test_directory();
test_canonicalize();
// Miri does not yet support readdir/readdir64 on Solarish, which these tests need.
if cfg!(not(any(target_os = "solaris", target_os = "illumos"))) {
test_directory();
test_canonicalize();
}
test_from_raw_os_error();
#[cfg(unix)]
test_pread_pwrite();