Rollup merge of #130175 - nnethercote:rustc_mir_transform-cleanups-3, r=saethlin
`rustc_mir_transform` cleanups 3

More cleanups in the style of https://github.com/rust-lang/rust/pull/129929.

r? `@saethlin`
commit accd77ebd6
39 changed files with 566 additions and 567 deletions
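Most of the changes below follow a few recurring patterns: single-use helper functions are inlined into `run_pass`, over-long comments are rewrapped, and data that was previously passed around by reference is now owned by the struct that uses it. As a rough, self-contained sketch of the first pattern (hypothetical `Body` and `MyPass` types, not code from this diff):

```rust
struct Body {
    blocks: Vec<u32>,
}

struct MyPass;

// Before: `run_pass` forwarded to a single-use free function like this one.
#[allow(dead_code)]
fn do_work(body: &mut Body) {
    for b in &mut body.blocks {
        *b += 1;
    }
}

impl MyPass {
    // After: the helper's body lives directly in `run_pass`, so there is one
    // less indirection to read through.
    fn run_pass(&self, body: &mut Body) {
        for b in &mut body.blocks {
            *b += 1;
        }
    }
}

fn main() {
    let mut body = Body { blocks: vec![1, 2, 3] };
    MyPass.run_pass(&mut body);
    assert_eq!(body.blocks, vec![2, 3, 4]);
}
```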
@@ -32,12 +32,6 @@ pub(super) use self::AddCallGuards::*;
 impl<'tcx> crate::MirPass<'tcx> for AddCallGuards {
     fn run_pass(&self, _tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
-        self.add_call_guards(body);
-    }
-}
-
-impl AddCallGuards {
-    pub(super) fn add_call_guards(&self, body: &mut Body<'_>) {
         let mut pred_count: IndexVec<_, _> =
             body.basic_blocks.predecessors().iter().map(|ps| ps.len()).collect();
         pred_count[START_BLOCK] += 1;
@@ -40,35 +40,34 @@ pub(super) struct AddMovesForPackedDrops;
 impl<'tcx> crate::MirPass<'tcx> for AddMovesForPackedDrops {
     fn run_pass(&self, tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
         debug!("add_moves_for_packed_drops({:?} @ {:?})", body.source, body.span);
-        add_moves_for_packed_drops(tcx, body);
-    }
-}
-
-fn add_moves_for_packed_drops<'tcx>(tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
-    let patch = add_moves_for_packed_drops_patch(tcx, body);
-    patch.apply(body);
-}
-
-fn add_moves_for_packed_drops_patch<'tcx>(tcx: TyCtxt<'tcx>, body: &Body<'tcx>) -> MirPatch<'tcx> {
-    let def_id = body.source.def_id();
-    let mut patch = MirPatch::new(body);
-    let param_env = tcx.param_env(def_id);
-
-    for (bb, data) in body.basic_blocks.iter_enumerated() {
-        let loc = Location { block: bb, statement_index: data.statements.len() };
-        let terminator = data.terminator();
-
-        match terminator.kind {
-            TerminatorKind::Drop { place, .. }
-                if util::is_disaligned(tcx, body, param_env, place) =>
-            {
-                add_move_for_packed_drop(tcx, body, &mut patch, terminator, loc, data.is_cleanup);
-            }
-            _ => {}
-        }
-    }
-
-    patch
-}
+
+        let def_id = body.source.def_id();
+        let mut patch = MirPatch::new(body);
+        let param_env = tcx.param_env(def_id);
+
+        for (bb, data) in body.basic_blocks.iter_enumerated() {
+            let loc = Location { block: bb, statement_index: data.statements.len() };
+            let terminator = data.terminator();
+
+            match terminator.kind {
+                TerminatorKind::Drop { place, .. }
+                    if util::is_disaligned(tcx, body, param_env, place) =>
+                {
+                    add_move_for_packed_drop(
+                        tcx,
+                        body,
+                        &mut patch,
+                        terminator,
+                        loc,
+                        data.is_cleanup,
+                    );
+                }
+                _ => {}
+            }
+        }
+
+        patch.apply(body);
+    }
+}

 fn add_move_for_packed_drop<'tcx>(
@@ -60,7 +60,9 @@ impl<'tcx> crate::MirPass<'tcx> for AddRetag {
         let basic_blocks = body.basic_blocks.as_mut();
         let local_decls = &body.local_decls;
         let needs_retag = |place: &Place<'tcx>| {
-            !place.is_indirect_first_projection() // we're not really interested in stores to "outside" locations, they are hard to keep track of anyway
+            // We're not really interested in stores to "outside" locations, they are hard to keep
+            // track of anyway.
+            !place.is_indirect_first_projection()
                 && may_contain_reference(place.ty(&*local_decls, tcx).ty, /*depth*/ 3, tcx)
                 && !local_decls[place.local].is_deref_temp()
         };

@@ -129,9 +131,9 @@ impl<'tcx> crate::MirPass<'tcx> for AddRetag {
                 StatementKind::Assign(box (ref place, ref rvalue)) => {
                     let add_retag = match rvalue {
                         // Ptr-creating operations already do their own internal retagging, no
-                        // need to also add a retag statement.
-                        // *Except* if we are deref'ing a Box, because those get desugared to directly working
-                        // with the inner raw pointer! That's relevant for `RawPtr` as Miri otherwise makes it
+                        // need to also add a retag statement. *Except* if we are deref'ing a
+                        // Box, because those get desugared to directly working with the inner
+                        // raw pointer! That's relevant for `RawPtr` as Miri otherwise makes it
                         // a NOP when the original pointer is already raw.
                         Rvalue::RawPtr(_mutbl, place) => {
                             // Using `is_box_global` here is a bit sketchy: if this code is
@@ -51,18 +51,14 @@ impl<'a, 'tcx> MutVisitor<'tcx> for SubTypeChecker<'a, 'tcx> {
 // // gets transformed to
 // let temp: rval_ty = rval;
 // let place: place_ty = temp as place_ty;
-fn subtype_finder<'tcx>(tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
-    let patch = MirPatch::new(body);
-    let mut checker = SubTypeChecker { tcx, patcher: patch, local_decls: &body.local_decls };
-
-    for (bb, data) in body.basic_blocks.as_mut_preserves_cfg().iter_enumerated_mut() {
-        checker.visit_basic_block_data(bb, data);
-    }
-    checker.patcher.apply(body);
-}
-
 impl<'tcx> crate::MirPass<'tcx> for Subtyper {
     fn run_pass(&self, tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
-        subtype_finder(tcx, body);
+        let patch = MirPatch::new(body);
+        let mut checker = SubTypeChecker { tcx, patcher: patch, local_decls: &body.local_decls };
+
+        for (bb, data) in body.basic_blocks.as_mut_preserves_cfg().iter_enumerated_mut() {
+            checker.visit_basic_block_data(bb, data);
+        }
+        checker.patcher.apply(body);
     }
 }
@@ -123,6 +123,7 @@ impl<'tcx> Visitor<'tcx> for ConstMutationChecker<'_, 'tcx> {
         self.super_statement(stmt, loc);
         self.target_local = None;
     }
+
     fn visit_rvalue(&mut self, rvalue: &Rvalue<'tcx>, loc: Location) {
         if let Rvalue::Ref(_, BorrowKind::Mut { .. }, place) = rvalue {
             let local = place.local;
@@ -27,37 +27,34 @@ impl<'tcx> crate::MirPass<'tcx> for CopyProp {
     #[instrument(level = "trace", skip(self, tcx, body))]
     fn run_pass(&self, tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
         debug!(def_id = ?body.source.def_id());
-        propagate_ssa(tcx, body);
-    }
-}
-
-fn propagate_ssa<'tcx>(tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
         let param_env = tcx.param_env_reveal_all_normalized(body.source.def_id());
         let ssa = SsaLocals::new(tcx, body, param_env);

         let fully_moved = fully_moved_locals(&ssa, body);
         debug!(?fully_moved);

         let mut storage_to_remove = BitSet::new_empty(fully_moved.domain_size());
         for (local, &head) in ssa.copy_classes().iter_enumerated() {
             if local != head {
                 storage_to_remove.insert(head);
             }
         }

         let any_replacement = ssa.copy_classes().iter_enumerated().any(|(l, &h)| l != h);

         Replacer {
             tcx,
             copy_classes: ssa.copy_classes(),
             fully_moved,
             borrowed_locals: ssa.borrowed_locals(),
             storage_to_remove,
         }
         .visit_body_preserves_cfg(body);

         if any_replacement {
             crate::simplify::remove_unused_definitions(body);
         }
     }
 }
@@ -140,7 +137,8 @@ impl<'tcx> MutVisitor<'tcx> for Replacer<'_, 'tcx> {

     fn visit_operand(&mut self, operand: &mut Operand<'tcx>, loc: Location) {
         if let Operand::Move(place) = *operand
-            // A move out of a projection of a copy is equivalent to a copy of the original projection.
+            // A move out of a projection of a copy is equivalent to a copy of the original
+            // projection.
             && !place.is_indirect_first_projection()
             && !self.fully_moved.contains(place.local)
         {

@@ -279,7 +279,8 @@ fn inject_mcdc_statements<'tcx>(
     basic_coverage_blocks: &CoverageGraph,
     extracted_mappings: &ExtractedMappings,
 ) {
-    // Inject test vector update first because `inject_statement` always insert new statement at head.
+    // Inject test vector update first because `inject_statement` always insert new statement at
+    // head.
     for &mappings::MCDCDecision {
         span: _,
         ref end_bcbs,

@@ -647,7 +647,8 @@ fn try_write_constant<'tcx>(
         ty::FnDef(..) => {}

         // Those are scalars, must be handled above.
-        ty::Bool | ty::Int(_) | ty::Uint(_) | ty::Float(_) | ty::Char => throw_machine_stop_str!("primitive type with provenance"),
+        ty::Bool | ty::Int(_) | ty::Uint(_) | ty::Float(_) | ty::Char =>
+            throw_machine_stop_str!("primitive type with provenance"),

         ty::Tuple(elem_tys) => {
             for (i, elem) in elem_tys.iter().enumerate() {

@@ -42,9 +42,9 @@ impl<'tcx> Visitor<'tcx> for DeduceReadOnly {
             }
             PlaceContext::NonMutatingUse(NonMutatingUseContext::RawBorrow) => {
                 // Whether mutating though a `&raw const` is allowed is still undecided, so we
-                // disable any sketchy `readonly` optimizations for now.
-                // But we only need to do this if the pointer would point into the argument.
-                // IOW: for indirect places, like `&raw (*local).field`, this surely cannot mutate `local`.
+                // disable any sketchy `readonly` optimizations for now. But we only need to do
+                // this if the pointer would point into the argument. IOW: for indirect places,
+                // like `&raw (*local).field`, this surely cannot mutate `local`.
                 !place.is_indirect()
             }
             PlaceContext::NonMutatingUse(..) | PlaceContext::NonUse(..) => {

@@ -69,8 +69,8 @@ fn find_duplicates(body: &Body<'_>) -> FxHashMap<BasicBlock, BasicBlock> {
     // For example, if bb1, bb2 and bb3 are duplicates, we will first insert bb3 in same_hashes.
     // Then we will see that bb2 is a duplicate of bb3,
     // and insert bb2 with the replacement bb3 in the duplicates list.
-    // When we see bb1, we see that it is a duplicate of bb3, and therefore insert it in the duplicates list
-    // with replacement bb3.
+    // When we see bb1, we see that it is a duplicate of bb3, and therefore insert it in the
+    // duplicates list with replacement bb3.
    // When the duplicates are removed, we will end up with only bb3.
    for (bb, bbd) in body.basic_blocks.iter_enumerated().rev().filter(|(_, bbd)| !bbd.is_cleanup) {
        // Basic blocks can get really big, so to avoid checking for duplicates in basic blocks

@@ -105,7 +105,8 @@ struct BasicBlockHashable<'tcx, 'a> {
 impl Hash for BasicBlockHashable<'_, '_> {
     fn hash<H: Hasher>(&self, state: &mut H) {
         hash_statements(state, self.basic_block_data.statements.iter());
-        // Note that since we only hash the kind, we lose span information if we deduplicate the blocks
+        // Note that since we only hash the kind, we lose span information if we deduplicate the
+        // blocks.
         self.basic_block_data.terminator().kind.hash(state);
     }
 }
@@ -242,7 +242,7 @@ impl<'tcx> crate::MirPass<'tcx> for DestinationPropagation {
             }
             round_count += 1;

-            apply_merges(body, tcx, &merges, &merged_locals);
+            apply_merges(body, tcx, merges, merged_locals);
         }

         trace!(round_count);

@@ -281,20 +281,20 @@ struct Candidates {
 fn apply_merges<'tcx>(
     body: &mut Body<'tcx>,
     tcx: TyCtxt<'tcx>,
-    merges: &FxIndexMap<Local, Local>,
-    merged_locals: &BitSet<Local>,
+    merges: FxIndexMap<Local, Local>,
+    merged_locals: BitSet<Local>,
 ) {
     let mut merger = Merger { tcx, merges, merged_locals };
     merger.visit_body_preserves_cfg(body);
 }

-struct Merger<'a, 'tcx> {
+struct Merger<'tcx> {
     tcx: TyCtxt<'tcx>,
-    merges: &'a FxIndexMap<Local, Local>,
-    merged_locals: &'a BitSet<Local>,
+    merges: FxIndexMap<Local, Local>,
+    merged_locals: BitSet<Local>,
 }

-impl<'a, 'tcx> MutVisitor<'tcx> for Merger<'a, 'tcx> {
+impl<'tcx> MutVisitor<'tcx> for Merger<'tcx> {
     fn tcx(&self) -> TyCtxt<'tcx> {
         self.tcx
     }
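The `Merger` change above is an instance of the ownership pattern: a visitor that is built, used once, and discarded can own its inputs instead of borrowing them, which removes the `'a` lifetime parameter from the struct and its impls. A minimal sketch of the idea with stand-in types (not the rustc API):

```rust
use std::collections::HashMap;

// Before: a borrowed field forces a lifetime parameter on the struct.
#[allow(dead_code)]
struct BorrowingMerger<'a> {
    merges: &'a HashMap<u32, u32>,
}

// After: the single-use visitor owns the map, so no lifetime is needed.
struct OwningMerger {
    merges: HashMap<u32, u32>,
}

impl OwningMerger {
    fn replacement(&self, local: u32) -> u32 {
        *self.merges.get(&local).unwrap_or(&local)
    }
}

fn main() {
    let merges = HashMap::from([(1, 0)]);
    let merger = OwningMerger { merges };
    assert_eq!(merger.replacement(1), 0);
    assert_eq!(merger.replacement(2), 2);
}
```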
@@ -261,8 +261,8 @@ fn evaluate_candidate<'tcx>(
     // };
     // ```
     //
-    // Hoisting the `discriminant(Q)` out of the `A` arm causes us to compute the discriminant of an
-    // invalid value, which is UB.
+    // Hoisting the `discriminant(Q)` out of the `A` arm causes us to compute the discriminant
+    // of an invalid value, which is UB.
     // In order to fix this, **we would either need to show that the discriminant computation of
     // `place` is computed in all branches**.
     // FIXME(#95162) For the moment, we adopt a conservative approach and

@@ -20,8 +20,8 @@ use tracing::{debug, instrument};
 use crate::deref_separator::deref_finder;

 /// During MIR building, Drop terminators are inserted in every place where a drop may occur.
-/// However, in this phase, the presence of these terminators does not guarantee that a destructor will run,
-/// as the target of the drop may be uninitialized.
+/// However, in this phase, the presence of these terminators does not guarantee that a destructor
+/// will run, as the target of the drop may be uninitialized.
 /// In general, the compiler cannot determine at compile time whether a destructor will run or not.
 ///
 /// At a high level, this pass refines Drop to only run the destructor if the

@@ -30,10 +30,10 @@ use crate::deref_separator::deref_finder;
 /// Once this is complete, Drop terminators in the MIR correspond to a call to the "drop glue" or
 /// "drop shim" for the type of the dropped place.
 ///
-/// This pass relies on dropped places having an associated move path, which is then used to determine
-/// the initialization status of the place and its descendants.
-/// It's worth noting that a MIR containing a Drop without an associated move path is probably ill formed,
-/// as it would allow running a destructor on a place behind a reference:
+/// This pass relies on dropped places having an associated move path, which is then used to
+/// determine the initialization status of the place and its descendants.
+/// It's worth noting that a MIR containing a Drop without an associated move path is probably ill
+/// formed, as it would allow running a destructor on a place behind a reference:
 ///
 /// ```text
 /// fn drop_term<T>(t: &mut T) {

@@ -377,8 +377,8 @@ impl<'a, 'tcx> ElaborateDropsCtxt<'a, 'tcx> {
                     );
                 }
                 // A drop and replace behind a pointer/array/whatever.
-                // The borrow checker requires that these locations are initialized before the assignment,
-                // so we just leave an unconditional drop.
+                // The borrow checker requires that these locations are initialized before the
+                // assignment, so we just leave an unconditional drop.
                 assert!(!data.is_cleanup);
             }
         }

@@ -60,8 +60,9 @@ fn has_ffi_unwind_calls(tcx: TyCtxt<'_>, local_def_id: LocalDefId) -> bool {
         let fn_def_id = match ty.kind() {
             ty::FnPtr(..) => None,
             &ty::FnDef(def_id, _) => {
-                // Rust calls cannot themselves create foreign unwinds (even if they use a non-Rust ABI).
-                // So the leak of the foreign unwind into Rust can only be elsewhere, not here.
+                // Rust calls cannot themselves create foreign unwinds (even if they use a non-Rust
+                // ABI). So the leak of the foreign unwind into Rust can only be elsewhere, not
+                // here.
                 if !tcx.is_foreign_item(def_id) {
                     continue;
                 }

@@ -92,8 +92,8 @@ impl<'tcx> FunctionItemRefChecker<'_, 'tcx> {
                 {
                     let mut span = self.nth_arg_span(args, arg_num);
                     if span.from_expansion() {
-                        // The operand's ctxt wouldn't display the lint since it's inside a macro so
-                        // we have to use the callsite's ctxt.
+                        // The operand's ctxt wouldn't display the lint since it's
+                        // inside a macro so we have to use the callsite's ctxt.
                         let callsite_ctxt = span.source_callsite().ctxt();
                         span = span.with_ctxt(callsite_ctxt);
                     }
@@ -119,53 +119,50 @@ impl<'tcx> crate::MirPass<'tcx> for GVN {
     #[instrument(level = "trace", skip(self, tcx, body))]
     fn run_pass(&self, tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
         debug!(def_id = ?body.source.def_id());
-        propagate_ssa(tcx, body);
-    }
-}
-
-fn propagate_ssa<'tcx>(tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
         let param_env = tcx.param_env_reveal_all_normalized(body.source.def_id());
         let ssa = SsaLocals::new(tcx, body, param_env);
-        // Clone dominators as we need them while mutating the body.
+        // Clone dominators because we need them while mutating the body.
         let dominators = body.basic_blocks.dominators().clone();

-        let mut state = VnState::new(tcx, body, param_env, &ssa, &dominators, &body.local_decls);
+        let mut state = VnState::new(tcx, body, param_env, &ssa, dominators, &body.local_decls);
         ssa.for_each_assignment_mut(
             body.basic_blocks.as_mut_preserves_cfg(),
             |local, value, location| {
                 let value = match value {
                     // We do not know anything of this assigned value.
                     AssignedValue::Arg | AssignedValue::Terminator => None,
                     // Try to get some insight.
                     AssignedValue::Rvalue(rvalue) => {
                         let value = state.simplify_rvalue(rvalue, location);
-                        // FIXME(#112651) `rvalue` may have a subtype to `local`. We can only mark `local` as
-                        // reusable if we have an exact type match.
+                        // FIXME(#112651) `rvalue` may have a subtype to `local`. We can only mark
+                        // `local` as reusable if we have an exact type match.
                         if state.local_decls[local].ty != rvalue.ty(state.local_decls, tcx) {
                             return;
                         }
                         value
                     }
                 };
                 // `next_opaque` is `Some`, so `new_opaque` must return `Some`.
                 let value = value.or_else(|| state.new_opaque()).unwrap();
                 state.assign(local, value);
             },
         );

         // Stop creating opaques during replacement as it is useless.
         state.next_opaque = None;

         let reverse_postorder = body.basic_blocks.reverse_postorder().to_vec();
         for bb in reverse_postorder {
             let data = &mut body.basic_blocks.as_mut_preserves_cfg()[bb];
             state.visit_basic_block_data(bb, data);
         }

         // For each local that is reused (`y` above), we remove its storage statements do avoid any
         // difficulty. Those locals are SSA, so should be easy to optimize by LLVM without storage
         // statements.
         StorageRemover { tcx, reused_locals: state.reused_locals }.visit_body_preserves_cfg(body);
     }
 }

 newtype_index! {
@@ -261,7 +258,7 @@ struct VnState<'body, 'tcx> {
     /// Cache the value of the `unsized_locals` features, to avoid fetching it repeatedly in a loop.
     feature_unsized_locals: bool,
     ssa: &'body SsaLocals,
-    dominators: &'body Dominators<BasicBlock>,
+    dominators: Dominators<BasicBlock>,
     reused_locals: BitSet<Local>,
 }

@@ -271,7 +268,7 @@ impl<'body, 'tcx> VnState<'body, 'tcx> {
         body: &Body<'tcx>,
         param_env: ty::ParamEnv<'tcx>,
         ssa: &'body SsaLocals,
-        dominators: &'body Dominators<BasicBlock>,
+        dominators: Dominators<BasicBlock>,
         local_decls: &'body LocalDecls<'tcx>,
     ) -> Self {
         // Compute a rough estimate of the number of values in the body from the number of

@@ -480,7 +477,8 @@ impl<'body, 'tcx> VnState<'body, 'tcx> {
                 let pointer = self.evaluated[local].as_ref()?;
                 let mut mplace = self.ecx.deref_pointer(pointer).ok()?;
                 for proj in place.projection.iter().skip(1) {
-                    // We have no call stack to associate a local with a value, so we cannot interpret indexing.
+                    // We have no call stack to associate a local with a value, so we cannot
+                    // interpret indexing.
                     if matches!(proj, ProjectionElem::Index(_)) {
                         return None;
                     }

@@ -1382,7 +1380,8 @@ fn op_to_prop_const<'tcx>(
         return Some(ConstValue::ZeroSized);
     }

-    // Do not synthetize too large constants. Codegen will just memcpy them, which we'd like to avoid.
+    // Do not synthetize too large constants. Codegen will just memcpy them, which we'd like to
+    // avoid.
     if !matches!(op.layout.abi, Abi::Scalar(..) | Abi::ScalarPair(..)) {
         return None;
     }

@@ -1491,7 +1490,7 @@ impl<'tcx> VnState<'_, 'tcx> {
         let other = self.rev_locals.get(index)?;
         other
             .iter()
-            .find(|&&other| self.ssa.assignment_dominates(self.dominators, other, loc))
+            .find(|&&other| self.ssa.assignment_dominates(&self.dominators, other, loc))
             .copied()
     }
 }

@@ -568,7 +568,8 @@ impl<'tcx> Inliner<'tcx> {
                 // if the no-attribute function ends up with the same instruction set anyway.
                 return Err("Cannot move inline-asm across instruction sets");
             } else if let TerminatorKind::TailCall { .. } = term.kind {
-                // FIXME(explicit_tail_calls): figure out how exactly functions containing tail calls can be inlined (and if they even should)
+                // FIXME(explicit_tail_calls): figure out how exactly functions containing tail
+                // calls can be inlined (and if they even should)
                 return Err("can't inline functions with tail calls");
             } else {
                 work_list.extend(term.successors())
@@ -18,19 +18,13 @@ pub(super) enum InstSimplify {
     AfterSimplifyCfg,
 }

-impl InstSimplify {
+impl<'tcx> crate::MirPass<'tcx> for InstSimplify {
     fn name(&self) -> &'static str {
         match self {
             InstSimplify::BeforeInline => "InstSimplify-before-inline",
             InstSimplify::AfterSimplifyCfg => "InstSimplify-after-simplifycfg",
         }
     }
-}
-
-impl<'tcx> crate::MirPass<'tcx> for InstSimplify {
-    fn name(&self) -> &'static str {
-        self.name()
-    }

     fn is_enabled(&self, sess: &rustc_session::Session) -> bool {
         sess.mir_opt_level() > 0
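The `InstSimplify` hunk folds the inherent `name` helper into the `MirPass` impl itself, so the trait method no longer bounces through `self.name()`. Roughly, the resulting shape looks like this (a sketch with a stand-in `Pass` trait, not the actual `MirPass` trait):

```rust
trait Pass {
    fn name(&self) -> &'static str;
}

enum InstSimplify {
    BeforeInline,
    AfterSimplifyCfg,
}

impl Pass for InstSimplify {
    // The match now lives directly in the trait impl; there is no separate
    // inherent impl whose only caller is the trait method.
    fn name(&self) -> &'static str {
        match self {
            InstSimplify::BeforeInline => "InstSimplify-before-inline",
            InstSimplify::AfterSimplifyCfg => "InstSimplify-after-simplifycfg",
        }
    }
}

fn main() {
    assert_eq!(InstSimplify::BeforeInline.name(), "InstSimplify-before-inline");
    assert_eq!(InstSimplify::AfterSimplifyCfg.name(), "InstSimplify-after-simplifycfg");
}
```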
@@ -78,18 +78,16 @@ impl<'tcx> crate::MirPass<'tcx> for JumpThreading {
         }

         let param_env = tcx.param_env_reveal_all_normalized(def_id);
-        let map = Map::new(tcx, body, Some(MAX_PLACES));
-        let loop_headers = loop_headers(body);
-
-        let arena = DroplessArena::default();
+        let arena = &DroplessArena::default();
         let mut finder = TOFinder {
             tcx,
             param_env,
             ecx: InterpCx::new(tcx, DUMMY_SP, param_env, DummyMachine),
             body,
-            arena: &arena,
-            map: &map,
-            loop_headers: &loop_headers,
+            arena,
+            map: Map::new(tcx, body, Some(MAX_PLACES)),
+            loop_headers: loop_headers(body),
             opportunities: Vec::new(),
         };

@@ -105,7 +103,7 @@ impl<'tcx> crate::MirPass<'tcx> for JumpThreading {

         // Verify that we do not thread through a loop header.
         for to in opportunities.iter() {
-            assert!(to.chain.iter().all(|&block| !loop_headers.contains(block)));
+            assert!(to.chain.iter().all(|&block| !finder.loop_headers.contains(block)));
         }
         OpportunitySet::new(body, opportunities).apply(body);
     }

@@ -124,8 +122,8 @@ struct TOFinder<'tcx, 'a> {
     param_env: ty::ParamEnv<'tcx>,
     ecx: InterpCx<'tcx, DummyMachine>,
     body: &'a Body<'tcx>,
-    map: &'a Map<'tcx>,
-    loop_headers: &'a BitSet<BasicBlock>,
+    map: Map<'tcx>,
+    loop_headers: BitSet<BasicBlock>,
     /// We use an arena to avoid cloning the slices when cloning `state`.
     arena: &'a DroplessArena,
     opportunities: Vec<ThreadingOpportunity>,
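`TOFinder` now owns `map` and `loop_headers`, and the following hunks change the call sites to borrow them on demand (`&self.map`) instead of storing `&'a` references in the struct. A small illustration of that trade-off with stand-in types (illustrative only, not the rustc types):

```rust
use std::collections::HashSet;

struct Finder {
    // Owned instead of `&'a HashSet<u32>`: one fewer lifetime parameter, and
    // the owner can still lend the value out wherever a reference is needed.
    loop_headers: HashSet<u32>,
}

fn contains(set: &HashSet<u32>, block: u32) -> bool {
    set.contains(&block)
}

impl Finder {
    fn is_loop_header(&self, block: u32) -> bool {
        // Borrow the owned field at the use site.
        contains(&self.loop_headers, block)
    }
}

fn main() {
    let finder = Finder { loop_headers: HashSet::from([3]) };
    assert!(finder.is_loop_header(3));
    assert!(!finder.is_loop_header(4));
}
```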
@@ -223,7 +221,7 @@ impl<'tcx, 'a> TOFinder<'tcx, 'a> {
             }))
         };
         let conds = ConditionSet(conds);
-        state.insert_value_idx(discr, conds, self.map);
+        state.insert_value_idx(discr, conds, &self.map);

         self.find_opportunity(bb, state, cost, 0);
     }

@@ -264,7 +262,7 @@ impl<'tcx, 'a> TOFinder<'tcx, 'a> {
         // _1 = 5 // Whatever happens here, it won't change the result of a `SwitchInt`.
         // _1 = 6
         if let Some((lhs, tail)) = self.mutated_statement(stmt) {
-            state.flood_with_tail_elem(lhs.as_ref(), tail, self.map, ConditionSet::BOTTOM);
+            state.flood_with_tail_elem(lhs.as_ref(), tail, &self.map, ConditionSet::BOTTOM);
         }
     }

@@ -370,7 +368,7 @@ impl<'tcx, 'a> TOFinder<'tcx, 'a> {
             self.opportunities.push(ThreadingOpportunity { chain: vec![bb], target: c.target })
         };

-        if let Some(conditions) = state.try_get_idx(lhs, self.map)
+        if let Some(conditions) = state.try_get_idx(lhs, &self.map)
             && let Immediate::Scalar(Scalar::Int(int)) = *rhs
         {
             conditions.iter_matches(int).for_each(register_opportunity);

@@ -406,7 +404,7 @@ impl<'tcx, 'a> TOFinder<'tcx, 'a> {
                 }
             },
             &mut |place, op| {
-                if let Some(conditions) = state.try_get_idx(place, self.map)
+                if let Some(conditions) = state.try_get_idx(place, &self.map)
                     && let Ok(imm) = self.ecx.read_immediate_raw(op)
                     && let Some(imm) = imm.right()
                     && let Immediate::Scalar(Scalar::Int(int)) = *imm

@@ -441,7 +439,7 @@ impl<'tcx, 'a> TOFinder<'tcx, 'a> {
             // Transfer the conditions on the copied rhs.
             Operand::Move(rhs) | Operand::Copy(rhs) => {
                 let Some(rhs) = self.map.find(rhs.as_ref()) else { return };
-                state.insert_place_idx(rhs, lhs, self.map);
+                state.insert_place_idx(rhs, lhs, &self.map);
             }
         }
     }

@@ -461,7 +459,7 @@ impl<'tcx, 'a> TOFinder<'tcx, 'a> {
             Rvalue::CopyForDeref(rhs) => self.process_operand(bb, lhs, &Operand::Copy(*rhs), state),
             Rvalue::Discriminant(rhs) => {
                 let Some(rhs) = self.map.find_discr(rhs.as_ref()) else { return };
-                state.insert_place_idx(rhs, lhs, self.map);
+                state.insert_place_idx(rhs, lhs, &self.map);
             }
             // If we expect `lhs ?= A`, we have an opportunity if we assume `constant == A`.
             Rvalue::Aggregate(box ref kind, ref operands) => {

@@ -492,10 +490,10 @@ impl<'tcx, 'a> TOFinder<'tcx, 'a> {
             }
             // Transfer the conditions on the copy rhs, after inversing polarity.
             Rvalue::UnaryOp(UnOp::Not, Operand::Move(place) | Operand::Copy(place)) => {
-                let Some(conditions) = state.try_get_idx(lhs, self.map) else { return };
+                let Some(conditions) = state.try_get_idx(lhs, &self.map) else { return };
                 let Some(place) = self.map.find(place.as_ref()) else { return };
                 let conds = conditions.map(self.arena, Condition::inv);
-                state.insert_value_idx(place, conds, self.map);
+                state.insert_value_idx(place, conds, &self.map);
             }
             // We expect `lhs ?= A`. We found `lhs = Eq(rhs, B)`.
             // Create a condition on `rhs ?= B`.

@@ -504,7 +502,7 @@ impl<'tcx, 'a> TOFinder<'tcx, 'a> {
                 box (Operand::Move(place) | Operand::Copy(place), Operand::Constant(value))
                 | box (Operand::Constant(value), Operand::Move(place) | Operand::Copy(place)),
             ) => {
-                let Some(conditions) = state.try_get_idx(lhs, self.map) else { return };
+                let Some(conditions) = state.try_get_idx(lhs, &self.map) else { return };
                 let Some(place) = self.map.find(place.as_ref()) else { return };
                 let equals = match op {
                     BinOp::Eq => ScalarInt::TRUE,

@@ -528,7 +526,7 @@ impl<'tcx, 'a> TOFinder<'tcx, 'a> {
                     polarity: if c.matches(equals) { Polarity::Eq } else { Polarity::Ne },
                     ..c
                 });
-                state.insert_value_idx(place, conds, self.map);
+                state.insert_value_idx(place, conds, &self.map);
             }

             _ => {}

@@ -583,7 +581,7 @@ impl<'tcx, 'a> TOFinder<'tcx, 'a> {
             StatementKind::Intrinsic(box NonDivergingIntrinsic::Assume(
                 Operand::Copy(place) | Operand::Move(place),
             )) => {
-                let Some(conditions) = state.try_get(place.as_ref(), self.map) else { return };
+                let Some(conditions) = state.try_get(place.as_ref(), &self.map) else { return };
                 conditions.iter_matches(ScalarInt::TRUE).for_each(register_opportunity);
             }
             StatementKind::Assign(box (lhs_place, rhs)) => {

@@ -631,7 +629,7 @@ impl<'tcx, 'a> TOFinder<'tcx, 'a> {
             // We can recurse through this terminator.
             let mut state = state();
             if let Some(place_to_flood) = place_to_flood {
-                state.flood_with(place_to_flood.as_ref(), self.map, ConditionSet::BOTTOM);
+                state.flood_with(place_to_flood.as_ref(), &self.map, ConditionSet::BOTTOM);
             }
             self.find_opportunity(bb, state, cost.clone(), depth + 1);
         }

@@ -650,7 +648,7 @@ impl<'tcx, 'a> TOFinder<'tcx, 'a> {
         let Some(discr) = discr.place() else { return };
         let discr_ty = discr.ty(self.body, self.tcx).ty;
         let Ok(discr_layout) = self.ecx.layout_of(discr_ty) else { return };
-        let Some(conditions) = state.try_get(discr.as_ref(), self.map) else { return };
+        let Some(conditions) = state.try_get(discr.as_ref(), &self.map) else { return };

         if let Some((value, _)) = targets.iter().find(|&(_, target)| target == target_bb) {
             let Some(value) = ScalarInt::try_from_uint(value, discr_layout.size) else { return };
|
@ -1,8 +1,6 @@
|
||||||
//! A lint that checks for known panics like
|
//! A lint that checks for known panics like overflows, division by zero,
|
||||||
//! overflows, division by zero,
|
//! out-of-bound access etc. Uses const propagation to determine the values of
|
||||||
//! out-of-bound access etc.
|
//! operands during checks.
|
||||||
//! Uses const propagation to determine the
|
|
||||||
//! values of operands during checks.
|
|
||||||
|
|
||||||
use std::fmt::Debug;
|
use std::fmt::Debug;
|
||||||
|
|
||||||
|
@ -562,7 +560,8 @@ impl<'mir, 'tcx> ConstPropagator<'mir, 'tcx> {
|
||||||
|
|
||||||
let val = self.use_ecx(|this| this.ecx.binary_op(bin_op, &left, &right))?;
|
let val = self.use_ecx(|this| this.ecx.binary_op(bin_op, &left, &right))?;
|
||||||
if matches!(val.layout.abi, Abi::ScalarPair(..)) {
|
if matches!(val.layout.abi, Abi::ScalarPair(..)) {
|
||||||
// FIXME `Value` should properly support pairs in `Immediate`... but currently it does not.
|
// FIXME `Value` should properly support pairs in `Immediate`... but currently
|
||||||
|
// it does not.
|
||||||
let (val, overflow) = val.to_pair(&self.ecx);
|
let (val, overflow) = val.to_pair(&self.ecx);
|
||||||
Value::Aggregate {
|
Value::Aggregate {
|
||||||
variant: VariantIdx::ZERO,
|
variant: VariantIdx::ZERO,
|
||||||
|
|
|
@ -16,8 +16,7 @@ use rustc_target::abi::{HasDataLayout, Size, TagEncoding, Variants};
|
||||||
/// Large([u32; 1024]),
|
/// Large([u32; 1024]),
|
||||||
/// }
|
/// }
|
||||||
/// ```
|
/// ```
|
||||||
/// Instead of emitting moves of the large variant,
|
/// Instead of emitting moves of the large variant, perform a memcpy instead.
|
||||||
/// Perform a memcpy instead.
|
|
||||||
/// Based off of [this HackMD](https://hackmd.io/@ft4bxUsFT5CEUBmRKYHr7w/rJM8BBPzD).
|
/// Based off of [this HackMD](https://hackmd.io/@ft4bxUsFT5CEUBmRKYHr7w/rJM8BBPzD).
|
||||||
///
|
///
|
||||||
/// In summary, what this does is at runtime determine which enum variant is active,
|
/// In summary, what this does is at runtime determine which enum variant is active,
|
||||||
|
@ -34,10 +33,173 @@ impl<'tcx> crate::MirPass<'tcx> for EnumSizeOpt {
|
||||||
// https://github.com/rust-lang/rust/pull/85158#issuecomment-1101836457
|
// https://github.com/rust-lang/rust/pull/85158#issuecomment-1101836457
|
||||||
sess.opts.unstable_opts.unsound_mir_opts || sess.mir_opt_level() >= 3
|
sess.opts.unstable_opts.unsound_mir_opts || sess.mir_opt_level() >= 3
|
||||||
}
|
}
|
||||||
|
|
||||||
fn run_pass(&self, tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
|
fn run_pass(&self, tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
|
||||||
// NOTE: This pass may produce different MIR based on the alignment of the target
|
// NOTE: This pass may produce different MIR based on the alignment of the target
|
||||||
// platform, but it will still be valid.
|
// platform, but it will still be valid.
|
||||||
self.optim(tcx, body);
|
|
||||||
|
let mut alloc_cache = FxHashMap::default();
|
||||||
|
let body_did = body.source.def_id();
|
||||||
|
let param_env = tcx.param_env_reveal_all_normalized(body_did);
|
||||||
|
|
||||||
|
let blocks = body.basic_blocks.as_mut();
|
||||||
|
let local_decls = &mut body.local_decls;
|
||||||
|
|
||||||
|
for bb in blocks {
|
||||||
|
bb.expand_statements(|st| {
|
||||||
|
let StatementKind::Assign(box (
|
||||||
|
lhs,
|
||||||
|
Rvalue::Use(Operand::Copy(rhs) | Operand::Move(rhs)),
|
||||||
|
)) = &st.kind
|
||||||
|
else {
|
||||||
|
return None;
|
||||||
|
};
|
||||||
|
|
||||||
|
let ty = lhs.ty(local_decls, tcx).ty;
|
||||||
|
|
||||||
|
let (adt_def, num_variants, alloc_id) =
|
||||||
|
self.candidate(tcx, param_env, ty, &mut alloc_cache)?;
|
||||||
|
|
||||||
|
let source_info = st.source_info;
|
||||||
|
let span = source_info.span;
|
||||||
|
|
||||||
|
let tmp_ty = Ty::new_array(tcx, tcx.types.usize, num_variants as u64);
|
||||||
|
let size_array_local = local_decls.push(LocalDecl::new(tmp_ty, span));
|
||||||
|
let store_live =
|
||||||
|
Statement { source_info, kind: StatementKind::StorageLive(size_array_local) };
|
||||||
|
|
||||||
|
let place = Place::from(size_array_local);
|
||||||
|
let constant_vals = ConstOperand {
|
||||||
|
span,
|
||||||
|
user_ty: None,
|
||||||
|
const_: Const::Val(
|
||||||
|
ConstValue::Indirect { alloc_id, offset: Size::ZERO },
|
||||||
|
tmp_ty,
|
||||||
|
),
|
||||||
|
};
|
||||||
|
let rval = Rvalue::Use(Operand::Constant(Box::new(constant_vals)));
|
||||||
|
let const_assign =
|
||||||
|
Statement { source_info, kind: StatementKind::Assign(Box::new((place, rval))) };
|
||||||
|
|
||||||
|
let discr_place = Place::from(
|
||||||
|
local_decls.push(LocalDecl::new(adt_def.repr().discr_type().to_ty(tcx), span)),
|
||||||
|
);
|
||||||
|
let store_discr = Statement {
|
||||||
|
source_info,
|
||||||
|
kind: StatementKind::Assign(Box::new((
|
||||||
|
discr_place,
|
||||||
|
Rvalue::Discriminant(*rhs),
|
||||||
|
))),
|
||||||
|
};
|
||||||
|
|
||||||
|
let discr_cast_place =
|
||||||
|
Place::from(local_decls.push(LocalDecl::new(tcx.types.usize, span)));
|
||||||
|
let cast_discr = Statement {
|
||||||
|
source_info,
|
||||||
|
kind: StatementKind::Assign(Box::new((
|
||||||
|
discr_cast_place,
|
||||||
|
Rvalue::Cast(
|
||||||
|
CastKind::IntToInt,
|
||||||
|
Operand::Copy(discr_place),
|
||||||
|
tcx.types.usize,
|
||||||
|
),
|
||||||
|
))),
|
||||||
|
};
|
||||||
|
|
||||||
|
let size_place =
|
||||||
|
Place::from(local_decls.push(LocalDecl::new(tcx.types.usize, span)));
|
||||||
|
let store_size = Statement {
|
||||||
|
source_info,
|
||||||
|
kind: StatementKind::Assign(Box::new((
|
||||||
|
size_place,
|
||||||
|
Rvalue::Use(Operand::Copy(Place {
|
||||||
|
local: size_array_local,
|
||||||
|
projection: tcx
|
||||||
|
.mk_place_elems(&[PlaceElem::Index(discr_cast_place.local)]),
|
||||||
|
})),
|
||||||
|
))),
|
||||||
|
};
|
||||||
|
|
||||||
|
let dst =
|
||||||
|
Place::from(local_decls.push(LocalDecl::new(Ty::new_mut_ptr(tcx, ty), span)));
|
||||||
|
let dst_ptr = Statement {
|
||||||
|
source_info,
|
||||||
|
kind: StatementKind::Assign(Box::new((
|
||||||
|
dst,
|
||||||
|
Rvalue::RawPtr(Mutability::Mut, *lhs),
|
||||||
|
))),
|
||||||
|
};
|
||||||
|
|
||||||
|
let dst_cast_ty = Ty::new_mut_ptr(tcx, tcx.types.u8);
|
||||||
|
let dst_cast_place =
|
||||||
|
Place::from(local_decls.push(LocalDecl::new(dst_cast_ty, span)));
|
||||||
|
let dst_cast = Statement {
|
||||||
|
source_info,
|
||||||
|
kind: StatementKind::Assign(Box::new((
|
||||||
|
dst_cast_place,
|
||||||
|
Rvalue::Cast(CastKind::PtrToPtr, Operand::Copy(dst), dst_cast_ty),
|
||||||
|
))),
|
||||||
|
};
|
||||||
|
|
||||||
|
let src =
|
||||||
|
Place::from(local_decls.push(LocalDecl::new(Ty::new_imm_ptr(tcx, ty), span)));
|
||||||
|
let src_ptr = Statement {
|
||||||
|
source_info,
|
||||||
|
kind: StatementKind::Assign(Box::new((
|
||||||
|
src,
|
||||||
|
Rvalue::RawPtr(Mutability::Not, *rhs),
|
||||||
|
))),
|
||||||
|
};
|
||||||
|
|
||||||
|
let src_cast_ty = Ty::new_imm_ptr(tcx, tcx.types.u8);
|
||||||
|
let src_cast_place =
|
||||||
|
Place::from(local_decls.push(LocalDecl::new(src_cast_ty, span)));
|
||||||
|
let src_cast = Statement {
|
||||||
|
source_info,
|
||||||
|
kind: StatementKind::Assign(Box::new((
|
||||||
|
src_cast_place,
|
||||||
|
Rvalue::Cast(CastKind::PtrToPtr, Operand::Copy(src), src_cast_ty),
|
||||||
|
))),
|
||||||
|
};
|
||||||
|
|
||||||
|
let deinit_old =
|
||||||
|
Statement { source_info, kind: StatementKind::Deinit(Box::new(dst)) };
|
||||||
|
|
||||||
|
let copy_bytes = Statement {
|
||||||
|
source_info,
|
||||||
|
kind: StatementKind::Intrinsic(Box::new(
|
||||||
|
NonDivergingIntrinsic::CopyNonOverlapping(CopyNonOverlapping {
|
||||||
|
src: Operand::Copy(src_cast_place),
|
||||||
|
dst: Operand::Copy(dst_cast_place),
|
||||||
|
count: Operand::Copy(size_place),
|
||||||
|
}),
|
||||||
|
)),
|
||||||
|
};
|
||||||
|
|
||||||
|
let store_dead =
|
||||||
|
Statement { source_info, kind: StatementKind::StorageDead(size_array_local) };
|
||||||
|
|
||||||
|
let iter = [
|
||||||
|
store_live,
|
||||||
|
const_assign,
|
||||||
|
store_discr,
|
||||||
|
cast_discr,
|
||||||
|
store_size,
|
||||||
|
dst_ptr,
|
||||||
|
dst_cast,
|
||||||
|
src_ptr,
|
||||||
|
src_cast,
|
||||||
|
deinit_old,
|
||||||
|
copy_bytes,
|
||||||
|
store_dead,
|
||||||
|
]
|
||||||
|
.into_iter();
|
||||||
|
|
||||||
|
st.make_nop();
|
||||||
|
|
||||||
|
Some(iter)
|
||||||
|
});
|
||||||
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -82,6 +244,8 @@ impl EnumSizeOpt {
|
||||||
let ptr_sized_int = data_layout.ptr_sized_integer();
|
let ptr_sized_int = data_layout.ptr_sized_integer();
|
||||||
let target_bytes = ptr_sized_int.size().bytes() as usize;
|
let target_bytes = ptr_sized_int.size().bytes() as usize;
|
||||||
let mut data = vec![0; target_bytes * num_discrs];
|
let mut data = vec![0; target_bytes * num_discrs];
|
||||||
|
|
||||||
|
// We use a macro because `$bytes` can be u32 or u64.
|
||||||
macro_rules! encode_store {
|
macro_rules! encode_store {
|
||||||
($curr_idx: expr, $endian: expr, $bytes: expr) => {
|
($curr_idx: expr, $endian: expr, $bytes: expr) => {
|
||||||
let bytes = match $endian {
|
let bytes = match $endian {
|
||||||
|
@ -116,184 +280,4 @@ impl EnumSizeOpt {
|
||||||
let alloc = tcx.reserve_and_set_memory_alloc(tcx.mk_const_alloc(alloc));
|
let alloc = tcx.reserve_and_set_memory_alloc(tcx.mk_const_alloc(alloc));
|
||||||
Some((*adt_def, num_discrs, *alloc_cache.entry(ty).or_insert(alloc)))
|
Some((*adt_def, num_discrs, *alloc_cache.entry(ty).or_insert(alloc)))
|
||||||
}
|
}
|
||||||
fn optim<'tcx>(&self, tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
|
|
||||||
let mut alloc_cache = FxHashMap::default();
|
|
||||||
let body_did = body.source.def_id();
|
|
||||||
let param_env = tcx.param_env_reveal_all_normalized(body_did);
|
|
||||||
|
|
||||||
let blocks = body.basic_blocks.as_mut();
|
|
||||||
let local_decls = &mut body.local_decls;
|
|
||||||
|
|
||||||
for bb in blocks {
|
|
||||||
bb.expand_statements(|st| {
|
|
||||||
if let StatementKind::Assign(box (
|
|
||||||
lhs,
|
|
||||||
Rvalue::Use(Operand::Copy(rhs) | Operand::Move(rhs)),
|
|
||||||
)) = &st.kind
|
|
||||||
{
|
|
||||||
let ty = lhs.ty(local_decls, tcx).ty;
|
|
||||||
|
|
||||||
let source_info = st.source_info;
|
|
||||||
let span = source_info.span;
|
|
||||||
|
|
||||||
let (adt_def, num_variants, alloc_id) =
|
|
||||||
self.candidate(tcx, param_env, ty, &mut alloc_cache)?;
|
|
||||||
|
|
||||||
let tmp_ty = Ty::new_array(tcx, tcx.types.usize, num_variants as u64);
|
|
||||||
|
|
||||||
let size_array_local = local_decls.push(LocalDecl::new(tmp_ty, span));
|
|
||||||
let store_live = Statement {
|
|
||||||
source_info,
|
|
||||||
kind: StatementKind::StorageLive(size_array_local),
|
|
||||||
};
|
|
||||||
|
|
||||||
let place = Place::from(size_array_local);
|
|
||||||
let constant_vals = ConstOperand {
|
|
||||||
span,
|
|
||||||
user_ty: None,
|
|
||||||
const_: Const::Val(
|
|
||||||
ConstValue::Indirect { alloc_id, offset: Size::ZERO },
|
|
||||||
tmp_ty,
|
|
||||||
),
|
|
||||||
};
|
|
||||||
let rval = Rvalue::Use(Operand::Constant(Box::new(constant_vals)));
|
|
||||||
|
|
||||||
let const_assign = Statement {
|
|
||||||
source_info,
|
|
||||||
kind: StatementKind::Assign(Box::new((place, rval))),
|
|
||||||
};
|
|
||||||
|
|
||||||
let discr_place = Place::from(
|
|
||||||
local_decls
|
|
||||||
.push(LocalDecl::new(adt_def.repr().discr_type().to_ty(tcx), span)),
|
|
||||||
);
|
|
||||||
|
|
||||||
let store_discr = Statement {
|
|
||||||
source_info,
|
|
||||||
kind: StatementKind::Assign(Box::new((
|
|
||||||
discr_place,
|
|
||||||
Rvalue::Discriminant(*rhs),
|
|
||||||
))),
|
|
||||||
};
|
|
||||||
|
|
||||||
let discr_cast_place =
|
|
||||||
Place::from(local_decls.push(LocalDecl::new(tcx.types.usize, span)));
|
|
||||||
|
|
||||||
let cast_discr = Statement {
|
|
||||||
source_info,
|
|
||||||
kind: StatementKind::Assign(Box::new((
|
|
||||||
discr_cast_place,
|
|
||||||
Rvalue::Cast(
|
|
||||||
CastKind::IntToInt,
|
|
||||||
Operand::Copy(discr_place),
|
|
||||||
tcx.types.usize,
|
|
||||||
),
|
|
||||||
))),
|
|
||||||
};
|
|
||||||
|
|
||||||
let size_place =
|
|
||||||
Place::from(local_decls.push(LocalDecl::new(tcx.types.usize, span)));
|
|
||||||
|
|
||||||
let store_size = Statement {
|
|
||||||
source_info,
|
|
||||||
kind: StatementKind::Assign(Box::new((
|
|
||||||
size_place,
|
|
||||||
Rvalue::Use(Operand::Copy(Place {
|
|
||||||
local: size_array_local,
|
|
||||||
projection: tcx
|
|
||||||
.mk_place_elems(&[PlaceElem::Index(discr_cast_place.local)]),
|
|
||||||
})),
|
|
||||||
))),
|
|
||||||
};
|
|
||||||
|
|
||||||
let dst = Place::from(
|
|
||||||
local_decls.push(LocalDecl::new(Ty::new_mut_ptr(tcx, ty), span)),
|
|
||||||
);
|
|
||||||
|
|
||||||
let dst_ptr = Statement {
|
|
||||||
source_info,
|
|
||||||
kind: StatementKind::Assign(Box::new((
|
|
||||||
dst,
|
|
||||||
Rvalue::RawPtr(Mutability::Mut, *lhs),
|
|
||||||
))),
|
|
||||||
};
|
|
||||||
|
|
||||||
let dst_cast_ty = Ty::new_mut_ptr(tcx, tcx.types.u8);
|
|
||||||
let dst_cast_place =
|
|
||||||
Place::from(local_decls.push(LocalDecl::new(dst_cast_ty, span)));
|
|
||||||
|
|
||||||
let dst_cast = Statement {
|
|
||||||
source_info,
|
|
||||||
kind: StatementKind::Assign(Box::new((
|
|
||||||
dst_cast_place,
|
|
||||||
Rvalue::Cast(CastKind::PtrToPtr, Operand::Copy(dst), dst_cast_ty),
|
|
||||||
))),
|
|
||||||
};
|
|
||||||
|
|
||||||
let src = Place::from(
|
|
||||||
local_decls.push(LocalDecl::new(Ty::new_imm_ptr(tcx, ty), span)),
|
|
||||||
);
|
|
||||||
|
|
||||||
let src_ptr = Statement {
|
|
||||||
source_info,
|
|
||||||
kind: StatementKind::Assign(Box::new((
|
|
||||||
src,
|
|
||||||
Rvalue::RawPtr(Mutability::Not, *rhs),
|
|
||||||
))),
|
|
||||||
};
|
|
||||||
|
|
||||||
let src_cast_ty = Ty::new_imm_ptr(tcx, tcx.types.u8);
|
|
||||||
let src_cast_place =
|
|
||||||
Place::from(local_decls.push(LocalDecl::new(src_cast_ty, span)));
|
|
||||||
|
|
||||||
let src_cast = Statement {
|
|
||||||
source_info,
|
|
||||||
kind: StatementKind::Assign(Box::new((
|
|
||||||
src_cast_place,
|
|
||||||
Rvalue::Cast(CastKind::PtrToPtr, Operand::Copy(src), src_cast_ty),
|
|
||||||
))),
|
|
||||||
};
|
|
||||||
|
|
||||||
let deinit_old =
|
|
||||||
Statement { source_info, kind: StatementKind::Deinit(Box::new(dst)) };
|
|
||||||
|
|
||||||
let copy_bytes = Statement {
|
|
||||||
source_info,
|
|
||||||
kind: StatementKind::Intrinsic(Box::new(
|
|
||||||
NonDivergingIntrinsic::CopyNonOverlapping(CopyNonOverlapping {
|
|
||||||
src: Operand::Copy(src_cast_place),
|
|
||||||
dst: Operand::Copy(dst_cast_place),
|
|
||||||
count: Operand::Copy(size_place),
|
|
||||||
}),
|
|
||||||
)),
|
|
||||||
};
|
|
||||||
|
|
||||||
let store_dead = Statement {
|
|
||||||
source_info,
|
|
||||||
kind: StatementKind::StorageDead(size_array_local),
|
|
||||||
};
|
|
||||||
let iter = [
|
|
||||||
store_live,
|
|
||||||
const_assign,
|
|
||||||
store_discr,
|
|
||||||
cast_discr,
|
|
||||||
store_size,
|
|
||||||
dst_ptr,
|
|
||||||
dst_cast,
|
|
||||||
src_ptr,
|
|
||||||
src_cast,
|
|
||||||
deinit_old,
|
|
||||||
copy_bytes,
|
|
||||||
store_dead,
|
|
||||||
]
|
|
||||||
.into_iter();
|
|
||||||
|
|
||||||
st.make_nop();
|
|
||||||
Some(iter)
|
|
||||||
} else {
|
|
||||||
None
|
|
||||||
}
|
|
||||||
});
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
|
@@ -87,6 +87,7 @@ mod match_branches;
 mod mentioned_items;
 mod multiple_return_terminators;
 mod nrvo;
+mod post_drop_elaboration;
 mod prettify;
 mod promote_consts;
 mod ref_prop;

@@ -168,8 +169,9 @@ fn remap_mir_for_const_eval_select<'tcx>(
         let (method, place): (fn(Place<'tcx>) -> Operand<'tcx>, Place<'tcx>) =
             match tupled_args.node {
                 Operand::Constant(_) => {
-                    // there is no good way of extracting a tuple arg from a constant (const generic stuff)
-                    // so we just create a temporary and deconstruct that.
+                    // There is no good way of extracting a tuple arg from a constant
+                    // (const generic stuff) so we just create a temporary and deconstruct
+                    // that.
                     let local = body.local_decls.push(LocalDecl::new(ty, fn_span));
                     bb.statements.push(Statement {
                         source_info: SourceInfo::outermost(fn_span),

@@ -222,14 +224,14 @@ fn is_mir_available(tcx: TyCtxt<'_>, def_id: LocalDefId) -> bool {
 /// MIR associated with them.
 fn mir_keys(tcx: TyCtxt<'_>, (): ()) -> FxIndexSet<LocalDefId> {
     // All body-owners have MIR associated with them.
-    let mut set: FxIndexSet<_> = tcx.hir().body_owners().collect();
+    let set: FxIndexSet<_> = tcx.hir().body_owners().collect();

     // Additionally, tuple struct/variant constructors have MIR, but
     // they don't have a BodyId, so we need to build them separately.
-    struct GatherCtors<'a> {
-        set: &'a mut FxIndexSet<LocalDefId>,
+    struct GatherCtors {
+        set: FxIndexSet<LocalDefId>,
     }
-    impl<'tcx> Visitor<'tcx> for GatherCtors<'_> {
+    impl<'tcx> Visitor<'tcx> for GatherCtors {
         fn visit_variant_data(&mut self, v: &'tcx hir::VariantData<'tcx>) {
             if let hir::VariantData::Tuple(_, _, def_id) = *v {
                 self.set.insert(def_id);

@@ -237,9 +239,11 @@ fn mir_keys(tcx: TyCtxt<'_>, (): ()) -> FxIndexSet<LocalDefId> {
             intravisit::walk_struct_def(self, v)
         }
     }
-    tcx.hir().visit_all_item_likes_in_crate(&mut GatherCtors { set: &mut set });

-    set
+    let mut gather_ctors = GatherCtors { set };
+    tcx.hir().visit_all_item_likes_in_crate(&mut gather_ctors);
+
+    gather_ctors.set
 }

 fn mir_const_qualif(tcx: TyCtxt<'_>, def: LocalDefId) -> ConstQualifs {

@@ -477,10 +481,13 @@ pub fn run_analysis_to_runtime_passes<'tcx>(tcx: TyCtxt<'tcx>, body: &mut Body<'
         pm::run_passes(
             tcx,
             body,
-            &[&remove_uninit_drops::RemoveUninitDrops, &simplify::SimplifyCfg::RemoveFalseEdges],
+            &[
+                &remove_uninit_drops::RemoveUninitDrops,
+                &simplify::SimplifyCfg::RemoveFalseEdges,
+                &Lint(post_drop_elaboration::CheckLiveDrops),
+            ],
             None,
         );
-        check_consts::post_drop_elaboration::check_live_drops(tcx, body); // FIXME: make this a MIR lint
     }

     debug!("runtime_mir_lowering({:?})", did);

@@ -509,10 +516,12 @@ fn run_analysis_cleanup_passes<'tcx>(tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
 /// Returns the sequence of passes that lowers analysis to runtime MIR.
 fn run_runtime_lowering_passes<'tcx>(tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
     let passes: &[&dyn MirPass<'tcx>] = &[
-        // These next passes must be executed together
+        // These next passes must be executed together.
         &add_call_guards::CriticalCallEdges,
-        &reveal_all::RevealAll, // has to be done before drop elaboration, since we need to drop opaque types, too.
-        &add_subtyping_projections::Subtyper, // calling this after reveal_all ensures that we don't deal with opaque types
+        // Must be done before drop elaboration because we need to drop opaque types, too.
+        &reveal_all::RevealAll,
+        // Calling this after reveal_all ensures that we don't deal with opaque types.
+        &add_subtyping_projections::Subtyper,
         &elaborate_drops::ElaborateDrops,
         // This will remove extraneous landing pads which are no longer
         // necessary as well as forcing any call in a non-unwinding

@@ -521,8 +530,8 @@ fn run_runtime_lowering_passes<'tcx>(tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
         // AddMovesForPackedDrops needs to run after drop
         // elaboration.
         &add_moves_for_packed_drops::AddMovesForPackedDrops,
-        // `AddRetag` needs to run after `ElaborateDrops` but before `ElaborateBoxDerefs`. Otherwise it should run fairly late,
-        // but before optimizations begin.
+        // `AddRetag` needs to run after `ElaborateDrops` but before `ElaborateBoxDerefs`.
+        // Otherwise it should run fairly late, but before optimizations begin.
         &add_retag::AddRetag,
         &elaborate_box_derefs::ElaborateBoxDerefs,
         &coroutine::StateTransform,

@@ -563,13 +572,15 @@ fn run_optimization_passes<'tcx>(tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
         // Before inlining: trim down MIR with passes to reduce inlining work.

         // Has to be done before inlining, otherwise actual call will be almost always inlined.
-        // Also simple, so can just do first
+        // Also simple, so can just do first.
         &lower_slice_len::LowerSliceLenCalls,
-        // Perform instsimplify before inline to eliminate some trivial calls (like clone shims).
+        // Perform instsimplify before inline to eliminate some trivial calls (like clone
+        // shims).
         &instsimplify::InstSimplify::BeforeInline,
         // Perform inlining, which may add a lot of code.
         &inline::Inline,
-        // Code from other crates may have storage markers, so this needs to happen after inlining.
+        // Code from other crates may have storage markers, so this needs to happen after
+        // inlining.
         &remove_storage_markers::RemoveStorageMarkers,
         // Inlining and instantiation may introduce ZST and useless drops.
         &remove_zsts::RemoveZsts,

@@ -586,7 +597,8 @@ fn run_optimization_passes<'tcx>(tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
         &match_branches::MatchBranchSimplification,
         // inst combine is after MatchBranchSimplification to clean up Ne(_1, false)
         &multiple_return_terminators::MultipleReturnTerminators,
-        // After simplifycfg, it allows us to discover new opportunities for peephole optimizations.
+        // After simplifycfg, it allows us to discover new opportunities for peephole
+        // optimizations.
         &instsimplify::InstSimplify::AfterSimplifyCfg,
         &simplify::SimplifyLocals::BeforeConstProp,
         &dead_store_elimination::DeadStoreElimination::Initial,
@@ -13,22 +13,18 @@ impl<'tcx> crate::MirPass<'tcx> for LowerSliceLenCalls {
     }

     fn run_pass(&self, tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
-        lower_slice_len_calls(tcx, body)
-    }
-}
-
-fn lower_slice_len_calls<'tcx>(tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
-    let language_items = tcx.lang_items();
-    let Some(slice_len_fn_item_def_id) = language_items.slice_len_fn() else {
-        // there is no lang item to compare to :)
-        return;
-    };
-
-    // The one successor remains unchanged, so no need to invalidate
-    let basic_blocks = body.basic_blocks.as_mut_preserves_cfg();
-    for block in basic_blocks {
-        // lower `<[_]>::len` calls
-        lower_slice_len_call(block, slice_len_fn_item_def_id);
+        let language_items = tcx.lang_items();
+        let Some(slice_len_fn_item_def_id) = language_items.slice_len_fn() else {
+            // there is no lang item to compare to :)
+            return;
+        };
+
+        // The one successor remains unchanged, so no need to invalidate
+        let basic_blocks = body.basic_blocks.as_mut_preserves_cfg();
+        for block in basic_blocks {
+            // lower `<[_]>::len` calls
+            lower_slice_len_call(block, slice_len_fn_item_def_id);
+        }
     }
 }
@@ -57,8 +57,9 @@ impl<'tcx> crate::MirPass<'tcx> for MatchBranchSimplification {
 }

 trait SimplifyMatch<'tcx> {
-    /// Simplifies a match statement, returning true if the simplification succeeds, false otherwise.
-    /// Generic code is written here, and we generally don't need a custom implementation.
+    /// Simplifies a match statement, returning true if the simplification succeeds, false
+    /// otherwise. Generic code is written here, and we generally don't need a custom
+    /// implementation.
     fn simplify(
         &mut self,
         tcx: TyCtxt<'tcx>,

@@ -240,7 +241,8 @@ impl<'tcx> SimplifyMatch<'tcx> for SimplifyToIf {
                     // Same value in both blocks. Use statement as is.
                     patch.add_statement(parent_end, f.kind.clone());
                 } else {
-                    // Different value between blocks. Make value conditional on switch condition.
+                    // Different value between blocks. Make value conditional on switch
+                    // condition.
                     let size = tcx.layout_of(param_env.and(discr_ty)).unwrap().size;
                     let const_cmp = Operand::const_from_scalar(
                         tcx,

@@ -394,14 +396,16 @@ impl<'tcx> SimplifyMatch<'tcx> for SimplifyToExp {
             return None;
         }

-        // We first compare the two branches, and then the other branches need to fulfill the same conditions.
+        // We first compare the two branches, and then the other branches need to fulfill the same
+        // conditions.
         let mut expected_transform_kinds = Vec::new();
         for (f, s) in iter::zip(first_stmts, second_stmts) {
             let compare_type = match (&f.kind, &s.kind) {
                 // If two statements are exactly the same, we can optimize.
                 (f_s, s_s) if f_s == s_s => ExpectedTransformKind::Same(f_s),

-                // If two statements are assignments with the match values to the same place, we can optimize.
+                // If two statements are assignments with the match values to the same place, we
+                // can optimize.
                 (
                     StatementKind::Assign(box (lhs_f, Rvalue::Use(Operand::Constant(f_c)))),
                     StatementKind::Assign(box (lhs_s, Rvalue::Use(Operand::Constant(s_c)))),
@@ -10,7 +10,7 @@ pub(super) struct MentionedItems;
 struct MentionedItemsVisitor<'a, 'tcx> {
     tcx: TyCtxt<'tcx>,
     body: &'a mir::Body<'tcx>,
-    mentioned_items: &'a mut Vec<Spanned<MentionedItem<'tcx>>>,
+    mentioned_items: Vec<Spanned<MentionedItem<'tcx>>>,
 }

 impl<'tcx> crate::MirPass<'tcx> for MentionedItems {

@@ -23,9 +23,9 @@ impl<'tcx> crate::MirPass<'tcx> for MentionedItems {
     }

     fn run_pass(&self, tcx: TyCtxt<'tcx>, body: &mut mir::Body<'tcx>) {
-        let mut mentioned_items = Vec::new();
-        MentionedItemsVisitor { tcx, body, mentioned_items: &mut mentioned_items }.visit_body(body);
-        body.set_mentioned_items(mentioned_items);
+        let mut visitor = MentionedItemsVisitor { tcx, body, mentioned_items: Vec::new() };
+        visitor.visit_body(body);
+        body.set_mentioned_items(visitor.mentioned_items);
     }
 }

@@ -82,7 +82,9 @@ impl<'tcx> Visitor<'tcx> for MentionedItemsVisitor<'_, 'tcx> {
             source_ty.builtin_deref(true).map(|t| t.kind()),
             target_ty.builtin_deref(true).map(|t| t.kind()),
         ) {
-            (Some(ty::Array(..)), Some(ty::Str | ty::Slice(..))) => false, // &str/&[T] unsizing
+            // &str/&[T] unsizing
+            (Some(ty::Array(..)), Some(ty::Str | ty::Slice(..))) => false,
+
             _ => true,
         };
         if may_involve_vtable {
compiler/rustc_mir_transform/src/post_drop_elaboration.rs (new file, 13 lines)

@@ -0,0 +1,13 @@
+use rustc_const_eval::check_consts;
+use rustc_middle::mir::*;
+use rustc_middle::ty::TyCtxt;
+
+use crate::MirLint;
+
+pub(super) struct CheckLiveDrops;
+
+impl<'tcx> MirLint<'tcx> for CheckLiveDrops {
+    fn run_lint(&self, tcx: TyCtxt<'tcx>, body: &Body<'tcx>) {
+        check_consts::post_drop_elaboration::check_live_drops(tcx, body);
+    }
+}
@@ -63,7 +63,7 @@ impl<'tcx> crate::MirPass<'tcx> for ReorderLocals {
             finder.visit_basic_block_data(bb, bbd);
         }

-        // track everything in case there are some locals that we never saw,
+        // Track everything in case there are some locals that we never saw,
         // such as in non-block things like debug info or in non-uses.
         for local in body.local_decls.indices() {
             finder.track(local);

@@ -87,7 +87,7 @@ impl<'tcx> crate::MirPass<'tcx> for ReorderLocals {

 fn permute<I: rustc_index::Idx + Ord, T>(data: &mut IndexVec<I, T>, map: &IndexSlice<I, I>) {
     // FIXME: It would be nice to have a less-awkward way to apply permutations,
     // but I don't know one that exists. `sort_by_cached_key` has logic for it
     // internally, but not in a way that we're allowed to use here.
     let mut enumerated: Vec<_> = std::mem::take(data).into_iter_enumerated().collect();
     enumerated.sort_by_key(|p| map[p.0]);
@@ -1,16 +1,14 @@
 //! A pass that promotes borrows of constant rvalues.
 //!
-//! The rvalues considered constant are trees of temps,
-//! each with exactly one initialization, and holding
-//! a constant value with no interior mutability.
-//! They are placed into a new MIR constant body in
-//! `promoted` and the borrow rvalue is replaced with
-//! a `Literal::Promoted` using the index into `promoted`
-//! of that constant MIR.
+//! The rvalues considered constant are trees of temps, each with exactly one
+//! initialization, and holding a constant value with no interior mutability.
+//! They are placed into a new MIR constant body in `promoted` and the borrow
+//! rvalue is replaced with a `Literal::Promoted` using the index into
+//! `promoted` of that constant MIR.
 //!
-//! This pass assumes that every use is dominated by an
-//! initialization and can otherwise silence errors, if
-//! move analysis runs after promotion on broken MIR.
+//! This pass assumes that every use is dominated by an initialization and can
+//! otherwise silence errors, if move analysis runs after promotion on broken
+//! MIR.

 use std::assert_matches::assert_matches;
 use std::cell::Cell;

@@ -38,6 +36,7 @@ use tracing::{debug, instrument};
 /// newly created `Constant`.
 #[derive(Default)]
 pub(super) struct PromoteTemps<'tcx> {
+    // Must use `Cell` because `run_pass` takes `&self`, not `&mut self`.
     pub promoted_fragments: Cell<IndexVec<Promoted, Body<'tcx>>>,
 }

@@ -386,7 +385,8 @@ impl<'tcx> Validator<'_, 'tcx> {
     fn validate_ref(&mut self, kind: BorrowKind, place: &Place<'tcx>) -> Result<(), Unpromotable> {
         match kind {
             // Reject these borrow types just to be safe.
-            // FIXME(RalfJung): could we allow them? Should we? No point in it until we have a usecase.
+            // FIXME(RalfJung): could we allow them? Should we? No point in it until we have a
+            // usecase.
             BorrowKind::Fake(_) | BorrowKind::Mut { kind: MutBorrowKind::ClosureCapture } => {
                 return Err(Unpromotable);
             }

@@ -468,7 +468,8 @@ impl<'tcx> Validator<'_, 'tcx> {
                 let lhs_ty = lhs.ty(self.body, self.tcx);

                 if let ty::RawPtr(_, _) | ty::FnPtr(..) = lhs_ty.kind() {
-                    // Raw and fn pointer operations are not allowed inside consts and thus not promotable.
+                    // Raw and fn pointer operations are not allowed inside consts and thus not
+                    // promotable.
                     assert_matches!(
                         op,
                         BinOp::Eq

@@ -498,7 +499,8 @@ impl<'tcx> Validator<'_, 'tcx> {
                             Some(x) if x != 0 => {} // okay
                             _ => return Err(Unpromotable), // value not known or 0 -- not okay
                         }
-                        // Furthermore, for signed division, we also have to exclude `int::MIN / -1`.
+                        // Furthermore, for signed division, we also have to exclude `int::MIN /
+                        // -1`.
                         if lhs_ty.is_signed() {
                             match rhs_val.map(|x| x.to_int(sz)) {
                                 Some(-1) | None => {

@@ -512,8 +514,11 @@ impl<'tcx> Validator<'_, 'tcx> {
                                 };
                                 let lhs_min = sz.signed_int_min();
                                 match lhs_val.map(|x| x.to_int(sz)) {
-                                    Some(x) if x != lhs_min => {} // okay
-                                    _ => return Err(Unpromotable), // value not known or int::MIN -- not okay
+                                    // okay
+                                    Some(x) if x != lhs_min => {}
+
+                                    // value not known or int::MIN -- not okay
+                                    _ => return Err(Unpromotable),
                                 }
                             }
                             _ => {}

@@ -815,8 +820,8 @@ impl<'a, 'tcx> Promoter<'a, 'tcx> {
             TerminatorKind::Call {
                 mut func, mut args, call_source: desugar, fn_span, ..
             } => {
-                // This promoted involves a function call, so it may fail to evaluate.
-                // Let's make sure it is added to `required_consts` so that failure cannot get lost.
+                // This promoted involves a function call, so it may fail to evaluate. Let's
+                // make sure it is added to `required_consts` so that failure cannot get lost.
                 self.add_to_required = true;

                 self.visit_operand(&mut func, loc);
@@ -253,11 +253,8 @@ fn compute_replacement<'tcx>(

     debug!(?targets);

-    let mut finder = ReplacementFinder {
-        targets: &mut targets,
-        can_perform_opt,
-        allowed_replacements: FxHashSet::default(),
-    };
+    let mut finder =
+        ReplacementFinder { targets, can_perform_opt, allowed_replacements: FxHashSet::default() };
     let reachable_blocks = traversal::reachable_as_bitset(body);
     for (bb, bbdata) in body.basic_blocks.iter_enumerated() {
         // Only visit reachable blocks as we rely on dataflow.

@@ -269,19 +266,19 @@ fn compute_replacement<'tcx>(
     let allowed_replacements = finder.allowed_replacements;
     return Replacer {
         tcx,
-        targets,
+        targets: finder.targets,
         storage_to_remove,
         allowed_replacements,
         any_replacement: false,
     };

-    struct ReplacementFinder<'a, 'tcx, F> {
-        targets: &'a mut IndexVec<Local, Value<'tcx>>,
+    struct ReplacementFinder<'tcx, F> {
+        targets: IndexVec<Local, Value<'tcx>>,
         can_perform_opt: F,
         allowed_replacements: FxHashSet<(Local, Location)>,
     }

-    impl<'tcx, F> Visitor<'tcx> for ReplacementFinder<'_, 'tcx, F>
+    impl<'tcx, F> Visitor<'tcx> for ReplacementFinder<'tcx, F>
     where
         F: FnMut(Place<'tcx>, Location) -> bool,
     {
@@ -18,7 +18,61 @@ impl<'tcx> crate::MirPass<'tcx> for RemoveNoopLandingPads {
     fn run_pass(&self, _tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
         let def_id = body.source.def_id();
         debug!(?def_id);
-        self.remove_nop_landing_pads(body)
+
+        // Skip the pass if there are no blocks with a resume terminator.
+        let has_resume = body
+            .basic_blocks
+            .iter_enumerated()
+            .any(|(_bb, block)| matches!(block.terminator().kind, TerminatorKind::UnwindResume));
+        if !has_resume {
+            debug!("remove_noop_landing_pads: no resume block in MIR");
+            return;
+        }
+
+        // make sure there's a resume block without any statements
+        let resume_block = {
+            let mut patch = MirPatch::new(body);
+            let resume_block = patch.resume_block();
+            patch.apply(body);
+            resume_block
+        };
+        debug!("remove_noop_landing_pads: resume block is {:?}", resume_block);
+
+        let mut jumps_folded = 0;
+        let mut landing_pads_removed = 0;
+        let mut nop_landing_pads = BitSet::new_empty(body.basic_blocks.len());
+
+        // This is a post-order traversal, so that if A post-dominates B
+        // then A will be visited before B.
+        let postorder: Vec<_> = traversal::postorder(body).map(|(bb, _)| bb).collect();
+        for bb in postorder {
+            debug!(" processing {:?}", bb);
+            if let Some(unwind) = body[bb].terminator_mut().unwind_mut() {
+                if let UnwindAction::Cleanup(unwind_bb) = *unwind {
+                    if nop_landing_pads.contains(unwind_bb) {
+                        debug!(" removing noop landing pad");
+                        landing_pads_removed += 1;
+                        *unwind = UnwindAction::Continue;
+                    }
+                }
+            }
+
+            for target in body[bb].terminator_mut().successors_mut() {
+                if *target != resume_block && nop_landing_pads.contains(*target) {
+                    debug!(" folding noop jump to {:?} to resume block", target);
+                    *target = resume_block;
+                    jumps_folded += 1;
+                }
+            }
+
+            let is_nop_landing_pad = self.is_nop_landing_pad(bb, body, &nop_landing_pads);
+            if is_nop_landing_pad {
+                nop_landing_pads.insert(bb);
+            }
+            debug!(" is_nop_landing_pad({:?}) = {}", bb, is_nop_landing_pad);
+        }
+
+        debug!("removed {:?} jumps and {:?} landing pads", jumps_folded, landing_pads_removed);
     }
 }

@@ -82,61 +136,4 @@ impl RemoveNoopLandingPads {
             | TerminatorKind::InlineAsm { .. } => false,
         }
     }
-
-    fn remove_nop_landing_pads(&self, body: &mut Body<'_>) {
-        // Skip the pass if there are no blocks with a resume terminator.
-        let has_resume = body
-            .basic_blocks
-            .iter_enumerated()
-            .any(|(_bb, block)| matches!(block.terminator().kind, TerminatorKind::UnwindResume));
-        if !has_resume {
-            debug!("remove_noop_landing_pads: no resume block in MIR");
-            return;
-        }
-
-        // make sure there's a resume block without any statements
-        let resume_block = {
-            let mut patch = MirPatch::new(body);
-            let resume_block = patch.resume_block();
-            patch.apply(body);
-            resume_block
-        };
-        debug!("remove_noop_landing_pads: resume block is {:?}", resume_block);
-
-        let mut jumps_folded = 0;
-        let mut landing_pads_removed = 0;
-        let mut nop_landing_pads = BitSet::new_empty(body.basic_blocks.len());
-
-        // This is a post-order traversal, so that if A post-dominates B
-        // then A will be visited before B.
-        let postorder: Vec<_> = traversal::postorder(body).map(|(bb, _)| bb).collect();
-        for bb in postorder {
-            debug!(" processing {:?}", bb);
-            if let Some(unwind) = body[bb].terminator_mut().unwind_mut() {
-                if let UnwindAction::Cleanup(unwind_bb) = *unwind {
-                    if nop_landing_pads.contains(unwind_bb) {
-                        debug!(" removing noop landing pad");
-                        landing_pads_removed += 1;
-                        *unwind = UnwindAction::Continue;
-                    }
-                }
-            }
-
-            for target in body[bb].terminator_mut().successors_mut() {
-                if *target != resume_block && nop_landing_pads.contains(*target) {
-                    debug!(" folding noop jump to {:?} to resume block", target);
-                    *target = resume_block;
-                    jumps_folded += 1;
-                }
-            }
-
-            let is_nop_landing_pad = self.is_nop_landing_pad(bb, body, &nop_landing_pads);
-            if is_nop_landing_pad {
-                nop_landing_pads.insert(bb);
-            }
-            debug!(" is_nop_landing_pad({:?}) = {}", bb, is_nop_landing_pad);
-        }
-
-        debug!("removed {:?} jumps and {:?} landing pads", jumps_folded, landing_pads_removed);
-    }
 }
@@ -106,8 +106,9 @@ fn is_needs_drop_and_init<'tcx>(
             // If its projection *is* present in `MoveData`, then the field may have been moved
             // from separate from its parent. Recurse.
             adt.variants().iter_enumerated().any(|(vid, variant)| {
-                // Enums have multiple variants, which are discriminated with a `Downcast` projection.
-                // Structs have a single variant, and don't use a `Downcast` projection.
+                // Enums have multiple variants, which are discriminated with a `Downcast`
+                // projection. Structs have a single variant, and don't use a `Downcast`
+                // projection.
                 let mpi = if adt.is_enum() {
                     let downcast =
                         move_path_children_matching(move_data, mpi, |x| x.is_downcast_to(vid));
@@ -1,26 +1,21 @@
 use rustc_middle::mir::visit::Visitor;
 use rustc_middle::mir::{traversal, Body, ConstOperand, Location};

-pub(super) struct RequiredConstsVisitor<'a, 'tcx> {
-    required_consts: &'a mut Vec<ConstOperand<'tcx>>,
+pub(super) struct RequiredConstsVisitor<'tcx> {
+    required_consts: Vec<ConstOperand<'tcx>>,
 }

-impl<'a, 'tcx> RequiredConstsVisitor<'a, 'tcx> {
-    fn new(required_consts: &'a mut Vec<ConstOperand<'tcx>>) -> Self {
-        RequiredConstsVisitor { required_consts }
-    }
-
+impl<'tcx> RequiredConstsVisitor<'tcx> {
     pub(super) fn compute_required_consts(body: &mut Body<'tcx>) {
-        let mut required_consts = Vec::new();
-        let mut required_consts_visitor = RequiredConstsVisitor::new(&mut required_consts);
+        let mut visitor = RequiredConstsVisitor { required_consts: Vec::new() };
         for (bb, bb_data) in traversal::reverse_postorder(&body) {
-            required_consts_visitor.visit_basic_block_data(bb, bb_data);
+            visitor.visit_basic_block_data(bb, bb_data);
         }
-        body.set_required_consts(required_consts);
+        body.set_required_consts(visitor.required_consts);
     }
 }

-impl<'tcx> Visitor<'tcx> for RequiredConstsVisitor<'_, 'tcx> {
+impl<'tcx> Visitor<'tcx> for RequiredConstsVisitor<'tcx> {
     fn visit_const_operand(&mut self, constant: &ConstOperand<'tcx>, _: Location) {
         if constant.const_.is_required_const() {
             self.required_consts.push(*constant);
@@ -35,9 +35,9 @@ impl<'tcx> MutVisitor<'tcx> for RevealAllVisitor<'tcx> {
         if place.projection.iter().all(|elem| !matches!(elem, ProjectionElem::OpaqueCast(_))) {
             return;
         }
-        // `OpaqueCast` projections are only needed if there are opaque types on which projections are performed.
-        // After the `RevealAll` pass, all opaque types are replaced with their hidden types, so we don't need these
-        // projections anymore.
+        // `OpaqueCast` projections are only needed if there are opaque types on which projections
+        // are performed. After the `RevealAll` pass, all opaque types are replaced with their
+        // hidden types, so we don't need these projections anymore.
         place.projection = self.tcx.mk_place_elems(
             &place
                 .projection
@@ -404,8 +404,7 @@ fn build_thread_local_shim<'tcx>(
     let span = tcx.def_span(def_id);
     let source_info = SourceInfo::outermost(span);

-    let mut blocks = IndexVec::with_capacity(1);
-    blocks.push(BasicBlockData {
+    let blocks = IndexVec::from_raw(vec![BasicBlockData {
         statements: vec![Statement {
             source_info,
             kind: StatementKind::Assign(Box::new((

@@ -415,7 +414,7 @@ fn build_thread_local_shim<'tcx>(
         }],
         terminator: Some(Terminator { source_info, kind: TerminatorKind::Return }),
         is_cleanup: false,
-    });
+    }]);

     new_body(
         MirSource::from_instance(instance),

@@ -1003,7 +1002,8 @@ fn build_fn_ptr_addr_shim<'tcx>(tcx: TyCtxt<'tcx>, def_id: DefId, self_ty: Ty<'t
     let locals = local_decls_for_sig(&sig, span);

     let source_info = SourceInfo::outermost(span);
-    // FIXME: use `expose_provenance` once we figure out whether function pointers have meaningful provenance.
+    // FIXME: use `expose_provenance` once we figure out whether function pointers have meaningful
+    // provenance.
     let rvalue = Rvalue::Cast(
         CastKind::FnPtrToPtr,
         Operand::Move(Place::from(Local::new(1))),
@@ -381,7 +381,29 @@ impl<'tcx> crate::MirPass<'tcx> for SimplifyLocals {

     fn run_pass(&self, tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
         trace!("running SimplifyLocals on {:?}", body.source);
-        simplify_locals(body, tcx);
+
+        // First, we're going to get a count of *actual* uses for every `Local`.
+        let mut used_locals = UsedLocals::new(body);
+
+        // Next, we're going to remove any `Local` with zero actual uses. When we remove those
+        // `Locals`, we're also going to subtract any uses of other `Locals` from the `used_locals`
+        // count. For example, if we removed `_2 = discriminant(_1)`, then we'll subtract one from
+        // `use_counts[_1]`. That in turn might make `_1` unused, so we loop until we hit a
+        // fixedpoint where there are no more unused locals.
+        remove_unused_definitions_helper(&mut used_locals, body);
+
+        // Finally, we'll actually do the work of shrinking `body.local_decls` and remapping the
+        // `Local`s.
+        let map = make_local_map(&mut body.local_decls, &used_locals);
+
+        // Only bother running the `LocalUpdater` if we actually found locals to remove.
+        if map.iter().any(Option::is_none) {
+            // Update references to all vars and tmps now
+            let mut updater = LocalUpdater { map, tcx };
+            updater.visit_body_preserves_cfg(body);
+
+            body.local_decls.shrink_to_fit();
+        }
     }
 }

@@ -397,30 +419,6 @@ pub(super) fn remove_unused_definitions<'tcx>(body: &mut Body<'tcx>) {
     remove_unused_definitions_helper(&mut used_locals, body);
 }

-fn simplify_locals<'tcx>(body: &mut Body<'tcx>, tcx: TyCtxt<'tcx>) {
-    // First, we're going to get a count of *actual* uses for every `Local`.
-    let mut used_locals = UsedLocals::new(body);
-
-    // Next, we're going to remove any `Local` with zero actual uses. When we remove those
-    // `Locals`, we're also going to subtract any uses of other `Locals` from the `used_locals`
-    // count. For example, if we removed `_2 = discriminant(_1)`, then we'll subtract one from
-    // `use_counts[_1]`. That in turn might make `_1` unused, so we loop until we hit a
-    // fixedpoint where there are no more unused locals.
-    remove_unused_definitions_helper(&mut used_locals, body);
-
-    // Finally, we'll actually do the work of shrinking `body.local_decls` and remapping the `Local`s.
-    let map = make_local_map(&mut body.local_decls, &used_locals);
-
-    // Only bother running the `LocalUpdater` if we actually found locals to remove.
-    if map.iter().any(Option::is_none) {
-        // Update references to all vars and tmps now
-        let mut updater = LocalUpdater { map, tcx };
-        updater.visit_body_preserves_cfg(body);
-
-        body.local_decls.shrink_to_fit();
-    }
-}
-
 /// Construct the mapping while swapping out unused stuff out from the `vec`.
 fn make_local_map<V>(
     local_decls: &mut IndexVec<Local, V>,
@@ -73,12 +73,13 @@ impl<'tcx> crate::MirPass<'tcx> for SimplifyComparisonIntegral {
                 _ => unreachable!(),
             }

-            // delete comparison statement if it the value being switched on was moved, which means it can not be user later on
+            // delete comparison statement if it the value being switched on was moved, which means
+            // it can not be user later on
             if opt.can_remove_bin_op_stmt {
                 bb.statements[opt.bin_op_stmt_idx].make_nop();
             } else {
-                // if the integer being compared to a const integral is being moved into the comparison,
-                // e.g `_2 = Eq(move _3, const 'x');`
+                // if the integer being compared to a const integral is being moved into the
+                // comparison, e.g `_2 = Eq(move _3, const 'x');`
                 // we want to avoid making a double move later on in the switchInt on _3.
                 // So to avoid `switchInt(move _3) -> ['x': bb2, otherwise: bb1];`,
                 // we convert the move in the comparison statement to a copy.

@@ -102,12 +103,15 @@ impl<'tcx> crate::MirPass<'tcx> for SimplifyComparisonIntegral {

             // remove StorageDead (if it exists) being used in the assign of the comparison
             for (stmt_idx, stmt) in bb.statements.iter().enumerate() {
-                if !matches!(stmt.kind, StatementKind::StorageDead(local) if local == opt.to_switch_on.local)
-                {
+                if !matches!(
+                    stmt.kind,
+                    StatementKind::StorageDead(local) if local == opt.to_switch_on.local
+                ) {
                     continue;
                 }
                 storage_deads_to_remove.push((stmt_idx, opt.bb_idx));
-                // if we have StorageDeads to remove then make sure to insert them at the top of each target
+                // if we have StorageDeads to remove then make sure to insert them at the top of
+                // each target
                 for bb_idx in new_targets.all_targets() {
                     storage_deads_to_insert.push((
                         *bb_idx,

@@ -207,7 +211,8 @@ fn find_branch_value_info<'tcx>(
         (Constant(branch_value), Copy(to_switch_on) | Move(to_switch_on))
         | (Copy(to_switch_on) | Move(to_switch_on), Constant(branch_value)) => {
             let branch_value_ty = branch_value.const_.ty();
-            // we only want to apply this optimization if we are matching on integrals (and chars), as it is not possible to switch on floats
+            // we only want to apply this optimization if we are matching on integrals (and chars),
+            // as it is not possible to switch on floats
             if !branch_value_ty.is_integral() && !branch_value_ty.is_char() {
                 return None;
             };

@@ -222,7 +227,8 @@ fn find_branch_value_info<'tcx>(
 struct OptimizationInfo<'tcx> {
     /// Basic block to apply the optimization
     bb_idx: BasicBlock,
-    /// Statement index of Eq/Ne assignment that can be removed. None if the assignment can not be removed - i.e the statement is used later on
+    /// Statement index of Eq/Ne assignment that can be removed. None if the assignment can not be
+    /// removed - i.e the statement is used later on
     bin_op_stmt_idx: usize,
     /// Can remove Eq/Ne assignment
     can_remove_bin_op_stmt: bool,
@@ -156,9 +156,9 @@ impl<'tcx> crate::MirPass<'tcx> for UnreachableEnumBranching {
             };
             true
         }
-        // If and only if there is a variant that does not have a branch set,
-        // change the current of otherwise as the variant branch and set otherwise to unreachable.
-        // It transforms following code
+        // If and only if there is a variant that does not have a branch set, change the
+        // current of otherwise as the variant branch and set otherwise to unreachable. It
+        // transforms following code
         // ```rust
         // match c {
         //     Ordering::Less => 1,
@@ -26,7 +26,8 @@ impl crate::MirPass<'_> for UnreachablePropagation {
             let terminator = bb_data.terminator();
             let is_unreachable = match &terminator.kind {
                 TerminatorKind::Unreachable => true,
-                // This will unconditionally run into an unreachable and is therefore unreachable as well.
+                // This will unconditionally run into an unreachable and is therefore unreachable
+                // as well.
                 TerminatorKind::Goto { target } if unreachable_blocks.contains(target) => {
                     patch.patch_terminator(bb, TerminatorKind::Unreachable);
                     true

@@ -85,8 +86,9 @@ fn remove_successors_from_switch<'tcx>(
     //     }
     // }
     //
-    // This generates a `switchInt() -> [0: 0, 1: 1, otherwise: unreachable]`, which allows us or LLVM to
-    // turn it into just `x` later. Without the unreachable, such a transformation would be illegal.
+    // This generates a `switchInt() -> [0: 0, 1: 1, otherwise: unreachable]`, which allows us or
+    // LLVM to turn it into just `x` later. Without the unreachable, such a transformation would be
+    // illegal.
     //
     // In order to preserve this information, we record reachable and unreachable targets as
     // `Assume` statements in MIR.
@@ -388,10 +388,11 @@ impl<'a, 'tcx> Visitor<'tcx> for CfgChecker<'a, 'tcx> {
                 }
                 self.check_unwind_edge(location, unwind);

-                // The code generation assumes that there are no critical call edges. The assumption
-                // is used to simplify inserting code that should be executed along the return edge
-                // from the call. FIXME(tmiasko): Since this is a strictly code generation concern,
-                // the code generation should be responsible for handling it.
+                // The code generation assumes that there are no critical call edges. The
+                // assumption is used to simplify inserting code that should be executed along
+                // the return edge from the call. FIXME(tmiasko): Since this is a strictly code
+                // generation concern, the code generation should be responsible for handling
+                // it.
                 if self.mir_phase >= MirPhase::Runtime(RuntimePhase::Optimized)
                     && self.is_critical_call_edge(target, unwind)
                 {

@@ -404,8 +405,8 @@ impl<'a, 'tcx> Visitor<'tcx> for CfgChecker<'a, 'tcx> {
                     );
                 }

-                // The call destination place and Operand::Move place used as an argument might be
-                // passed by a reference to the callee. Consequently they cannot be packed.
+                // The call destination place and Operand::Move place used as an argument might
+                // be passed by a reference to the callee. Consequently they cannot be packed.
                 if is_within_packed(self.tcx, &self.body.local_decls, destination).is_some() {
                     // This is bad! The callee will expect the memory to be aligned.
                     self.fail(

@@ -953,9 +954,9 @@ impl<'a, 'tcx> Visitor<'tcx> for TypeChecker<'a, 'tcx> {
             }
             AggregateKind::RawPtr(pointee_ty, mutability) => {
                 if !matches!(self.mir_phase, MirPhase::Runtime(_)) {
-                    // It would probably be fine to support this in earlier phases,
-                    // but at the time of writing it's only ever introduced from intrinsic lowering,
-                    // so earlier things just `bug!` on it.
+                    // It would probably be fine to support this in earlier phases, but at the
+                    // time of writing it's only ever introduced from intrinsic lowering, so
+                    // earlier things just `bug!` on it.
                     self.fail(location, "RawPtr should be in runtime MIR only");
                 }

@@ -1109,10 +1110,10 @@ impl<'a, 'tcx> Visitor<'tcx> for TypeChecker<'a, 'tcx> {
                 }
                 UnOp::PtrMetadata => {
                     if !matches!(self.mir_phase, MirPhase::Runtime(_)) {
-                        // It would probably be fine to support this in earlier phases,
-                        // but at the time of writing it's only ever introduced from intrinsic lowering
-                        // or other runtime-phase optimization passes,
-                        // so earlier things can just `bug!` on it.
+                        // It would probably be fine to support this in earlier phases, but at
+                        // the time of writing it's only ever introduced from intrinsic
+                        // lowering or other runtime-phase optimization passes, so earlier
+                        // things can just `bug!` on it.
                         self.fail(location, "PtrMetadata should be in runtime MIR only");
                     }

@@ -1506,7 +1507,8 @@ impl<'a, 'tcx> Visitor<'tcx> for TypeChecker<'a, 'tcx> {
                 }

                 if let TerminatorKind::TailCall { .. } = terminator.kind {
-                    // FIXME(explicit_tail_calls): implement tail-call specific checks here (such as signature matching, forbidding closures, etc)
+                    // FIXME(explicit_tail_calls): implement tail-call specific checks here (such
+                    // as signature matching, forbidding closures, etc)
                 }
             }
             TerminatorKind::Assert { cond, .. } => {
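Several of the hunks above (MentionedItemsVisitor, RequiredConstsVisitor, GatherCtors, ReplacementFinder) apply the same refactor: the visitor owns its output collection instead of borrowing it, which drops a lifetime parameter and lets the caller move the result out when visiting is done. A minimal standalone sketch of that pattern follows; the names (Collector, collect_evens) are illustrative only and are not taken from the diff.

    // Sketch of the "visitor owns its output" pattern, with hypothetical names.
    struct Collector {
        evens: Vec<u32>, // owned output, instead of `&'a mut Vec<u32>`
    }

    impl Collector {
        fn visit(&mut self, n: u32) {
            if n % 2 == 0 {
                self.evens.push(n);
            }
        }
    }

    fn collect_evens(input: &[u32]) -> Vec<u32> {
        // Build the visitor with an empty owned buffer, run it, then move the
        // buffer out. No extra lifetime parameter is needed on `Collector`.
        let mut collector = Collector { evens: Vec::new() };
        for &n in input {
            collector.visit(n);
        }
        collector.evens
    }

    fn main() {
        assert_eq!(collect_evens(&[1, 2, 3, 4]), vec![2, 4]);
        println!("ok");
    }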