MIR: s/lv(al(ue)?)?/place in function/variable/module names.

Eduard-Mihai Burtescu 2017-12-01 14:39:51 +02:00
parent 511743c438
commit 473f044225
60 changed files with 1047 additions and 1046 deletions


@ -272,24 +272,24 @@ for mir::StatementKind<'gcx> {
mem::discriminant(self).hash_stable(hcx, hasher);
match *self {
mir::StatementKind::Assign(ref lvalue, ref rvalue) => {
lvalue.hash_stable(hcx, hasher);
mir::StatementKind::Assign(ref place, ref rvalue) => {
place.hash_stable(hcx, hasher);
rvalue.hash_stable(hcx, hasher);
}
mir::StatementKind::SetDiscriminant { ref lvalue, variant_index } => {
lvalue.hash_stable(hcx, hasher);
mir::StatementKind::SetDiscriminant { ref place, variant_index } => {
place.hash_stable(hcx, hasher);
variant_index.hash_stable(hcx, hasher);
}
mir::StatementKind::StorageLive(ref lvalue) |
mir::StatementKind::StorageDead(ref lvalue) => {
lvalue.hash_stable(hcx, hasher);
mir::StatementKind::StorageLive(ref place) |
mir::StatementKind::StorageDead(ref place) => {
place.hash_stable(hcx, hasher);
}
mir::StatementKind::EndRegion(ref region_scope) => {
region_scope.hash_stable(hcx, hasher);
}
mir::StatementKind::Validate(ref op, ref lvalues) => {
mir::StatementKind::Validate(ref op, ref places) => {
op.hash_stable(hcx, hasher);
lvalues.hash_stable(hcx, hasher);
places.hash_stable(hcx, hasher);
}
mir::StatementKind::Nop => {}
mir::StatementKind::InlineAsm { ref asm, ref outputs, ref inputs } => {
@ -309,7 +309,7 @@ impl<'gcx, T> HashStable<StableHashingContext<'gcx>>
hcx: &mut StableHashingContext<'gcx>,
hasher: &mut StableHasher<W>)
{
self.lval.hash_stable(hcx, hasher);
self.place.hash_stable(hcx, hasher);
self.ty.hash_stable(hcx, hasher);
self.re.hash_stable(hcx, hasher);
self.mutbl.hash_stable(hcx, hasher);
@ -330,8 +330,8 @@ impl<'gcx> HashStable<StableHashingContext<'gcx>> for mir::Place<'gcx> {
mir::Place::Static(ref statik) => {
statik.hash_stable(hcx, hasher);
}
mir::Place::Projection(ref lvalue_projection) => {
lvalue_projection.hash_stable(hcx, hasher);
mir::Place::Projection(ref place_projection) => {
place_projection.hash_stable(hcx, hasher);
}
}
}
@ -420,11 +420,11 @@ impl<'gcx> HashStable<StableHashingContext<'gcx>> for mir::Operand<'gcx> {
mem::discriminant(self).hash_stable(hcx, hasher);
match *self {
mir::Operand::Copy(ref lvalue) => {
lvalue.hash_stable(hcx, hasher);
mir::Operand::Copy(ref place) => {
place.hash_stable(hcx, hasher);
}
mir::Operand::Move(ref lvalue) => {
lvalue.hash_stable(hcx, hasher);
mir::Operand::Move(ref place) => {
place.hash_stable(hcx, hasher);
}
mir::Operand::Constant(ref constant) => {
constant.hash_stable(hcx, hasher);
@ -447,13 +447,13 @@ impl<'gcx> HashStable<StableHashingContext<'gcx>> for mir::Rvalue<'gcx> {
operand.hash_stable(hcx, hasher);
val.hash_stable(hcx, hasher);
}
mir::Rvalue::Ref(region, borrow_kind, ref lvalue) => {
mir::Rvalue::Ref(region, borrow_kind, ref place) => {
region.hash_stable(hcx, hasher);
borrow_kind.hash_stable(hcx, hasher);
lvalue.hash_stable(hcx, hasher);
place.hash_stable(hcx, hasher);
}
mir::Rvalue::Len(ref lvalue) => {
lvalue.hash_stable(hcx, hasher);
mir::Rvalue::Len(ref place) => {
place.hash_stable(hcx, hasher);
}
mir::Rvalue::Cast(cast_kind, ref operand, ty) => {
cast_kind.hash_stable(hcx, hasher);
@ -470,8 +470,8 @@ impl<'gcx> HashStable<StableHashingContext<'gcx>> for mir::Rvalue<'gcx> {
op.hash_stable(hcx, hasher);
operand.hash_stable(hcx, hasher);
}
mir::Rvalue::Discriminant(ref lvalue) => {
lvalue.hash_stable(hcx, hasher);
mir::Rvalue::Discriminant(ref place) => {
place.hash_stable(hcx, hasher);
}
mir::Rvalue::NullaryOp(op, ty) => {
op.hash_stable(hcx, hasher);
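The shape of these `hash_stable` impls is uniform: hash the enum's discriminant first, then the fields of whichever variant is active. A minimal, self-contained sketch of the same shape, with std's `DefaultHasher` standing in for rustc's `StableHasher` and toy `Place`/`StatementKind` types (not the real MIR definitions):

```rust
use std::collections::hash_map::DefaultHasher;
use std::hash::{Hash, Hasher};
use std::mem;

// Toy stand-ins for the MIR types; only the hashing shape matters here.
#[derive(Hash)]
struct Place(u32);

enum StatementKind {
    Assign(Place, i64),
    StorageLive(Place),
    Nop,
}

impl StatementKind {
    // Hash the variant tag first, then the fields of the active
    // variant -- the same shape as the `hash_stable` impls above,
    // with std's `Hasher` in place of `StableHasher`.
    fn hash_stable<H: Hasher>(&self, hasher: &mut H) {
        mem::discriminant(self).hash(hasher);
        match *self {
            StatementKind::Assign(ref place, ref rvalue) => {
                place.hash(hasher);
                rvalue.hash(hasher);
            }
            StatementKind::StorageLive(ref place) => place.hash(hasher),
            StatementKind::Nop => {}
        }
    }
}

fn main() {
    let mut hasher = DefaultHasher::new();
    StatementKind::Assign(Place(0), 42).hash_stable(&mut hasher);
    println!("{:016x}", hasher.finish());
}
```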


@ -139,7 +139,7 @@ impl<'tcx> Mir<'tcx> {
upvar_decls: Vec<UpvarDecl>,
span: Span) -> Self
{
// We need `arg_count` locals, and one for the return pointer
// We need `arg_count` locals, and one for the return place
assert!(local_decls.len() >= arg_count + 1,
"expected at least {} locals, got {}", arg_count + 1, local_decls.len());
@ -200,7 +200,7 @@ impl<'tcx> Mir<'tcx> {
let index = local.0 as usize;
if index == 0 {
debug_assert!(self.local_decls[local].mutability == Mutability::Mut,
"return pointer should be mutable");
"return place should be mutable");
LocalKind::ReturnPointer
} else if index < self.arg_count + 1 {
@ -249,7 +249,7 @@ impl<'tcx> Mir<'tcx> {
}
/// Returns an iterator over all user-defined variables and compiler-generated temporaries (all
/// locals that are neither arguments nor the return pointer).
/// locals that are neither arguments nor the return place).
#[inline]
pub fn vars_and_temps_iter(&self) -> impl Iterator<Item=Local> {
let arg_count = self.arg_count;
@ -280,7 +280,7 @@ impl<'tcx> Mir<'tcx> {
/// Returns the return type; it is always the first element of the `local_decls` array.
pub fn return_ty(&self) -> Ty<'tcx> {
self.local_decls[RETURN_POINTER].ty
self.local_decls[RETURN_PLACE].ty
}
}
@ -417,7 +417,7 @@ pub enum BorrowKind {
newtype_index!(Local
{
DEBUG_FORMAT = "_{}",
const RETURN_POINTER = 0,
const RETURN_PLACE = 0,
});
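Renaming the constant (`RETURN_POINTER` → `RETURN_PLACE`) keeps the underlying convention: local 0 is the return place, locals 1 through `arg_count` are the arguments, and everything after that is a user variable or compiler temporary. A simplified mirror of that classification (a hand-written index type instead of the `newtype_index!` macro, with `Var`/`Temp` collapsed into one case):

```rust
#[derive(Copy, Clone, Debug, PartialEq)]
struct Local(u32);

// Local 0 is reserved for the return place.
const RETURN_PLACE: Local = Local(0);

#[derive(Debug, PartialEq)]
enum LocalKind {
    ReturnPointer,
    Arg,
    VarOrTemp,
}

fn local_kind(local: Local, arg_count: u32) -> LocalKind {
    if local.0 == 0 {
        LocalKind::ReturnPointer
    } else if local.0 <= arg_count {
        LocalKind::Arg
    } else {
        LocalKind::VarOrTemp
    }
}

fn main() {
    assert_eq!(local_kind(RETURN_PLACE, 2), LocalKind::ReturnPointer);
    assert_eq!(local_kind(Local(2), 2), LocalKind::Arg);
    assert_eq!(local_kind(Local(3), 2), LocalKind::VarOrTemp);
}
```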
/// Classifies locals into categories. See `Mir::local_kind`.
@ -436,12 +436,12 @@ pub enum LocalKind {
/// A MIR local.
///
/// This can be a binding declared by the user, a temporary inserted by the compiler, a function
/// argument, or the return pointer.
/// argument, or the return place.
#[derive(Clone, Debug, RustcEncodable, RustcDecodable)]
pub struct LocalDecl<'tcx> {
/// `let mut x` vs `let x`.
///
/// Temporaries and the return pointer are always mutable.
/// Temporaries and the return place are always mutable.
pub mutability: Mutability,
/// True if this corresponds to a user-declared local variable.
@ -520,11 +520,11 @@ impl<'tcx> LocalDecl<'tcx> {
}
}
/// Builds a `LocalDecl` for the return pointer.
/// Builds a `LocalDecl` for the return place.
///
/// This must be inserted into the `local_decls` list as the first local.
#[inline]
pub fn new_return_pointer(return_ty: Ty, span: Span) -> LocalDecl {
pub fn new_return_place(return_ty: Ty, span: Span) -> LocalDecl {
LocalDecl {
mutability: Mutability::Mut,
ty: return_ty,
@ -634,8 +634,8 @@ pub enum TerminatorKind<'tcx> {
/// continue. Emitted by build::scope::diverge_cleanup.
Resume,
/// Indicates a normal return. The return pointer lvalue should
/// have been filled in by now. This should occur at most once.
/// Indicates a normal return. The return place should have
/// been filled in by now. This should occur at most once.
Return,
/// Indicates a terminator that can never be reached.
@ -650,7 +650,7 @@ pub enum TerminatorKind<'tcx> {
/// Drop the Place and assign the new value over it. This ensures
/// that the assignment to LV occurs *even if* the destructor for
/// lvalue unwinds. Its semantics are best explained by the
/// place unwinds. Its semantics are best explained by the
/// elaboration:
///
/// ```
@ -878,7 +878,7 @@ impl<'tcx> TerminatorKind<'tcx> {
use self::TerminatorKind::*;
match *self {
Goto { .. } => write!(fmt, "goto"),
SwitchInt { discr: ref lv, .. } => write!(fmt, "switchInt({:?})", lv),
SwitchInt { discr: ref place, .. } => write!(fmt, "switchInt({:?})", place),
Return => write!(fmt, "return"),
GeneratorDrop => write!(fmt, "generator_drop"),
Resume => write!(fmt, "resume"),
@ -1006,7 +1006,7 @@ pub enum StatementKind<'tcx> {
Assign(Place<'tcx>, Rvalue<'tcx>),
/// Write the discriminant for a variant to the enum Place.
SetDiscriminant { lvalue: Place<'tcx>, variant_index: usize },
SetDiscriminant { place: Place<'tcx>, variant_index: usize },
/// Start a live range for the storage of the local.
StorageLive(Local),
@ -1021,7 +1021,7 @@ pub enum StatementKind<'tcx> {
inputs: Vec<Operand<'tcx>>
},
/// Assert the given lvalues to be valid inhabitants of their type. These statements are
/// Assert the given places to be valid inhabitants of their type. These statements are
/// currently only interpreted by miri and only generated when "-Z mir-emit-validate" is passed.
/// See <https://internals.rust-lang.org/t/types-as-contracts/5562/73> for more details.
Validate(ValidationOp, Vec<ValidationOperand<'tcx, Place<'tcx>>>),
@ -1038,9 +1038,9 @@ pub enum StatementKind<'tcx> {
/// `Validate` statement.
#[derive(Copy, Clone, RustcEncodable, RustcDecodable, PartialEq, Eq)]
pub enum ValidationOp {
/// Recursively traverse the lvalue following the type and validate that all type
/// Recursively traverse the place following the type and validate that all type
/// invariants are maintained. Furthermore, acquire exclusive/read-only access to the
/// memory reachable from the lvalue.
/// memory reachable from the place.
Acquire,
/// Recursively traverse the *mutable* part of the type and relinquish all exclusive
/// access.
@ -1065,7 +1065,7 @@ impl Debug for ValidationOp {
// This is generic so that it can be reused by miri
#[derive(Clone, RustcEncodable, RustcDecodable)]
pub struct ValidationOperand<'tcx, T> {
pub lval: T,
pub place: T,
pub ty: Ty<'tcx>,
pub re: Option<region::Scope>,
pub mutbl: hir::Mutability,
@ -1073,7 +1073,7 @@ pub struct ValidationOperand<'tcx, T> {
impl<'tcx, T: Debug> Debug for ValidationOperand<'tcx, T> {
fn fmt(&self, fmt: &mut Formatter) -> fmt::Result {
write!(fmt, "{:?}: {:?}", self.lval, self.ty)?;
write!(fmt, "{:?}: {:?}", self.place, self.ty)?;
if let Some(ce) = self.re {
// (reuse lifetime rendering policy from ppaux.)
write!(fmt, "/{}", ty::ReScope(ce))?;
@ -1089,14 +1089,14 @@ impl<'tcx> Debug for Statement<'tcx> {
fn fmt(&self, fmt: &mut Formatter) -> fmt::Result {
use self::StatementKind::*;
match self.kind {
Assign(ref lv, ref rv) => write!(fmt, "{:?} = {:?}", lv, rv),
Assign(ref place, ref rv) => write!(fmt, "{:?} = {:?}", place, rv),
// (reuse lifetime rendering policy from ppaux.)
EndRegion(ref ce) => write!(fmt, "EndRegion({})", ty::ReScope(*ce)),
Validate(ref op, ref lvalues) => write!(fmt, "Validate({:?}, {:?})", op, lvalues),
StorageLive(ref lv) => write!(fmt, "StorageLive({:?})", lv),
StorageDead(ref lv) => write!(fmt, "StorageDead({:?})", lv),
SetDiscriminant{lvalue: ref lv, variant_index: index} => {
write!(fmt, "discriminant({:?}) = {:?}", lv, index)
Validate(ref op, ref places) => write!(fmt, "Validate({:?}, {:?})", op, places),
StorageLive(ref place) => write!(fmt, "StorageLive({:?})", place),
StorageDead(ref place) => write!(fmt, "StorageDead({:?})", place),
SetDiscriminant { ref place, variant_index } => {
write!(fmt, "discriminant({:?}) = {:?}", place, variant_index)
},
InlineAsm { ref asm, ref outputs, ref inputs } => {
write!(fmt, "asm!({:?} : {:?} : {:?})", asm, outputs, inputs)
@ -1119,7 +1119,7 @@ pub enum Place<'tcx> {
/// static or static mut variable
Static(Box<Static<'tcx>>),
/// projection out of an lvalue (access a field, deref a pointer, etc)
/// projection out of a place (access a field, deref a pointer, etc)
Projection(Box<PlaceProjection<'tcx>>),
}
@ -1184,11 +1184,11 @@ pub enum ProjectionElem<'tcx, V, T> {
Downcast(&'tcx AdtDef, usize),
}
/// Alias for projections as they appear in lvalues, where the base is an lvalue
/// Alias for projections as they appear in places, where the base is a place
/// and the index is a local.
pub type PlaceProjection<'tcx> = Projection<'tcx, Place<'tcx>, Local, Ty<'tcx>>;
/// Alias for projections as they appear in lvalues, where the base is an lvalue
/// Alias for projections as they appear in places, where the base is a place
/// and the index is a local.
pub type PlaceElem<'tcx> = ProjectionElem<'tcx, Local, Ty<'tcx>>;
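A `Place` is a base (a local or a static) wrapped in zero or more projection elements, and the `.deref()`/`.field()`/`.elem()` builder calls seen throughout this commit each add one more projection layer. A toy sketch of that structure, without lifetimes or type information:

```rust
#[derive(Clone, Debug)]
enum Place {
    Local(u32),
    Projection(Box<Projection>),
}

#[derive(Clone, Debug)]
struct Projection {
    base: Place,
    elem: ProjectionElem,
}

#[derive(Clone, Debug)]
enum ProjectionElem {
    Deref,
    Field(usize),
    Downcast(usize), // variant index; the real elem also carries the AdtDef
}

impl Place {
    fn elem(self, elem: ProjectionElem) -> Place {
        Place::Projection(Box::new(Projection { base: self, elem }))
    }
    fn deref(self) -> Place {
        self.elem(ProjectionElem::Deref)
    }
    fn field(self, f: usize) -> Place {
        self.elem(ProjectionElem::Field(f))
    }
}

fn main() {
    // Roughly `((*_1) as Variant0).0` in MIR notation.
    let place = Place::Local(1)
        .deref()
        .elem(ProjectionElem::Downcast(0))
        .field(0);
    println!("{:?}", place);
}
```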
@ -1273,13 +1273,13 @@ pub struct VisibilityScopeData {
// Operands
/// These are values that can appear inside an rvalue (or an index
/// lvalue). They are intentionally limited to prevent rvalues from
/// place). They are intentionally limited to prevent rvalues from
/// being nested in one another.
#[derive(Clone, PartialEq, RustcEncodable, RustcDecodable)]
pub enum Operand<'tcx> {
/// Copy: The value must be available for use afterwards.
///
/// This implies that the type of the lvalue must be `Copy`; this is true
/// This implies that the type of the place must be `Copy`; this is true
/// by construction during build, but also checked by the MIR type checker.
Copy(Place<'tcx>),
/// Move: The value (including old borrows of it) will not be used again.
@ -1296,8 +1296,8 @@ impl<'tcx> Debug for Operand<'tcx> {
use self::Operand::*;
match *self {
Constant(ref a) => write!(fmt, "{:?}", a),
Copy(ref lv) => write!(fmt, "{:?}", lv),
Move(ref lv) => write!(fmt, "move {:?}", lv),
Copy(ref place) => write!(fmt, "{:?}", place),
Move(ref place) => write!(fmt, "move {:?}", place),
}
}
}
@ -1470,18 +1470,20 @@ impl<'tcx> Debug for Rvalue<'tcx> {
use self::Rvalue::*;
match *self {
Use(ref lvalue) => write!(fmt, "{:?}", lvalue),
Use(ref place) => write!(fmt, "{:?}", place),
Repeat(ref a, ref b) => write!(fmt, "[{:?}; {:?}]", a, b),
Len(ref a) => write!(fmt, "Len({:?})", a),
Cast(ref kind, ref lv, ref ty) => write!(fmt, "{:?} as {:?} ({:?})", lv, ty, kind),
Cast(ref kind, ref place, ref ty) => {
write!(fmt, "{:?} as {:?} ({:?})", place, ty, kind)
}
BinaryOp(ref op, ref a, ref b) => write!(fmt, "{:?}({:?}, {:?})", op, a, b),
CheckedBinaryOp(ref op, ref a, ref b) => {
write!(fmt, "Checked{:?}({:?}, {:?})", op, a, b)
}
UnaryOp(ref op, ref a) => write!(fmt, "{:?}({:?})", op, a),
Discriminant(ref lval) => write!(fmt, "discriminant({:?})", lval),
Discriminant(ref place) => write!(fmt, "discriminant({:?})", place),
NullaryOp(ref op, ref t) => write!(fmt, "{:?}({:?})", op, t),
Ref(region, borrow_kind, ref lv) => {
Ref(region, borrow_kind, ref place) => {
let kind_str = match borrow_kind {
BorrowKind::Shared => "",
BorrowKind::Mut | BorrowKind::Unique => "mut ",
@ -1496,26 +1498,26 @@ impl<'tcx> Debug for Rvalue<'tcx> {
// Do not even print 'static
"".to_owned()
};
write!(fmt, "&{}{}{:?}", region, kind_str, lv)
write!(fmt, "&{}{}{:?}", region, kind_str, place)
}
Aggregate(ref kind, ref lvs) => {
fn fmt_tuple(fmt: &mut Formatter, lvs: &[Operand]) -> fmt::Result {
Aggregate(ref kind, ref places) => {
fn fmt_tuple(fmt: &mut Formatter, places: &[Operand]) -> fmt::Result {
let mut tuple_fmt = fmt.debug_tuple("");
for lv in lvs {
tuple_fmt.field(lv);
for place in places {
tuple_fmt.field(place);
}
tuple_fmt.finish()
}
match **kind {
AggregateKind::Array(_) => write!(fmt, "{:?}", lvs),
AggregateKind::Array(_) => write!(fmt, "{:?}", places),
AggregateKind::Tuple => {
match lvs.len() {
match places.len() {
0 => write!(fmt, "()"),
1 => write!(fmt, "({:?},)", lvs[0]),
_ => fmt_tuple(fmt, lvs),
1 => write!(fmt, "({:?},)", places[0]),
_ => fmt_tuple(fmt, places),
}
}
@ -1526,11 +1528,11 @@ impl<'tcx> Debug for Rvalue<'tcx> {
match variant_def.ctor_kind {
CtorKind::Const => Ok(()),
CtorKind::Fn => fmt_tuple(fmt, lvs),
CtorKind::Fn => fmt_tuple(fmt, places),
CtorKind::Fictive => {
let mut struct_fmt = fmt.debug_struct("");
for (field, lv) in variant_def.fields.iter().zip(lvs) {
struct_fmt.field(&field.name.as_str(), lv);
for (field, place) in variant_def.fields.iter().zip(places) {
struct_fmt.field(&field.name.as_str(), place);
}
struct_fmt.finish()
}
@ -1547,9 +1549,9 @@ impl<'tcx> Debug for Rvalue<'tcx> {
let mut struct_fmt = fmt.debug_struct(&name);
tcx.with_freevars(node_id, |freevars| {
for (freevar, lv) in freevars.iter().zip(lvs) {
for (freevar, place) in freevars.iter().zip(places) {
let var_name = tcx.hir.name(freevar.var_id());
struct_fmt.field(&var_name.as_str(), lv);
struct_fmt.field(&var_name.as_str(), place);
}
});
@ -1565,14 +1567,14 @@ impl<'tcx> Debug for Rvalue<'tcx> {
let mut struct_fmt = fmt.debug_struct(&name);
tcx.with_freevars(node_id, |freevars| {
for (freevar, lv) in freevars.iter().zip(lvs) {
for (freevar, place) in freevars.iter().zip(places) {
let var_name = tcx.hir.name(freevar.var_id());
struct_fmt.field(&var_name.as_str(), lv);
struct_fmt.field(&var_name.as_str(), place);
}
struct_fmt.field("$state", &lvs[freevars.len()]);
for i in (freevars.len() + 1)..lvs.len() {
struct_fmt.field("$state", &places[freevars.len()]);
for i in (freevars.len() + 1)..places.len() {
struct_fmt.field(&format!("${}", i - freevars.len() - 1),
&lvs[i]);
&places[i]);
}
});
@ -1831,7 +1833,7 @@ impl<'tcx> TypeFoldable<'tcx> for BasicBlockData<'tcx> {
impl<'tcx> TypeFoldable<'tcx> for ValidationOperand<'tcx, Place<'tcx>> {
fn super_fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self {
ValidationOperand {
lval: self.lval.fold_with(folder),
place: self.place.fold_with(folder),
ty: self.ty.fold_with(folder),
re: self.re,
mutbl: self.mutbl,
@ -1839,7 +1841,7 @@ impl<'tcx> TypeFoldable<'tcx> for ValidationOperand<'tcx, Place<'tcx>> {
}
fn super_visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> bool {
self.lval.visit_with(visitor) || self.ty.visit_with(visitor)
self.place.visit_with(visitor) || self.ty.visit_with(visitor)
}
}
@ -1848,9 +1850,9 @@ impl<'tcx> TypeFoldable<'tcx> for Statement<'tcx> {
use mir::StatementKind::*;
let kind = match self.kind {
Assign(ref lval, ref rval) => Assign(lval.fold_with(folder), rval.fold_with(folder)),
SetDiscriminant { ref lvalue, variant_index } => SetDiscriminant {
lvalue: lvalue.fold_with(folder),
Assign(ref place, ref rval) => Assign(place.fold_with(folder), rval.fold_with(folder)),
SetDiscriminant { ref place, variant_index } => SetDiscriminant {
place: place.fold_with(folder),
variant_index,
},
StorageLive(ref local) => StorageLive(local.fold_with(folder)),
@ -1867,9 +1869,9 @@ impl<'tcx> TypeFoldable<'tcx> for Statement<'tcx> {
// trait with a `fn fold_scope`.
EndRegion(ref region_scope) => EndRegion(region_scope.clone()),
Validate(ref op, ref lvals) =>
Validate(ref op, ref places) =>
Validate(op.clone(),
lvals.iter().map(|operand| operand.fold_with(folder)).collect()),
places.iter().map(|operand| operand.fold_with(folder)).collect()),
Nop => Nop,
};
@ -1883,8 +1885,8 @@ impl<'tcx> TypeFoldable<'tcx> for Statement<'tcx> {
use mir::StatementKind::*;
match self.kind {
Assign(ref lval, ref rval) => { lval.visit_with(visitor) || rval.visit_with(visitor) }
SetDiscriminant { ref lvalue, .. } => lvalue.visit_with(visitor),
Assign(ref place, ref rval) => { place.visit_with(visitor) || rval.visit_with(visitor) }
SetDiscriminant { ref place, .. } => place.visit_with(visitor),
StorageLive(ref local) |
StorageDead(ref local) => local.visit_with(visitor),
InlineAsm { ref outputs, ref inputs, .. } =>
@ -1896,8 +1898,8 @@ impl<'tcx> TypeFoldable<'tcx> for Statement<'tcx> {
// trait with a `fn visit_scope`.
EndRegion(ref _scope) => false,
Validate(ref _op, ref lvalues) =>
lvalues.iter().any(|ty_and_lvalue| ty_and_lvalue.visit_with(visitor)),
Validate(ref _op, ref places) =>
places.iter().any(|ty_and_place| ty_and_place.visit_with(visitor)),
Nop => false,
}
@ -2035,15 +2037,16 @@ impl<'tcx> TypeFoldable<'tcx> for Rvalue<'tcx> {
match *self {
Use(ref op) => Use(op.fold_with(folder)),
Repeat(ref op, len) => Repeat(op.fold_with(folder), len),
Ref(region, bk, ref lval) => Ref(region.fold_with(folder), bk, lval.fold_with(folder)),
Len(ref lval) => Len(lval.fold_with(folder)),
Ref(region, bk, ref place) =>
Ref(region.fold_with(folder), bk, place.fold_with(folder)),
Len(ref place) => Len(place.fold_with(folder)),
Cast(kind, ref op, ty) => Cast(kind, op.fold_with(folder), ty.fold_with(folder)),
BinaryOp(op, ref rhs, ref lhs) =>
BinaryOp(op, rhs.fold_with(folder), lhs.fold_with(folder)),
CheckedBinaryOp(op, ref rhs, ref lhs) =>
CheckedBinaryOp(op, rhs.fold_with(folder), lhs.fold_with(folder)),
UnaryOp(op, ref val) => UnaryOp(op, val.fold_with(folder)),
Discriminant(ref lval) => Discriminant(lval.fold_with(folder)),
Discriminant(ref place) => Discriminant(place.fold_with(folder)),
NullaryOp(op, ty) => NullaryOp(op, ty.fold_with(folder)),
Aggregate(ref kind, ref fields) => {
let kind = box match **kind {
@ -2068,14 +2071,14 @@ impl<'tcx> TypeFoldable<'tcx> for Rvalue<'tcx> {
match *self {
Use(ref op) => op.visit_with(visitor),
Repeat(ref op, _) => op.visit_with(visitor),
Ref(region, _, ref lval) => region.visit_with(visitor) || lval.visit_with(visitor),
Len(ref lval) => lval.visit_with(visitor),
Ref(region, _, ref place) => region.visit_with(visitor) || place.visit_with(visitor),
Len(ref place) => place.visit_with(visitor),
Cast(_, ref op, ty) => op.visit_with(visitor) || ty.visit_with(visitor),
BinaryOp(_, ref rhs, ref lhs) |
CheckedBinaryOp(_, ref rhs, ref lhs) =>
rhs.visit_with(visitor) || lhs.visit_with(visitor),
UnaryOp(_, ref val) => val.visit_with(visitor),
Discriminant(ref lval) => lval.visit_with(visitor),
Discriminant(ref place) => place.visit_with(visitor),
NullaryOp(_, ty) => ty.visit_with(visitor),
Aggregate(ref kind, ref fields) => {
(match **kind {
@ -2094,16 +2097,16 @@ impl<'tcx> TypeFoldable<'tcx> for Rvalue<'tcx> {
impl<'tcx> TypeFoldable<'tcx> for Operand<'tcx> {
fn super_fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self {
match *self {
Operand::Copy(ref lval) => Operand::Copy(lval.fold_with(folder)),
Operand::Move(ref lval) => Operand::Move(lval.fold_with(folder)),
Operand::Copy(ref place) => Operand::Copy(place.fold_with(folder)),
Operand::Move(ref place) => Operand::Move(place.fold_with(folder)),
Operand::Constant(ref c) => Operand::Constant(c.fold_with(folder)),
}
}
fn super_visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> bool {
match *self {
Operand::Copy(ref lval) |
Operand::Move(ref lval) => lval.visit_with(visitor),
Operand::Copy(ref place) |
Operand::Move(ref place) => place.visit_with(visitor),
Operand::Constant(ref c) => c.visit_with(visitor)
}
}
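These `TypeFoldable` impls always come in pairs: `super_fold_with` rebuilds the value by folding every component, while `super_visit_with` walks the same components and short-circuits with `||`. A minimal sketch of the pairing over a toy operand type (the `Folder` trait here is illustrative, not rustc's `TypeFolder`):

```rust
#[derive(Clone, Debug, PartialEq)]
enum Operand {
    Copy(u32), // u32 stands in for a Place
    Move(u32),
    Constant(i64),
}

trait Folder {
    fn fold_place(&mut self, local: u32) -> u32;
}

impl Operand {
    // Rebuild the value, folding each place through the folder.
    fn fold_with<F: Folder>(&self, folder: &mut F) -> Self {
        match *self {
            Operand::Copy(p) => Operand::Copy(folder.fold_place(p)),
            Operand::Move(p) => Operand::Move(folder.fold_place(p)),
            Operand::Constant(c) => Operand::Constant(c),
        }
    }

    // Return true as soon as any component matches, mirroring the
    // `a.visit_with(v) || b.visit_with(v)` chains above.
    fn visit_with(&self, visitor: &mut dyn FnMut(u32) -> bool) -> bool {
        match *self {
            Operand::Copy(p) | Operand::Move(p) => visitor(p),
            Operand::Constant(_) => false,
        }
    }
}

struct Renumber;
impl Folder for Renumber {
    fn fold_place(&mut self, local: u32) -> u32 {
        local + 1
    }
}

fn main() {
    let op = Operand::Move(1);
    assert_eq!(op.fold_with(&mut Renumber), Operand::Move(2));
    assert!(op.visit_with(&mut |p| p == 1));
}
```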


@ -151,11 +151,11 @@ impl<'tcx> Rvalue<'tcx> {
Rvalue::Repeat(ref operand, count) => {
tcx.mk_array_const_usize(operand.ty(local_decls, tcx), count)
}
Rvalue::Ref(reg, bk, ref lv) => {
let lv_ty = lv.ty(local_decls, tcx).to_ty(tcx);
Rvalue::Ref(reg, bk, ref place) => {
let place_ty = place.ty(local_decls, tcx).to_ty(tcx);
tcx.mk_ref(reg,
ty::TypeAndMut {
ty: lv_ty,
ty: place_ty,
mutbl: bk.to_mutbl_lossy()
}
)
@ -177,8 +177,8 @@ impl<'tcx> Rvalue<'tcx> {
Rvalue::UnaryOp(UnOp::Neg, ref operand) => {
operand.ty(local_decls, tcx)
}
Rvalue::Discriminant(ref lval) => {
let ty = lval.ty(local_decls, tcx).to_ty(tcx);
Rvalue::Discriminant(ref place) => {
let ty = place.ty(local_decls, tcx).to_ty(tcx);
if let ty::TyAdt(adt_def, _) = ty.sty {
adt_def.repr.discr_type().to_ty(tcx)
} else {


@ -107,10 +107,10 @@ macro_rules! make_mir_visitor {
fn visit_assign(&mut self,
block: BasicBlock,
lvalue: & $($mutability)* Place<'tcx>,
place: & $($mutability)* Place<'tcx>,
rvalue: & $($mutability)* Rvalue<'tcx>,
location: Location) {
self.super_assign(block, lvalue, rvalue, location);
self.super_assign(block, place, rvalue, location);
}
fn visit_terminator(&mut self,
@ -145,11 +145,11 @@ macro_rules! make_mir_visitor {
self.super_operand(operand, location);
}
fn visit_lvalue(&mut self,
lvalue: & $($mutability)* Place<'tcx>,
fn visit_place(&mut self,
place: & $($mutability)* Place<'tcx>,
context: PlaceContext<'tcx>,
location: Location) {
self.super_lvalue(lvalue, context, location);
self.super_place(place, context, location);
}
fn visit_static(&mut self,
@ -160,17 +160,17 @@ macro_rules! make_mir_visitor {
}
fn visit_projection(&mut self,
lvalue: & $($mutability)* PlaceProjection<'tcx>,
place: & $($mutability)* PlaceProjection<'tcx>,
context: PlaceContext<'tcx>,
location: Location) {
self.super_projection(lvalue, context, location);
self.super_projection(place, context, location);
}
fn visit_projection_elem(&mut self,
lvalue: & $($mutability)* PlaceElem<'tcx>,
place: & $($mutability)* PlaceElem<'tcx>,
context: PlaceContext<'tcx>,
location: Location) {
self.super_projection_elem(lvalue, context, location);
self.super_projection_elem(place, context, location);
}
fn visit_branch(&mut self,
@ -350,21 +350,21 @@ macro_rules! make_mir_visitor {
self.visit_source_info(source_info);
match *kind {
StatementKind::Assign(ref $($mutability)* lvalue,
StatementKind::Assign(ref $($mutability)* place,
ref $($mutability)* rvalue) => {
self.visit_assign(block, lvalue, rvalue, location);
self.visit_assign(block, place, rvalue, location);
}
StatementKind::EndRegion(_) => {}
StatementKind::Validate(_, ref $($mutability)* lvalues) => {
for operand in lvalues {
self.visit_lvalue(& $($mutability)* operand.lval,
StatementKind::Validate(_, ref $($mutability)* places) => {
for operand in places {
self.visit_place(& $($mutability)* operand.place,
PlaceContext::Validate, location);
self.visit_ty(& $($mutability)* operand.ty,
TyContext::Location(location));
}
}
StatementKind::SetDiscriminant{ ref $($mutability)* lvalue, .. } => {
self.visit_lvalue(lvalue, PlaceContext::Store, location);
StatementKind::SetDiscriminant{ ref $($mutability)* place, .. } => {
self.visit_place(place, PlaceContext::Store, location);
}
StatementKind::StorageLive(ref $($mutability)* local) => {
self.visit_local(local, PlaceContext::StorageLive, location);
@ -376,7 +376,7 @@ macro_rules! make_mir_visitor {
ref $($mutability)* inputs,
asm: _ } => {
for output in & $($mutability)* outputs[..] {
self.visit_lvalue(output, PlaceContext::Store, location);
self.visit_place(output, PlaceContext::Store, location);
}
for input in & $($mutability)* inputs[..] {
self.visit_operand(input, location);
@ -388,10 +388,10 @@ macro_rules! make_mir_visitor {
fn super_assign(&mut self,
_block: BasicBlock,
lvalue: &$($mutability)* Place<'tcx>,
place: &$($mutability)* Place<'tcx>,
rvalue: &$($mutability)* Rvalue<'tcx>,
location: Location) {
self.visit_lvalue(lvalue, PlaceContext::Store, location);
self.visit_place(place, PlaceContext::Store, location);
self.visit_rvalue(rvalue, location);
}
@ -440,7 +440,7 @@ macro_rules! make_mir_visitor {
TerminatorKind::Drop { ref $($mutability)* location,
target,
unwind } => {
self.visit_lvalue(location, PlaceContext::Drop, source_location);
self.visit_place(location, PlaceContext::Drop, source_location);
self.visit_branch(block, target);
unwind.map(|t| self.visit_branch(block, t));
}
@ -449,7 +449,7 @@ macro_rules! make_mir_visitor {
ref $($mutability)* value,
target,
unwind } => {
self.visit_lvalue(location, PlaceContext::Drop, source_location);
self.visit_place(location, PlaceContext::Drop, source_location);
self.visit_operand(value, source_location);
self.visit_branch(block, target);
unwind.map(|t| self.visit_branch(block, t));
@ -464,7 +464,7 @@ macro_rules! make_mir_visitor {
self.visit_operand(arg, source_location);
}
if let Some((ref $($mutability)* destination, target)) = *destination {
self.visit_lvalue(destination, PlaceContext::Call, source_location);
self.visit_place(destination, PlaceContext::Call, source_location);
self.visit_branch(block, target);
}
cleanup.map(|t| self.visit_branch(block, t));
@ -532,14 +532,14 @@ macro_rules! make_mir_visitor {
Rvalue::Ref(ref $($mutability)* r, bk, ref $($mutability)* path) => {
self.visit_region(r, location);
self.visit_lvalue(path, PlaceContext::Borrow {
self.visit_place(path, PlaceContext::Borrow {
region: *r,
kind: bk
}, location);
}
Rvalue::Len(ref $($mutability)* path) => {
self.visit_lvalue(path, PlaceContext::Inspect, location);
self.visit_place(path, PlaceContext::Inspect, location);
}
Rvalue::Cast(_cast_kind,
@ -563,8 +563,8 @@ macro_rules! make_mir_visitor {
self.visit_operand(op, location);
}
Rvalue::Discriminant(ref $($mutability)* lvalue) => {
self.visit_lvalue(lvalue, PlaceContext::Inspect, location);
Rvalue::Discriminant(ref $($mutability)* place) => {
self.visit_place(place, PlaceContext::Inspect, location);
}
Rvalue::NullaryOp(_op, ref $($mutability)* ty) => {
@ -611,11 +611,11 @@ macro_rules! make_mir_visitor {
operand: & $($mutability)* Operand<'tcx>,
location: Location) {
match *operand {
Operand::Copy(ref $($mutability)* lvalue) => {
self.visit_lvalue(lvalue, PlaceContext::Copy, location);
Operand::Copy(ref $($mutability)* place) => {
self.visit_place(place, PlaceContext::Copy, location);
}
Operand::Move(ref $($mutability)* lvalue) => {
self.visit_lvalue(lvalue, PlaceContext::Move, location);
Operand::Move(ref $($mutability)* place) => {
self.visit_place(place, PlaceContext::Move, location);
}
Operand::Constant(ref $($mutability)* constant) => {
self.visit_constant(constant, location);
@ -623,11 +623,11 @@ macro_rules! make_mir_visitor {
}
}
fn super_lvalue(&mut self,
lvalue: & $($mutability)* Place<'tcx>,
fn super_place(&mut self,
place: & $($mutability)* Place<'tcx>,
context: PlaceContext<'tcx>,
location: Location) {
match *lvalue {
match *place {
Place::Local(ref $($mutability)* local) => {
self.visit_local(local, context, location);
}
@ -665,7 +665,7 @@ macro_rules! make_mir_visitor {
} else {
PlaceContext::Projection(Mutability::Not)
};
self.visit_lvalue(base, context, location);
self.visit_place(base, context, location);
self.visit_projection_elem(elem, context, location);
}
@ -847,10 +847,10 @@ pub enum PlaceContext<'tcx> {
// Being borrowed
Borrow { region: Region<'tcx>, kind: BorrowKind },
// Used as base for another lvalue, e.g. `x` in `x.y`.
// Used as base for another place, e.g. `x` in `x.y`.
//
// The `Mutability` argument specifies whether the projection is being performed in order to
// (potentially) mutate the lvalue. For example, the projection `x.y` is marked as a mutation
// (potentially) mutate the place. For example, the projection `x.y` is marked as a mutation
// in these cases:
//
// x.y = ...;
@ -875,7 +875,7 @@ pub enum PlaceContext<'tcx> {
}
impl<'tcx> PlaceContext<'tcx> {
/// Returns true if this lvalue context represents a drop.
/// Returns true if this place context represents a drop.
pub fn is_drop(&self) -> bool {
match *self {
PlaceContext::Drop => true,
@ -883,7 +883,7 @@ impl<'tcx> PlaceContext<'tcx> {
}
}
/// Returns true if this lvalue context represents a storage live or storage dead marker.
/// Returns true if this place context represents a storage live or storage dead marker.
pub fn is_storage_marker(&self) -> bool {
match *self {
PlaceContext::StorageLive | PlaceContext::StorageDead => true,
@ -891,7 +891,7 @@ impl<'tcx> PlaceContext<'tcx> {
}
}
/// Returns true if this lvalue context represents a storage live marker.
/// Returns true if this place context represents a storage live marker.
pub fn is_storage_live_marker(&self) -> bool {
match *self {
PlaceContext::StorageLive => true,
@ -899,7 +899,7 @@ impl<'tcx> PlaceContext<'tcx> {
}
}
/// Returns true if this lvalue context represents a storage dead marker.
/// Returns true if this place context represents a storage dead marker.
pub fn is_storage_dead_marker(&self) -> bool {
match *self {
PlaceContext::StorageDead => true,
@ -907,7 +907,7 @@ impl<'tcx> PlaceContext<'tcx> {
}
}
/// Returns true if this lvalue context represents a use that potentially changes the value.
/// Returns true if this place context represents a use that potentially changes the value.
pub fn is_mutating_use(&self) -> bool {
match *self {
PlaceContext::Store | PlaceContext::Call |
@ -924,7 +924,7 @@ impl<'tcx> PlaceContext<'tcx> {
}
}
/// Returns true if this lvalue context represents a use that does not change the value.
/// Returns true if this place context represents a use that does not change the value.
pub fn is_nonmutating_use(&self) -> bool {
match *self {
PlaceContext::Inspect | PlaceContext::Borrow { kind: BorrowKind::Shared, .. } |

File diff suppressed because it is too large.
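The visitor above is macro-generated so that the shared and mutable variants come from one definition, but the protocol is simple: every `visit_*` hook defaults to the matching `super_*` walker, and each place reached is tagged with a `PlaceContext`. A macro-free sketch of that protocol, small enough to run:

```rust
#[derive(Debug, Clone, Copy, PartialEq)]
enum PlaceContext {
    Store,
    Copy,
}

enum Place {
    Local(u32),
}

enum Rvalue {
    Use(Place),
}

struct Statement {
    // An assignment: `place = rvalue`.
    place: Place,
    rvalue: Rvalue,
}

trait Visitor {
    // Hooks default to the `super_*` walkers; an override can still
    // run the default walk by calling `super_place` itself.
    fn visit_place(&mut self, place: &Place, context: PlaceContext) {
        self.super_place(place, context);
    }
    fn super_place(&mut self, _place: &Place, _context: PlaceContext) {
        // `Place::Local` has no children to walk in this toy version.
    }
    fn visit_statement(&mut self, statement: &Statement) {
        // The LHS of an assignment is a store; the operand is a copy.
        self.visit_place(&statement.place, PlaceContext::Store);
        match statement.rvalue {
            Rvalue::Use(ref place) => self.visit_place(place, PlaceContext::Copy),
        }
    }
}

struct StoreCounter(usize);

impl Visitor for StoreCounter {
    fn visit_place(&mut self, place: &Place, context: PlaceContext) {
        if context == PlaceContext::Store {
            self.0 += 1;
        }
        self.super_place(place, context);
    }
}

fn main() {
    let statement = Statement {
        place: Place::Local(0),
        rvalue: Rvalue::Use(Place::Local(1)),
    };
    let mut counter = StoreCounter(0);
    counter.visit_statement(&statement);
    assert_eq!(counter.0, 1);
}
```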


@ -61,11 +61,11 @@ impl<'tcx> CFG<'tcx> {
pub fn push_assign(&mut self,
block: BasicBlock,
source_info: SourceInfo,
lvalue: &Place<'tcx>,
place: &Place<'tcx>,
rvalue: Rvalue<'tcx>) {
self.push(block, Statement {
source_info,
kind: StatementKind::Assign(lvalue.clone(), rvalue)
kind: StatementKind::Assign(place.clone(), rvalue)
});
}
@ -81,8 +81,8 @@ impl<'tcx> CFG<'tcx> {
pub fn push_assign_unit(&mut self,
block: BasicBlock,
source_info: SourceInfo,
lvalue: &Place<'tcx>) {
self.push_assign(block, source_info, lvalue, Rvalue::Aggregate(
place: &Place<'tcx>) {
self.push_assign(block, source_info, place, Rvalue::Aggregate(
box AggregateKind::Tuple, vec![]
));
}
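`push_assign` is a thin convenience wrapper: it clones the destination place into a fresh `Assign` statement and appends it to the block's statement list. Roughly, over toy types:

```rust
#[derive(Clone, Debug)]
enum Place {
    Local(u32),
}

#[derive(Debug)]
enum Rvalue {
    Use(Place),
}

#[derive(Debug)]
enum StatementKind {
    Assign(Place, Rvalue),
}

#[derive(Default, Debug)]
struct BasicBlockData {
    statements: Vec<StatementKind>,
}

impl BasicBlockData {
    // Borrow the destination so callers can keep reusing it, and
    // clone it into the statement -- the same signature shape as
    // `CFG::push_assign` above.
    fn push_assign(&mut self, place: &Place, rvalue: Rvalue) {
        self.statements
            .push(StatementKind::Assign(place.clone(), rvalue));
    }
}

fn main() {
    let mut block = BasicBlockData::default();
    let dest = Place::Local(0);
    block.push_assign(&dest, Rvalue::Use(Place::Local(1)));
    println!("{:?}", block.statements);
}
```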


@ -32,7 +32,7 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> {
}
/// Compile `expr` into a value that can be used as an operand.
/// If `expr` is an lvalue like `x`, this will introduce a
/// If `expr` is a place like `x`, this will introduce a
/// temporary `tmp = x`, so that we capture the value of `x` at
/// this time.
///


@ -18,22 +18,22 @@ use rustc::mir::*;
use rustc_data_structures::indexed_vec::Idx;
impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> {
/// Compile `expr`, yielding an lvalue that we can move from etc.
pub fn as_lvalue<M>(&mut self,
/// Compile `expr`, yielding a place that we can move from etc.
pub fn as_place<M>(&mut self,
block: BasicBlock,
expr: M)
-> BlockAnd<Place<'tcx>>
where M: Mirror<'tcx, Output=Expr<'tcx>>
{
let expr = self.hir.mirror(expr);
self.expr_as_lvalue(block, expr)
self.expr_as_place(block, expr)
}
fn expr_as_lvalue(&mut self,
fn expr_as_place(&mut self,
mut block: BasicBlock,
expr: Expr<'tcx>)
-> BlockAnd<Place<'tcx>> {
debug!("expr_as_lvalue(block={:?}, expr={:?})", block, expr);
debug!("expr_as_place(block={:?}, expr={:?})", block, expr);
let this = self;
let expr_span = expr.span;
@ -41,24 +41,24 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> {
match expr.kind {
ExprKind::Scope { region_scope, lint_level, value } => {
this.in_scope((region_scope, source_info), lint_level, block, |this| {
this.as_lvalue(block, value)
this.as_place(block, value)
})
}
ExprKind::Field { lhs, name } => {
let lvalue = unpack!(block = this.as_lvalue(block, lhs));
let lvalue = lvalue.field(name, expr.ty);
block.and(lvalue)
let place = unpack!(block = this.as_place(block, lhs));
let place = place.field(name, expr.ty);
block.and(place)
}
ExprKind::Deref { arg } => {
let lvalue = unpack!(block = this.as_lvalue(block, arg));
let lvalue = lvalue.deref();
block.and(lvalue)
let place = unpack!(block = this.as_place(block, arg));
let place = place.deref();
block.and(place)
}
ExprKind::Index { lhs, index } => {
let (usize_ty, bool_ty) = (this.hir.usize_ty(), this.hir.bool_ty());
let slice = unpack!(block = this.as_lvalue(block, lhs));
// region_scope=None so lvalue indexes live forever. They are scalars so they
let slice = unpack!(block = this.as_place(block, lhs));
// region_scope=None so place indexes live forever. They are scalars so they
// do not need storage annotations, and they are often copied between
// places.
let idx = unpack!(block = this.as_temp(block, None, index));
@ -122,7 +122,7 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> {
ExprKind::InlineAsm { .. } |
ExprKind::Yield { .. } |
ExprKind::Call { .. } => {
// these are not lvalues, so we need to make a temporary.
// these are not places, so we need to make a temporary.
debug_assert!(match Category::of(&expr.kind) {
Some(Category::Place) => false,
_ => true,
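`expr_as_place` is a structural recursion: a field or deref expression extends the place computed for its base, and expressions that are not places (calls, arithmetic, and so on) fall back to a fresh temporary via `as_temp`. A simplified sketch with a toy AST and no basic blocks (names here are illustrative, not the real builder API):

```rust
enum Expr {
    Var(u32),
    Field(Box<Expr>, usize),
    Deref(Box<Expr>),
    Call, // not a place: needs a temporary
}

#[derive(Debug)]
enum Place {
    Local(u32),
    Field(Box<Place>, usize),
    Deref(Box<Place>),
}

fn expr_as_place(expr: &Expr, next_temp: &mut u32) -> Place {
    match *expr {
        Expr::Var(local) => Place::Local(local),
        // A field access extends the place computed for its base.
        Expr::Field(ref lhs, f) => {
            Place::Field(Box::new(expr_as_place(lhs, next_temp)), f)
        }
        Expr::Deref(ref arg) => {
            Place::Deref(Box::new(expr_as_place(arg, next_temp)))
        }
        Expr::Call => {
            // Not a place: evaluate into a fresh temporary (`as_temp`).
            *next_temp += 1;
            Place::Local(*next_temp)
        }
    }
}

fn main() {
    // `(*x).0` with `x` as local 1.
    let expr = Expr::Field(Box::new(Expr::Deref(Box::new(Expr::Var(1)))), 0);
    let mut next_temp = 100;
    println!("{:?}", expr_as_place(&expr, &mut next_temp));
}
```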


@ -68,8 +68,8 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> {
block.and(Rvalue::Repeat(value_operand, count))
}
ExprKind::Borrow { region, borrow_kind, arg } => {
let arg_lvalue = unpack!(block = this.as_lvalue(block, arg));
block.and(Rvalue::Ref(region, borrow_kind, arg_lvalue))
let arg_place = unpack!(block = this.as_place(block, arg));
block.and(Rvalue::Ref(region, borrow_kind, arg_place))
}
ExprKind::Binary { op, lhs, rhs } => {
let lhs = unpack!(block = this.as_operand(block, scope, lhs));
@ -229,7 +229,7 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> {
let field_names = this.hir.all_fields(adt_def, variant_index);
let fields = if let Some(FruInfo { base, field_types }) = base {
let base = unpack!(block = this.as_lvalue(block, base));
let base = unpack!(block = this.as_place(block, base));
// MIR does not natively support FRU, so for each
// base-supplied field, generate an operand that


@ -58,16 +58,16 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> {
}
// Careful here not to cause an infinite cycle. If we always
// called `into`, then for lvalues like `x.f`, it would
// called `into`, then for places like `x.f`, it would
// eventually fallback to us, and we'd loop. There's a reason
// for this: `as_temp` is the point where we bridge the "by
// reference" semantics of `as_lvalue` with the "by value"
// reference" semantics of `as_place` with the "by value"
// semantics of `into`, `as_operand`, `as_rvalue`, and (of
// course) `as_temp`.
match Category::of(&expr.kind).unwrap() {
Category::Place => {
let lvalue = unpack!(block = this.as_lvalue(block, expr));
let rvalue = Rvalue::Use(this.consume_by_copy_or_move(lvalue));
let place = unpack!(block = this.as_place(block, expr));
let rvalue = Rvalue::Use(this.consume_by_copy_or_move(place));
this.cfg.push_assign(block, source_info, &Place::Local(temp), rvalue);
}
_ => {


@ -24,13 +24,13 @@
//! - `as_operand` -- evaluates the value and yields an `Operand`,
//! suitable for use as an argument to an `Rvalue`
//! - `as_temp` -- evaluates into a temporary; this is similar to `as_operand`
//! except it always returns a fresh lvalue, even for constants
//! except it always returns a fresh place, even for constants
//! - `as_rvalue` -- yields an `Rvalue`, suitable for use in an assignment;
//! as of this writing, never needed outside of the `expr` module itself
//!
//! Sometimes, though, we want the expression's *location*. An example
//! would be during a match statement, or the operand of the `&`
//! operator. In that case, you want `as_lvalue`. This will create a
//! operator. In that case, you want `as_place`. This will create a
//! temporary if necessary.
//!
//! Finally, if it's a constant you seek, then call
@ -46,7 +46,7 @@
//! struct expression (or other expression that creates a new value)
//! is typically easiest to write in terms of `as_rvalue` or `into`,
//! whereas a reference to a field is easiest to write in terms of
//! `as_lvalue`. (The exception to this is scope and paren
//! `as_place`. (The exception to this is scope and paren
//! expressions, which have no category.)
//!
//! Therefore, the various functions above make use of one another in
@ -54,12 +54,12 @@
//! the most suitable spot to implement it, and then just let the
//! other fns cycle around. The handoff works like this:
//!
//! - `into(lv)` -> fallback is to create an rvalue with `as_rvalue` and assign it to `lv`
//! - `into(place)` -> fallback is to create an rvalue with `as_rvalue` and assign it to `place`
//! - `as_rvalue` -> fallback is to create an Operand with `as_operand` and use `Rvalue::Use`
//! - `as_operand` -> either invokes `as_constant` or `as_temp`
//! - `as_constant` -> (no fallback)
//! - `as_temp` -> creates a temporary and either calls `as_lvalue` or `into`
//! - `as_lvalue` -> for rvalues, falls back to `as_temp` and returns that
//! - `as_temp` -> creates a temporary and either calls `as_place` or `into`
//! - `as_place` -> for rvalues, falls back to `as_temp` and returns that
//!
//! As you can see, there is a cycle where `into` can (in theory) fallback to `as_temp`
//! which can fallback to `into`. So if one of the `ExprKind` variants is not, in fact,
@ -68,10 +68,10 @@
//! Of those fallbacks, the most interesting one is `as_temp`, because
//! it discriminates based on the category of the expression. This is
//! basically the point where the "by value" operations are bridged
//! over to the "by reference" mode (`as_lvalue`).
//! over to the "by reference" mode (`as_place`).
mod as_constant;
mod as_lvalue;
mod as_place;
mod as_rvalue;
mod as_operand;
mod as_temp;
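The handoff cycle described above can be compressed into a few lines. In this sketch (hypothetical helper names, with the category tracked explicitly rather than derived from the expression kind), `as_place` returns place expressions directly and routes rvalues through a temporary:

```rust
#[derive(Clone, Copy, Debug)]
enum Category {
    Place,
    Rvalue,
}

struct Expr {
    category: Category,
    repr: &'static str,
}

// `as_place`: places are returned directly; rvalues fall back to
// `as_temp`, which evaluates into a fresh temporary.
fn as_place(expr: &Expr) -> String {
    match expr.category {
        Category::Place => expr.repr.to_string(),
        Category::Rvalue => as_temp(expr),
    }
}

fn as_temp(expr: &Expr) -> String {
    format!("tmp := {}", expr.repr)
}

fn main() {
    let field = Expr { category: Category::Place, repr: "x.f" };
    let sum = Expr { category: Category::Rvalue, repr: "a + b" };
    println!("{}", as_place(&field)); // x.f
    println!("{}", as_place(&sum));   // tmp := a + b
}
```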


@ -41,14 +41,14 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> {
// dropped.
if this.hir.needs_drop(lhs.ty) {
let rhs = unpack!(block = this.as_local_operand(block, rhs));
let lhs = unpack!(block = this.as_lvalue(block, lhs));
let lhs = unpack!(block = this.as_place(block, lhs));
unpack!(block = this.build_drop_and_replace(
block, lhs_span, lhs, rhs
));
block.unit()
} else {
let rhs = unpack!(block = this.as_local_rvalue(block, rhs));
let lhs = unpack!(block = this.as_lvalue(block, lhs));
let lhs = unpack!(block = this.as_place(block, lhs));
this.cfg.push_assign(block, source_info, &lhs, rhs);
block.unit()
}
@ -67,7 +67,7 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> {
// As above, RTL.
let rhs = unpack!(block = this.as_local_operand(block, rhs));
let lhs = unpack!(block = this.as_lvalue(block, lhs));
let lhs = unpack!(block = this.as_place(block, lhs));
// we don't have to drop prior contents or anything
// because AssignOp is only legal for Copy types
@ -107,12 +107,12 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> {
ExprKind::Return { value } => {
block = match value {
Some(value) => {
unpack!(this.into(&Place::Local(RETURN_POINTER), block, value))
unpack!(this.into(&Place::Local(RETURN_PLACE), block, value))
}
None => {
this.cfg.push_assign_unit(block,
source_info,
&Place::Local(RETURN_POINTER));
&Place::Local(RETURN_PLACE));
block
}
};
@ -123,7 +123,7 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> {
}
ExprKind::InlineAsm { asm, outputs, inputs } => {
let outputs = outputs.into_iter().map(|output| {
unpack!(block = this.as_lvalue(block, output))
unpack!(block = this.as_place(block, output))
}).collect();
let inputs = inputs.into_iter().map(|input| {
unpack!(block = this.as_local_operand(block, input))


@ -36,7 +36,7 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> {
discriminant: ExprRef<'tcx>,
arms: Vec<Arm<'tcx>>)
-> BlockAnd<()> {
let discriminant_lvalue = unpack!(block = self.as_lvalue(block, discriminant));
let discriminant_place = unpack!(block = self.as_place(block, discriminant));
let mut arm_blocks = ArmBlocks {
blocks: arms.iter()
@ -77,7 +77,7 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> {
(pre_binding_block, next_candidate_pre_binding_block))| {
Candidate {
span: pattern.span,
match_pairs: vec![MatchPair::new(discriminant_lvalue.clone(), pattern)],
match_pairs: vec![MatchPair::new(discriminant_place.clone(), pattern)],
bindings: vec![],
guard,
arm_index,
@ -91,7 +91,7 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> {
self.cfg.terminate(*pre_binding_blocks.last().unwrap(),
outer_source_info, TerminatorKind::Unreachable);
// this will generate code to test discriminant_lvalue and
// this will generate code to test discriminant_place and
// branch to the appropriate arm block
let otherwise = self.match_candidates(span, &mut arm_blocks, candidates, block);
@ -139,19 +139,19 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> {
PatternKind::Binding { mode: BindingMode::ByValue,
var,
subpattern: None, .. } => {
let lvalue = self.storage_live_binding(block, var, irrefutable_pat.span);
unpack!(block = self.into(&lvalue, block, initializer));
let place = self.storage_live_binding(block, var, irrefutable_pat.span);
unpack!(block = self.into(&place, block, initializer));
self.schedule_drop_for_binding(var, irrefutable_pat.span);
block.unit()
}
_ => {
let lvalue = unpack!(block = self.as_lvalue(block, initializer));
self.lvalue_into_pattern(block, irrefutable_pat, &lvalue)
let place = unpack!(block = self.as_place(block, initializer));
self.place_into_pattern(block, irrefutable_pat, &place)
}
}
}
pub fn lvalue_into_pattern(&mut self,
pub fn place_into_pattern(&mut self,
mut block: BasicBlock,
irrefutable_pat: Pattern<'tcx>,
initializer: &Place<'tcx>)
@ -315,8 +315,8 @@ struct Binding<'tcx> {
#[derive(Clone, Debug)]
pub struct MatchPair<'pat, 'tcx:'pat> {
// this lvalue...
lvalue: Place<'tcx>,
// this place...
place: Place<'tcx>,
// ... must match this pattern.
pattern: &'pat Pattern<'tcx>,
@ -635,7 +635,7 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> {
match test.kind {
TestKind::SwitchInt { switch_ty, ref mut options, ref mut indices } => {
for candidate in candidates.iter() {
if !self.add_cases_to_switch(&match_pair.lvalue,
if !self.add_cases_to_switch(&match_pair.place,
candidate,
switch_ty,
options,
@ -646,7 +646,7 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> {
}
TestKind::Switch { adt_def: _, ref mut variants} => {
for candidate in candidates.iter() {
if !self.add_variants_to_switch(&match_pair.lvalue,
if !self.add_variants_to_switch(&match_pair.place,
candidate,
variants) {
break;
@ -661,7 +661,7 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> {
// vector of candidates. Those are the candidates that still
// apply if the test has that particular outcome.
debug!("match_candidates: test={:?} match_pair={:?}", test, match_pair);
let target_blocks = self.perform_test(block, &match_pair.lvalue, &test);
let target_blocks = self.perform_test(block, &match_pair.place, &test);
let mut target_candidates: Vec<_> = (0..target_blocks.len()).map(|_| vec![]).collect();
// Sort the candidates into the appropriate vector in
@ -670,7 +670,7 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> {
// that point, we stop sorting.
let tested_candidates =
candidates.iter()
.take_while(|c| self.sort_candidate(&match_pair.lvalue,
.take_while(|c| self.sort_candidate(&match_pair.place,
&test,
c,
&mut target_candidates))


@ -10,11 +10,11 @@
//! Simplifying Candidates
//!
//! *Simplifying* a match pair `lvalue @ pattern` means breaking it down
//! *Simplifying* a match pair `place @ pattern` means breaking it down
//! into bindings or other, simpler match pairs. For example:
//!
//! - `lvalue @ (P1, P2)` can be simplified to `[lvalue.0 @ P1, lvalue.1 @ P2]`
//! - `lvalue @ x` can be simplified to `[]` by binding `x` to `lvalue`
//! - `place @ (P1, P2)` can be simplified to `[place.0 @ P1, place.1 @ P2]`
//! - `place @ x` can be simplified to `[]` by binding `x` to `place`
//!
//! The `simplify_candidate` routine just repeatedly applies these
//! sort of simplifications until there is nothing left to
@ -73,7 +73,7 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> {
name,
mutability,
span: match_pair.pattern.span,
source: match_pair.lvalue.clone(),
source: match_pair.place.clone(),
var_id: var,
var_ty: ty,
binding_mode: mode,
@ -81,7 +81,7 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> {
if let Some(subpattern) = subpattern.as_ref() {
// this is the `x @ P` case; have to keep matching against `P` now
candidate.match_pairs.push(MatchPair::new(match_pair.lvalue, subpattern));
candidate.match_pairs.push(MatchPair::new(match_pair.place, subpattern));
}
Ok(())
@ -105,8 +105,8 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> {
}
});
if irrefutable {
let lvalue = match_pair.lvalue.downcast(adt_def, variant_index);
candidate.match_pairs.extend(self.field_match_pairs(lvalue, subpatterns));
let place = match_pair.place.downcast(adt_def, variant_index);
candidate.match_pairs.extend(self.field_match_pairs(place, subpatterns));
Ok(())
} else {
Err(match_pair)
@ -115,7 +115,7 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> {
PatternKind::Array { ref prefix, ref slice, ref suffix } => {
self.prefix_slice_suffix(&mut candidate.match_pairs,
&match_pair.lvalue,
&match_pair.place,
prefix,
slice.as_ref(),
suffix);
@ -125,13 +125,13 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> {
PatternKind::Leaf { ref subpatterns } => {
// tuple struct, match subpats (if any)
candidate.match_pairs
.extend(self.field_match_pairs(match_pair.lvalue, subpatterns));
.extend(self.field_match_pairs(match_pair.place, subpatterns));
Ok(())
}
PatternKind::Deref { ref subpattern } => {
let lvalue = match_pair.lvalue.deref();
candidate.match_pairs.push(MatchPair::new(lvalue, subpattern));
let place = match_pair.place.deref();
candidate.match_pairs.push(MatchPair::new(place, subpattern));
Ok(())
}
}
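The two simplification rules from the module doc above, as a runnable sketch: a binding pattern bottoms out by recording `(name, place)`, and a tuple pattern splits into one simpler match pair per field of the place:

```rust
#[derive(Clone, Debug)]
enum Pattern {
    Binding(&'static str),
    Tuple(Vec<Pattern>),
}

#[derive(Clone, Debug)]
enum Place {
    Local(u32),
    Field(Box<Place>, usize),
}

fn simplify(place: Place, pattern: &Pattern, bindings: &mut Vec<(String, Place)>) {
    match *pattern {
        // `place @ x` simplifies to no pairs, binding `x` to `place`.
        Pattern::Binding(name) => bindings.push((name.to_string(), place)),
        // `place @ (P1, P2)` becomes `place.0 @ P1`, `place.1 @ P2`.
        Pattern::Tuple(ref subpatterns) => {
            for (i, sub) in subpatterns.iter().enumerate() {
                simplify(Place::Field(Box::new(place.clone()), i), sub, bindings);
            }
        }
    }
}

fn main() {
    let pat = Pattern::Tuple(vec![Pattern::Binding("a"), Pattern::Binding("b")]);
    let mut bindings = Vec::new();
    simplify(Place::Local(1), &pat, &mut bindings);
    println!("{:?}", bindings); // a -> _1.0, b -> _1.1
}
```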


@ -109,21 +109,21 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> {
}
pub fn add_cases_to_switch<'pat>(&mut self,
test_lvalue: &Place<'tcx>,
test_place: &Place<'tcx>,
candidate: &Candidate<'pat, 'tcx>,
switch_ty: Ty<'tcx>,
options: &mut Vec<&'tcx ty::Const<'tcx>>,
indices: &mut FxHashMap<&'tcx ty::Const<'tcx>, usize>)
-> bool
{
let match_pair = match candidate.match_pairs.iter().find(|mp| mp.lvalue == *test_lvalue) {
let match_pair = match candidate.match_pairs.iter().find(|mp| mp.place == *test_place) {
Some(match_pair) => match_pair,
_ => { return false; }
};
match *match_pair.pattern.kind {
PatternKind::Constant { value } => {
// if the lvalues match, the type should match
// if the places match, the type should match
assert_eq!(match_pair.pattern.ty, switch_ty);
indices.entry(value)
@ -150,12 +150,12 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> {
}
pub fn add_variants_to_switch<'pat>(&mut self,
test_lvalue: &Place<'tcx>,
test_place: &Place<'tcx>,
candidate: &Candidate<'pat, 'tcx>,
variants: &mut BitVector)
-> bool
{
let match_pair = match candidate.match_pairs.iter().find(|mp| mp.lvalue == *test_lvalue) {
let match_pair = match candidate.match_pairs.iter().find(|mp| mp.place == *test_place) {
Some(match_pair) => match_pair,
_ => { return false; }
};
@ -177,7 +177,7 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> {
/// Generates the code to perform a test.
pub fn perform_test(&mut self,
block: BasicBlock,
lvalue: &Place<'tcx>,
place: &Place<'tcx>,
test: &Test<'tcx>)
-> Vec<BasicBlock> {
let source_info = self.source_info(test.span);
@ -212,7 +212,7 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> {
let discr_ty = adt_def.repr.discr_type().to_ty(tcx);
let discr = self.temp(discr_ty, test.span);
self.cfg.push_assign(block, source_info, &discr,
Rvalue::Discriminant(lvalue.clone()));
Rvalue::Discriminant(place.clone()));
assert_eq!(values.len() + 1, targets.len());
self.cfg.terminate(block, source_info, TerminatorKind::SwitchInt {
discr: Operand::Move(discr),
@ -233,7 +233,7 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> {
ConstVal::Bool(false) => vec![false_bb, true_bb],
v => span_bug!(test.span, "expected boolean value but got {:?}", v)
};
(ret, TerminatorKind::if_(self.hir.tcx(), Operand::Copy(lvalue.clone()),
(ret, TerminatorKind::if_(self.hir.tcx(), Operand::Copy(place.clone()),
true_bb, false_bb))
} else {
// The switch may be inexhaustive so we
@ -248,7 +248,7 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> {
v.val.to_const_int().expect("switching on integral")
).collect();
(targets.clone(), TerminatorKind::SwitchInt {
discr: Operand::Copy(lvalue.clone()),
discr: Operand::Copy(place.clone()),
switch_ty,
values: From::from(values),
targets,
@ -259,14 +259,14 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> {
}
TestKind::Eq { value, mut ty } => {
let mut val = Operand::Copy(lvalue.clone());
let mut val = Operand::Copy(place.clone());
// If we're using b"..." as a pattern, we need to insert an
// unsizing coercion, as the byte string has the type &[u8; N].
let expect = if let ConstVal::ByteStr(bytes) = value.val {
let tcx = self.hir.tcx();
// Unsize the lvalue to &[u8], too, if necessary.
// Unsize the place to &[u8], too, if necessary.
if let ty::TyRef(region, mt) = ty.sty {
if let ty::TyArray(_, _) = mt.ty.sty {
ty = tcx.mk_imm_ref(region, tcx.mk_slice(tcx.types.u8));
@ -335,7 +335,7 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> {
// Test `val` by computing `lo <= val && val <= hi`, using primitive comparisons.
let lo = self.literal_operand(test.span, ty.clone(), lo.clone());
let hi = self.literal_operand(test.span, ty.clone(), hi.clone());
let val = Operand::Copy(lvalue.clone());
let val = Operand::Copy(place.clone());
let fail = self.cfg.start_new_block();
let block = self.compare(block, fail, test.span, BinOp::Le, lo, val.clone());
@ -352,9 +352,9 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> {
let (actual, result) = (self.temp(usize_ty, test.span),
self.temp(bool_ty, test.span));
// actual = len(lvalue)
// actual = len(place)
self.cfg.push_assign(block, source_info,
&actual, Rvalue::Len(lvalue.clone()));
&actual, Rvalue::Len(place.clone()));
// expected = <N>
let expected = self.push_usize(block, source_info, len);
@ -399,7 +399,7 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> {
target_block
}
/// Given that we are performing `test` against `test_lvalue`,
/// Given that we are performing `test` against `test_place`,
/// this job sorts out what the status of `candidate` will be
/// after the test. The `resulting_candidates` vector stores, for
/// each possible outcome of `test`, a vector of the candidates
@ -430,12 +430,12 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> {
/// not apply to this candidate, but it might be we can get
/// tighter match code if we do something a bit different.
pub fn sort_candidate<'pat>(&mut self,
test_lvalue: &Place<'tcx>,
test_place: &Place<'tcx>,
test: &Test<'tcx>,
candidate: &Candidate<'pat, 'tcx>,
resulting_candidates: &mut [Vec<Candidate<'pat, 'tcx>>])
-> bool {
// Find the match_pair for this lvalue (if any). At present,
// Find the match_pair for this place (if any). At present,
// afaik, there can be at most one. (In the future, if we
// adopted a more general `@` operator, there might be more
// than one, but it'd be very unusual to have two sides that
@ -443,12 +443,12 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> {
// away.)
let tested_match_pair = candidate.match_pairs.iter()
.enumerate()
.filter(|&(_, mp)| mp.lvalue == *test_lvalue)
.filter(|&(_, mp)| mp.place == *test_place)
.next();
let (match_pair_index, match_pair) = match tested_match_pair {
Some(pair) => pair,
None => {
// We are not testing this lvalue. Therefore, this
// We are not testing this place. Therefore, this
// candidate applies to ALL outcomes.
return false;
}
@ -614,7 +614,7 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> {
self.candidate_without_match_pair(match_pair_index, candidate);
self.prefix_slice_suffix(
&mut new_candidate.match_pairs,
&candidate.match_pairs[match_pair_index].lvalue,
&candidate.match_pairs[match_pair_index].place,
prefix,
opt_slice,
suffix);
@ -635,15 +635,15 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> {
// we want to create a set of derived match-patterns like
// `(x as Variant).0 @ P1` and `(x as Variant).1 @ P1`.
let elem = ProjectionElem::Downcast(adt_def, variant_index);
let downcast_lvalue = match_pair.lvalue.clone().elem(elem); // `(x as Variant)`
let downcast_place = match_pair.place.clone().elem(elem); // `(x as Variant)`
let consequent_match_pairs =
subpatterns.iter()
.map(|subpattern| {
// e.g., `(x as Variant).0`
let lvalue = downcast_lvalue.clone().field(subpattern.field,
let place = downcast_place.clone().field(subpattern.field,
subpattern.pattern.ty);
// e.g., `(x as Variant).0 @ P1`
MatchPair::new(lvalue, &subpattern.pattern)
MatchPair::new(place, &subpattern.pattern)
});
// In addition, we need all the other match pairs from the old candidate.


@ -16,21 +16,21 @@ use std::u32;
impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> {
pub fn field_match_pairs<'pat>(&mut self,
lvalue: Place<'tcx>,
place: Place<'tcx>,
subpatterns: &'pat [FieldPattern<'tcx>])
-> Vec<MatchPair<'pat, 'tcx>> {
subpatterns.iter()
.map(|fieldpat| {
let lvalue = lvalue.clone().field(fieldpat.field,
let place = place.clone().field(fieldpat.field,
fieldpat.pattern.ty);
MatchPair::new(lvalue, &fieldpat.pattern)
MatchPair::new(place, &fieldpat.pattern)
})
.collect()
}
pub fn prefix_slice_suffix<'pat>(&mut self,
match_pairs: &mut Vec<MatchPair<'pat, 'tcx>>,
lvalue: &Place<'tcx>,
place: &Place<'tcx>,
prefix: &'pat [Pattern<'tcx>],
opt_slice: Option<&'pat Pattern<'tcx>>,
suffix: &'pat [Pattern<'tcx>]) {
@ -47,13 +47,13 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> {
min_length,
from_end: false,
};
let lvalue = lvalue.clone().elem(elem);
MatchPair::new(lvalue, subpattern)
let place = place.clone().elem(elem);
MatchPair::new(place, subpattern)
})
);
if let Some(subslice_pat) = opt_slice {
let subslice = lvalue.clone().elem(ProjectionElem::Subslice {
let subslice = place.clone().elem(ProjectionElem::Subslice {
from: prefix.len() as u32,
to: suffix.len() as u32
});
@ -70,17 +70,17 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> {
min_length,
from_end: true,
};
let lvalue = lvalue.clone().elem(elem);
MatchPair::new(lvalue, subpattern)
let place = place.clone().elem(elem);
MatchPair::new(place, subpattern)
})
);
}
}
impl<'pat, 'tcx> MatchPair<'pat, 'tcx> {
pub fn new(lvalue: Place<'tcx>, pattern: &'pat Pattern<'tcx>) -> MatchPair<'pat, 'tcx> {
pub fn new(place: Place<'tcx>, pattern: &'pat Pattern<'tcx>) -> MatchPair<'pat, 'tcx> {
MatchPair {
lvalue,
place,
pattern,
slice_len_checked: false,
}


@ -29,10 +29,10 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> {
/// call `schedule_drop` once the temporary is initialized.
pub fn temp(&mut self, ty: Ty<'tcx>, span: Span) -> Place<'tcx> {
let temp = self.local_decls.push(LocalDecl::new_temp(ty, span));
let lvalue = Place::Local(temp);
let place = Place::Local(temp);
debug!("temp: created temp {:?} with type {:?}",
lvalue, self.local_decls[temp].ty);
lvalue
place, self.local_decls[temp].ty);
place
}
pub fn literal_operand(&mut self,
@ -134,13 +134,13 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> {
temp
}
pub fn consume_by_copy_or_move(&self, lvalue: Place<'tcx>) -> Operand<'tcx> {
pub fn consume_by_copy_or_move(&self, place: Place<'tcx>) -> Operand<'tcx> {
let tcx = self.hir.tcx();
let ty = lvalue.ty(&self.local_decls, tcx).to_ty(tcx);
let ty = place.ty(&self.local_decls, tcx).to_ty(tcx);
if self.hir.type_moves_by_default(ty, DUMMY_SP) {
Operand::Move(lvalue)
Operand::Move(place)
} else {
Operand::Copy(lvalue)
Operand::Copy(place)
}
}
}
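The rule this function encodes is simply "move unless the type is `Copy`". A standalone sketch of the same decision, with a plain bool standing in for `type_moves_by_default` and a toy `Operand` (assumptions, not rustc's types):

#[derive(Debug)]
enum Operand {
    Copy(&'static str),
    Move(&'static str),
}

// Consume a place by move when its type moves by default, by copy otherwise.
fn consume_by_copy_or_move(place: &'static str, moves_by_default: bool) -> Operand {
    if moves_by_default {
        Operand::Move(place)
    } else {
        Operand::Copy(place)
    }
}

fn main() {
    println!("{:?}", consume_by_copy_or_move("_1", true));  // Move("_1")
    println!("{:?}", consume_by_copy_or_move("_2", false)); // Copy("_2")
}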

View file

@ -480,7 +480,7 @@ fn construct_const<'a, 'gcx, 'tcx>(hir: Cx<'a, 'gcx, 'tcx>,
let mut block = START_BLOCK;
let expr = builder.hir.mirror(ast_expr);
unpack!(block = builder.into_expr(&Place::Local(RETURN_POINTER), block, expr));
unpack!(block = builder.into_expr(&Place::Local(RETURN_PLACE), block, expr));
let source_info = builder.source_info(span);
builder.cfg.terminate(block, source_info, TerminatorKind::Return);
@ -523,7 +523,7 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> {
push_unsafe_count: 0,
unpushed_unsafe: safety,
breakable_scopes: vec![],
local_decls: IndexVec::from_elem_n(LocalDecl::new_return_pointer(return_ty,
local_decls: IndexVec::from_elem_n(LocalDecl::new_return_place(return_ty,
span), 1),
var_indices: NodeMap(),
unit_temp: None,
@ -597,9 +597,9 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> {
let mut scope = None;
// Bind the argument patterns
for (index, &(ty, pattern)) in arguments.iter().enumerate() {
// Function arguments always get the first Local indices after the return pointer
// Function arguments always get the first Local indices after the return place
let local = Local::new(index + 1);
let lvalue = Place::Local(local);
let place = Place::Local(local);
if let Some(pattern) = pattern {
let pattern = self.hir.pattern_from_hir(pattern);
@ -613,14 +613,14 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> {
_ => {
scope = self.declare_bindings(scope, ast_body.span,
LintLevel::Inherited, &pattern);
unpack!(block = self.lvalue_into_pattern(block, pattern, &lvalue));
unpack!(block = self.place_into_pattern(block, pattern, &place));
}
}
}
// Make sure we drop (parts of) the argument even when not matched on.
self.schedule_drop(pattern.as_ref().map_or(ast_body.span, |pat| pat.span),
argument_scope, &lvalue, ty);
argument_scope, &place, ty);
}
@ -630,7 +630,7 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> {
}
let body = self.hir.mirror(ast_body);
self.into(&Place::Local(RETURN_POINTER), block, body)
self.into(&Place::Local(RETURN_PLACE), block, body)
}
fn get_unit_temp(&mut self) -> Place<'tcx> {

View file

@ -39,10 +39,10 @@ mapping is from one scope to a vector of SEME regions.
### Drops
The primary purpose for scopes is to insert drops: while translating
the contents, we also accumulate lvalues that need to be dropped upon
the contents, we also accumulate places that need to be dropped upon
exit from each scope. This is done by calling `schedule_drop`. Once a
drop is scheduled, whenever we branch out we will insert drops of all
those lvalues onto the outgoing edge. Note that we don't know the full
those places onto the outgoing edge. Note that we don't know the full
set of scheduled drops up front, and so whenever we exit from the
scope we only drop the values scheduled thus far. For example, consider
the scope S corresponding to this loop:
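The loop itself lies outside this hunk; an illustrative stand-in in ordinary Rust (not quoted from the source) shows the asymmetry the text describes — on the `break` edge only `x` has been scheduled, so only `x` is dropped there:

fn main() {
    let mut n = 0;
    loop {
        let x = String::from("x"); // drop of `x` scheduled in scope S
        n += 1;
        if n > 1 {
            break;                 // branching out here drops only `x`
        }
        let y = String::from("y"); // drop of `y` scheduled later
        println!("{} {}", x, y);
        // reaching the end of the iteration drops `y`, then `x`
    }
}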
@ -120,7 +120,7 @@ pub struct Scope<'tcx> {
/// * freeing up stack space has no effect during unwinding
needs_cleanup: bool,
/// set of lvalues to drop when exiting this scope. This starts
/// set of places to drop when exiting this scope. This starts
/// out empty but grows as variables are declared during the
/// building process. This is a stack, so we always drop from the
/// end of the vector (top of the stack) first.
@ -138,10 +138,10 @@ pub struct Scope<'tcx> {
#[derive(Debug)]
struct DropData<'tcx> {
/// span where drop obligation was incurred (typically where lvalue was declared)
/// span where drop obligation was incurred (typically where place was declared)
span: Span,
/// lvalue to drop
/// place to drop
location: Place<'tcx>,
/// Whether this is a full value Drop, or just a StorageDead.
@ -608,19 +608,19 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> {
// Scheduling drops
// ================
/// Indicates that `lvalue` should be dropped on exit from
/// Indicates that `place` should be dropped on exit from
/// `region_scope`.
pub fn schedule_drop(&mut self,
span: Span,
region_scope: region::Scope,
lvalue: &Place<'tcx>,
lvalue_ty: Ty<'tcx>) {
let needs_drop = self.hir.needs_drop(lvalue_ty);
place: &Place<'tcx>,
place_ty: Ty<'tcx>) {
let needs_drop = self.hir.needs_drop(place_ty);
let drop_kind = if needs_drop {
DropKind::Value { cached_block: CachedBlock::default() }
} else {
// Only temps and vars need their storage dead.
match *lvalue {
match *place {
Place::Local(index) if index.index() > self.arg_count => DropKind::Storage,
_ => return
}
@ -685,13 +685,13 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> {
let scope_end = region_scope_span.with_lo(region_scope_span.hi());
scope.drops.push(DropData {
span: scope_end,
location: lvalue.clone(),
location: place.clone(),
kind: drop_kind
});
return;
}
}
span_bug!(span, "region scope {:?} not in scope to drop {:?}", region_scope, lvalue);
span_bug!(span, "region scope {:?} not in scope to drop {:?}", region_scope, place);
}
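A reduced model of the scheduling above — scopes kept as a stack, each accumulating the drops recorded so far, with the same innermost-first search and the same failure mode (toy types, not rustc's):

struct Scope {
    id: u32,
    drops: Vec<String>, // places scheduled for drop; top of stack is last
}

fn schedule_drop(scopes: &mut [Scope], region_scope: u32, place: &str) {
    // Walk scopes innermost-first, mirroring the reverse iteration above.
    for scope in scopes.iter_mut().rev() {
        if scope.id == region_scope {
            scope.drops.push(place.to_string());
            return;
        }
    }
    panic!("region scope {:?} not in scope to drop {:?}", region_scope, place);
}

fn main() {
    let mut scopes = vec![
        Scope { id: 0, drops: vec![] },
        Scope { id: 1, drops: vec![] },
    ];
    schedule_drop(&mut scopes, 1, "_3");
    assert_eq!(scopes[1].drops, ["_3"]);
}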
// Other

View file

@ -24,7 +24,7 @@ pub fn move_path_children_matching<'tcx, F>(move_data: &MoveData<'tcx>,
{
let mut next_child = move_data.move_paths[path].first_child;
while let Some(child_index) = next_child {
match move_data.move_paths[child_index].lvalue {
match move_data.move_paths[child_index].place {
mir::Place::Projection(ref proj) => {
if cond(proj) {
return Some(child_index)
@ -56,19 +56,19 @@ pub fn move_path_children_matching<'tcx, F>(move_data: &MoveData<'tcx>,
/// is no need to maintain separate drop flags to track such state.
///
/// FIXME: we have to do something for moving slice patterns.
fn lvalue_contents_drop_state_cannot_differ<'a, 'gcx, 'tcx>(tcx: TyCtxt<'a, 'gcx, 'tcx>,
fn place_contents_drop_state_cannot_differ<'a, 'gcx, 'tcx>(tcx: TyCtxt<'a, 'gcx, 'tcx>,
mir: &Mir<'tcx>,
lv: &mir::Place<'tcx>) -> bool {
let ty = lv.ty(mir, tcx).to_ty(tcx);
place: &mir::Place<'tcx>) -> bool {
let ty = place.ty(mir, tcx).to_ty(tcx);
match ty.sty {
ty::TyArray(..) | ty::TySlice(..) | ty::TyRef(..) | ty::TyRawPtr(..) => {
debug!("lvalue_contents_drop_state_cannot_differ lv: {:?} ty: {:?} refd => true",
lv, ty);
debug!("place_contents_drop_state_cannot_differ place: {:?} ty: {:?} refd => true",
place, ty);
true
}
ty::TyAdt(def, _) if (def.has_dtor(tcx) && !def.is_box()) || def.is_union() => {
debug!("lvalue_contents_drop_state_cannot_differ lv: {:?} ty: {:?} Drop => true",
lv, ty);
debug!("place_contents_drop_state_cannot_differ place: {:?} ty: {:?} Drop => true",
place, ty);
true
}
_ => {
@ -109,8 +109,8 @@ pub(crate) fn on_all_children_bits<'a, 'gcx, 'tcx, F>(
move_data: &MoveData<'tcx>,
path: MovePathIndex) -> bool
{
lvalue_contents_drop_state_cannot_differ(
tcx, mir, &move_data.move_paths[path].lvalue)
place_contents_drop_state_cannot_differ(
tcx, mir, &move_data.move_paths[path].place)
}
fn on_all_children_bits<'a, 'gcx, 'tcx, F>(
@ -145,9 +145,9 @@ pub(crate) fn on_all_drop_children_bits<'a, 'gcx, 'tcx, F>(
where F: FnMut(MovePathIndex)
{
on_all_children_bits(tcx, mir, &ctxt.move_data, path, |child| {
let lvalue = &ctxt.move_data.move_paths[path].lvalue;
let ty = lvalue.ty(mir, tcx).to_ty(tcx);
debug!("on_all_drop_children_bits({:?}, {:?} : {:?})", path, lvalue, ty);
let place = &ctxt.move_data.move_paths[path].place;
let ty = place.ty(mir, tcx).to_ty(tcx);
debug!("on_all_drop_children_bits({:?}, {:?} : {:?})", path, place, ty);
let gcx = tcx.global_tcx();
let erased_ty = gcx.lift(&tcx.erase_regions(&ty)).unwrap();
@ -168,8 +168,8 @@ pub(crate) fn drop_flag_effects_for_function_entry<'a, 'gcx, 'tcx, F>(
{
let move_data = &ctxt.move_data;
for arg in mir.args_iter() {
let lvalue = mir::Place::Local(arg);
let lookup_result = move_data.rev_lookup.find(&lvalue);
let place = mir::Place::Local(arg);
let lookup_result = move_data.rev_lookup.find(&place);
on_lookup_result_bits(tcx, mir, move_data,
lookup_result,
|mpi| callback(mpi, DropFlagState::Present));
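The type-shape test earlier in this file condenses to a small total function; this sketch mirrors only the shapes matched above, with a toy `Ty` in place of `ty::TypeVariants` (an assumption, not the real type):

#[allow(dead_code)]
enum Ty {
    Ref, RawPtr, Array, Slice, // can't move out of the interior of these
    AdtWithDtor, Union,        // moving fields out is forbidden
    Other,
}

// One drop flag per parent path suffices exactly when the interior can
// never be initialized separately from the whole.
fn drop_state_cannot_differ(ty: &Ty) -> bool {
    match ty {
        Ty::Ref | Ty::RawPtr | Ty::Array | Ty::Slice => true,
        Ty::AdtWithDtor | Ty::Union => true,
        Ty::Other => false,
    }
}

fn main() {
    assert!(drop_state_cannot_differ(&Ty::Union));
    assert!(!drop_state_cannot_differ(&Ty::Other));
}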

View file

@ -42,7 +42,7 @@ pub struct Borrows<'a, 'gcx: 'tcx, 'tcx: 'a> {
}
// temporarily allow some dead fields: `kind` and `region` will be
// needed by borrowck; `lvalue` will probably be a MovePathIndex when
// needed by borrowck; `place` will probably be a MovePathIndex when
// that is extended to include borrowed data paths.
#[allow(dead_code)]
#[derive(Debug)]
@ -50,7 +50,7 @@ pub struct BorrowData<'tcx> {
pub(crate) location: Location,
pub(crate) kind: mir::BorrowKind,
pub(crate) region: Region<'tcx>,
pub(crate) lvalue: mir::Place<'tcx>,
pub(crate) place: mir::Place<'tcx>,
}
impl<'tcx> fmt::Display for BorrowData<'tcx> {
@ -62,7 +62,7 @@ impl<'tcx> fmt::Display for BorrowData<'tcx> {
};
let region = format!("{}", self.region);
let region = if region.len() > 0 { format!("{} ", region) } else { region };
write!(w, "&{}{}{:?}", region, kind, self.lvalue)
write!(w, "&{}{}{:?}", region, kind, self.place)
}
}
@ -101,11 +101,11 @@ impl<'a, 'gcx, 'tcx> Borrows<'a, 'gcx, 'tcx> {
fn visit_rvalue(&mut self,
rvalue: &mir::Rvalue<'tcx>,
location: mir::Location) {
if let mir::Rvalue::Ref(region, kind, ref lvalue) = *rvalue {
if is_unsafe_lvalue(self.tcx, self.mir, lvalue) { return; }
if let mir::Rvalue::Ref(region, kind, ref place) = *rvalue {
if is_unsafe_place(self.tcx, self.mir, place) { return; }
let borrow = BorrowData {
location: location, kind: kind, region: region, lvalue: lvalue.clone(),
location: location, kind: kind, region: region, place: place.clone(),
};
let idx = self.idx_vec.push(borrow);
self.location_map.insert(location, idx);
@ -206,8 +206,8 @@ impl<'a, 'gcx, 'tcx> BitDenotation for Borrows<'a, 'gcx, 'tcx> {
}
mir::StatementKind::Assign(_, ref rhs) => {
if let mir::Rvalue::Ref(region, _, ref lvalue) = *rhs {
if is_unsafe_lvalue(self.tcx, self.mir, lvalue) { return; }
if let mir::Rvalue::Ref(region, _, ref place) = *rhs {
if is_unsafe_place(self.tcx, self.mir, place) { return; }
let index = self.location_map.get(&location).unwrap_or_else(|| {
panic!("could not find BorrowIndex for location {:?}", location);
});
@ -269,7 +269,7 @@ impl<'a, 'gcx, 'tcx> BitDenotation for Borrows<'a, 'gcx, 'tcx> {
_in_out: &mut IdxSet<BorrowIndex>,
_call_bb: mir::BasicBlock,
_dest_bb: mir::BasicBlock,
_dest_lval: &mir::Place) {
_dest_place: &mir::Place) {
// there are no effects on the region scopes from method calls.
}
}
@ -288,15 +288,15 @@ impl<'a, 'gcx, 'tcx> DataflowOperator for Borrows<'a, 'gcx, 'tcx> {
}
}
fn is_unsafe_lvalue<'a, 'gcx: 'tcx, 'tcx: 'a>(
fn is_unsafe_place<'a, 'gcx: 'tcx, 'tcx: 'a>(
tcx: TyCtxt<'a, 'gcx, 'tcx>,
mir: &'a Mir<'tcx>,
lvalue: &mir::Place<'tcx>
place: &mir::Place<'tcx>
) -> bool {
use self::mir::Place::*;
use self::mir::ProjectionElem;
match *lvalue {
match *place {
Local(_) => false,
Static(ref static_) => tcx.is_static_mut(static_.def_id),
Projection(ref proj) => {
@ -306,13 +306,13 @@ fn is_unsafe_lvalue<'a, 'gcx: 'tcx, 'tcx: 'a>(
ProjectionElem::Subslice { .. } |
ProjectionElem::ConstantIndex { .. } |
ProjectionElem::Index(_) => {
is_unsafe_lvalue(tcx, mir, &proj.base)
is_unsafe_place(tcx, mir, &proj.base)
}
ProjectionElem::Deref => {
let ty = proj.base.ty(mir, tcx).to_ty(tcx);
match ty.sty {
ty::TyRawPtr(..) => true,
_ => is_unsafe_lvalue(tcx, mir, &proj.base),
_ => is_unsafe_place(tcx, mir, &proj.base),
}
}
}

View file

@ -368,11 +368,11 @@ impl<'a, 'gcx, 'tcx> BitDenotation for MaybeInitializedLvals<'a, 'gcx, 'tcx> {
in_out: &mut IdxSet<MovePathIndex>,
_call_bb: mir::BasicBlock,
_dest_bb: mir::BasicBlock,
dest_lval: &mir::Place) {
dest_place: &mir::Place) {
// when a call returns successfully, that means we need to set
// the bits for that dest_lval to 1 (initialized).
// the bits for that dest_place to 1 (initialized).
on_lookup_result_bits(self.tcx, self.mir, self.move_data(),
self.move_data().rev_lookup.find(dest_lval),
self.move_data().rev_lookup.find(dest_place),
|mpi| { in_out.add(&mpi); });
}
}
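The initializedness analyses in this file treat the success edge of a call the same way, differing only in gen vs. kill; a toy bitset makes the pairing explicit (plain `HashSet` standing in for `IdxSet`, an assumption):

use std::collections::HashSet;

// MaybeInitializedLvals: the destination becomes initialized (gen the bit).
fn call_return_init(in_out: &mut HashSet<usize>, dest_path: usize) {
    in_out.insert(dest_path);
}

// MaybeUninitializedLvals: the destination is no longer uninitialized (kill it).
fn call_return_uninit(in_out: &mut HashSet<usize>, dest_path: usize) {
    in_out.remove(&dest_path);
}

fn main() {
    let mut maybe_init = HashSet::new();
    call_return_init(&mut maybe_init, 7);
    assert!(maybe_init.contains(&7));

    let mut maybe_uninit: HashSet<usize> = [7].into_iter().collect();
    call_return_uninit(&mut maybe_uninit, 7);
    assert!(!maybe_uninit.contains(&7));
}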
@ -384,7 +384,7 @@ impl<'a, 'gcx, 'tcx> BitDenotation for MaybeUninitializedLvals<'a, 'gcx, 'tcx> {
self.move_data().move_paths.len()
}
// sets on_entry bits for Arg lvalues
// sets on_entry bits for Arg places
fn start_block_effect(&self, sets: &mut BlockSets<MovePathIndex>) {
// set all bits to 1 (uninit) before gathering counterevidence
for e in sets.on_entry.words_mut() { *e = !0; }
@ -423,11 +423,11 @@ impl<'a, 'gcx, 'tcx> BitDenotation for MaybeUninitializedLvals<'a, 'gcx, 'tcx> {
in_out: &mut IdxSet<MovePathIndex>,
_call_bb: mir::BasicBlock,
_dest_bb: mir::BasicBlock,
dest_lval: &mir::Place) {
dest_place: &mir::Place) {
// when a call returns successfully, that means we need to set
// the bits for that dest_lval to 0 (initialized).
// the bits for that dest_place to 0 (initialized).
on_lookup_result_bits(self.tcx, self.mir, self.move_data(),
self.move_data().rev_lookup.find(dest_lval),
self.move_data().rev_lookup.find(dest_place),
|mpi| { in_out.remove(&mpi); });
}
}
@ -439,7 +439,7 @@ impl<'a, 'gcx, 'tcx> BitDenotation for DefinitelyInitializedLvals<'a, 'gcx, 'tcx
self.move_data().move_paths.len()
}
// sets on_entry bits for Arg lvalues
// sets on_entry bits for Arg places
fn start_block_effect(&self, sets: &mut BlockSets<MovePathIndex>) {
for e in sets.on_entry.words_mut() { *e = 0; }
@ -477,11 +477,11 @@ impl<'a, 'gcx, 'tcx> BitDenotation for DefinitelyInitializedLvals<'a, 'gcx, 'tcx
in_out: &mut IdxSet<MovePathIndex>,
_call_bb: mir::BasicBlock,
_dest_bb: mir::BasicBlock,
dest_lval: &mir::Place) {
dest_place: &mir::Place) {
// when a call returns successfully, that means we need to set
// the bits for that dest_lval to 1 (initialized).
// the bits for that dest_place to 1 (initialized).
on_lookup_result_bits(self.tcx, self.mir, self.move_data(),
self.move_data().rev_lookup.find(dest_lval),
self.move_data().rev_lookup.find(dest_place),
|mpi| { in_out.add(&mpi); });
}
}
@ -561,7 +561,7 @@ impl<'a, 'gcx, 'tcx> BitDenotation for MovingOutStatements<'a, 'gcx, 'tcx> {
in_out: &mut IdxSet<MoveOutIndex>,
_call_bb: mir::BasicBlock,
_dest_bb: mir::BasicBlock,
dest_lval: &mir::Place) {
dest_place: &mir::Place) {
let move_data = self.move_data();
let bits_per_block = self.bits_per_block();
@ -569,7 +569,7 @@ impl<'a, 'gcx, 'tcx> BitDenotation for MovingOutStatements<'a, 'gcx, 'tcx> {
on_lookup_result_bits(self.tcx,
self.mir,
move_data,
move_data.rev_lookup.find(dest_lval),
move_data.rev_lookup.find(dest_place),
|mpi| for moi in &path_map[mpi] {
assert!(moi.index() < bits_per_block);
in_out.remove(&moi);
@ -647,7 +647,7 @@ impl<'a, 'gcx, 'tcx> BitDenotation for EverInitializedLvals<'a, 'gcx, 'tcx> {
in_out: &mut IdxSet<InitIndex>,
call_bb: mir::BasicBlock,
_dest_bb: mir::BasicBlock,
_dest_lval: &mir::Place) {
_dest_place: &mir::Place) {
let move_data = self.move_data();
let bits_per_block = self.bits_per_block();
let init_loc_map = &move_data.init_loc_map;

View file

@ -62,7 +62,7 @@ impl<'a, 'tcx> BitDenotation for MaybeStorageLive<'a, 'tcx> {
_in_out: &mut IdxSet<Local>,
_call_bb: mir::BasicBlock,
_dest_bb: mir::BasicBlock,
_dest_lval: &mir::Place) {
_dest_place: &mir::Place) {
// Nothing to do when a call returns successfully
}
}

View file

@ -610,7 +610,7 @@ pub trait BitDenotation: DataflowOperator {
in_out: &mut IdxSet<Self::Idx>,
call_bb: mir::BasicBlock,
dest_bb: mir::BasicBlock,
dest_lval: &mir::Place);
dest_place: &mir::Place);
}
impl<'a, 'gcx, 'tcx: 'a, D> DataflowAnalysis<'a, 'tcx, D> where D: BitDenotation
@ -714,11 +714,11 @@ impl<'a, 'tcx: 'a, D> DataflowAnalysis<'a, 'tcx, D> where D: BitDenotation
self.propagate_bits_into_entry_set_for(in_out, changed, unwind);
}
}
if let Some((ref dest_lval, ref dest_bb)) = *destination {
if let Some((ref dest_place, ref dest_bb)) = *destination {
// N.B.: This must be done *last*, after all other
// propagation, as documented in comment above.
self.flow_state.operator.propagate_call_return(
in_out, bb, *dest_bb, dest_lval);
in_out, bb, *dest_bb, dest_place);
self.propagate_bits_into_entry_set_for(in_out, changed, dest_bb);
}
}

View file

@ -68,14 +68,14 @@ impl<'a, 'gcx, 'tcx> MoveDataBuilder<'a, 'gcx, 'tcx> {
path_map: &mut IndexVec<MovePathIndex, Vec<MoveOutIndex>>,
init_path_map: &mut IndexVec<MovePathIndex, Vec<InitIndex>>,
parent: Option<MovePathIndex>,
lvalue: Place<'tcx>)
place: Place<'tcx>)
-> MovePathIndex
{
let move_path = move_paths.push(MovePath {
next_sibling: None,
first_child: None,
parent,
lvalue,
place,
});
if let Some(parent) = parent {
@ -95,52 +95,52 @@ impl<'a, 'gcx, 'tcx> MoveDataBuilder<'a, 'gcx, 'tcx> {
}
impl<'b, 'a, 'gcx, 'tcx> Gatherer<'b, 'a, 'gcx, 'tcx> {
/// This creates a MovePath for a given lvalue, returning a `MovePathError`
/// if that lvalue can't be moved from.
/// This creates a MovePath for a given place, returning a `MovePathError`
/// if that place can't be moved from.
///
/// NOTE: lvalues behind references *do not* get a move path, which is
/// NOTE: places behind references *do not* get a move path, which is
/// problematic for borrowck.
///
/// Maybe we should have separate "borrowck" and "moveck" modes.
fn move_path_for(&mut self, lval: &Place<'tcx>)
fn move_path_for(&mut self, place: &Place<'tcx>)
-> Result<MovePathIndex, MoveError<'tcx>>
{
debug!("lookup({:?})", lval);
match *lval {
debug!("lookup({:?})", place);
match *place {
Place::Local(local) => Ok(self.builder.data.rev_lookup.locals[local]),
Place::Static(..) => {
let span = self.builder.mir.source_info(self.loc).span;
Err(MoveError::cannot_move_out_of(span, Static))
}
Place::Projection(ref proj) => {
self.move_path_for_projection(lval, proj)
self.move_path_for_projection(place, proj)
}
}
}
fn create_move_path(&mut self, lval: &Place<'tcx>) {
fn create_move_path(&mut self, place: &Place<'tcx>) {
// This is an assignment, not a move, so this not being a valid
// move path is OK.
let _ = self.move_path_for(lval);
let _ = self.move_path_for(place);
}
fn move_path_for_projection(&mut self,
lval: &Place<'tcx>,
place: &Place<'tcx>,
proj: &PlaceProjection<'tcx>)
-> Result<MovePathIndex, MoveError<'tcx>>
{
let base = try!(self.move_path_for(&proj.base));
let mir = self.builder.mir;
let tcx = self.builder.tcx;
let lv_ty = proj.base.ty(mir, tcx).to_ty(tcx);
match lv_ty.sty {
let place_ty = proj.base.ty(mir, tcx).to_ty(tcx);
match place_ty.sty {
ty::TyRef(..) | ty::TyRawPtr(..) =>
return Err(MoveError::cannot_move_out_of(mir.source_info(self.loc).span,
BorrowedContent)),
ty::TyAdt(adt, _) if adt.has_dtor(tcx) && !adt.is_box() =>
return Err(MoveError::cannot_move_out_of(mir.source_info(self.loc).span,
InteriorOfTypeWithDestructor {
container_ty: lv_ty
container_ty: place_ty
})),
// move out of union - always move the entire union
ty::TyAdt(adt, _) if adt.is_union() =>
@ -149,7 +149,7 @@ impl<'b, 'a, 'gcx, 'tcx> Gatherer<'b, 'a, 'gcx, 'tcx> {
return Err(MoveError::cannot_move_out_of(
mir.source_info(self.loc).span,
InteriorOfSliceOrArray {
ty: lv_ty, is_index: match proj.elem {
ty: place_ty, is_index: match proj.elem {
ProjectionElem::Index(..) => true,
_ => false
},
@ -159,7 +159,7 @@ impl<'b, 'a, 'gcx, 'tcx> Gatherer<'b, 'a, 'gcx, 'tcx> {
return Err(MoveError::cannot_move_out_of(
mir.source_info(self.loc).span,
InteriorOfSliceOrArray {
ty: lv_ty, is_index: true
ty: place_ty, is_index: true
})),
_ => {
// FIXME: still badly broken
@ -175,7 +175,7 @@ impl<'b, 'a, 'gcx, 'tcx> Gatherer<'b, 'a, 'gcx, 'tcx> {
&mut self.builder.data.path_map,
&mut self.builder.data.init_path_map,
Some(base),
lval.clone()
place.clone()
);
ent.insert(path);
Ok(path)
@ -265,16 +265,16 @@ struct Gatherer<'b, 'a: 'b, 'gcx: 'tcx, 'tcx: 'a> {
impl<'b, 'a, 'gcx, 'tcx> Gatherer<'b, 'a, 'gcx, 'tcx> {
fn gather_statement(&mut self, stmt: &Statement<'tcx>) {
match stmt.kind {
StatementKind::Assign(ref lval, ref rval) => {
self.create_move_path(lval);
StatementKind::Assign(ref place, ref rval) => {
self.create_move_path(place);
if let RvalueInitializationState::Shallow = rval.initialization_state() {
// Box starts out uninitialized - need to create a separate
// move-path for the interior so it will be separate from
// the exterior.
self.create_move_path(&lval.clone().deref());
self.gather_init(lval, InitKind::Shallow);
self.create_move_path(&place.clone().deref());
self.gather_init(place, InitKind::Shallow);
} else {
self.gather_init(lval, InitKind::Deep);
self.gather_init(place, InitKind::Deep);
}
self.gather_rvalue(rval);
}
@ -318,7 +318,7 @@ impl<'b, 'a, 'gcx, 'tcx> Gatherer<'b, 'a, 'gcx, 'tcx> {
Rvalue::NullaryOp(NullOp::Box, _) => {
// This returns an rvalue with uninitialized contents. We can't
// move out of it here because it is an rvalue - assignments always
// completely initialize their lvalue.
// completely initialize their place.
//
// However, this does not matter - MIR building is careful to
// only emit a shallow free for the partially-initialized
@ -339,7 +339,7 @@ impl<'b, 'a, 'gcx, 'tcx> Gatherer<'b, 'a, 'gcx, 'tcx> {
TerminatorKind::Unreachable => { }
TerminatorKind::Return => {
self.gather_move(&Place::Local(RETURN_POINTER));
self.gather_move(&Place::Local(RETURN_PLACE));
}
TerminatorKind::Assert { .. } |
@ -376,16 +376,16 @@ impl<'b, 'a, 'gcx, 'tcx> Gatherer<'b, 'a, 'gcx, 'tcx> {
match *operand {
Operand::Constant(..) |
Operand::Copy(..) => {} // not-a-move
Operand::Move(ref lval) => { // a move
self.gather_move(lval);
Operand::Move(ref place) => { // a move
self.gather_move(place);
}
}
}
fn gather_move(&mut self, lval: &Place<'tcx>) {
debug!("gather_move({:?}, {:?})", self.loc, lval);
fn gather_move(&mut self, place: &Place<'tcx>) {
debug!("gather_move({:?}, {:?})", self.loc, place);
let path = match self.move_path_for(lval) {
let path = match self.move_path_for(place) {
Ok(path) | Err(MoveError::UnionMove { path }) => path,
Err(error @ MoveError::IllegalMove { .. }) => {
self.builder.errors.push(error);
@ -395,16 +395,16 @@ impl<'b, 'a, 'gcx, 'tcx> Gatherer<'b, 'a, 'gcx, 'tcx> {
let move_out = self.builder.data.moves.push(MoveOut { path: path, source: self.loc });
debug!("gather_move({:?}, {:?}): adding move {:?} of {:?}",
self.loc, lval, move_out, path);
self.loc, place, move_out, path);
self.builder.data.path_map[path].push(move_out);
self.builder.data.loc_map[self.loc].push(move_out);
}
fn gather_init(&mut self, lval: &Place<'tcx>, kind: InitKind) {
debug!("gather_init({:?}, {:?})", self.loc, lval);
fn gather_init(&mut self, place: &Place<'tcx>, kind: InitKind) {
debug!("gather_init({:?}, {:?})", self.loc, place);
if let LookupResult::Exact(path) = self.builder.data.rev_lookup.find(lval) {
if let LookupResult::Exact(path) = self.builder.data.rev_lookup.find(place) {
let init = self.builder.data.inits.push(Init {
span: self.builder.mir.source_info(self.loc).span,
path,
@ -412,7 +412,7 @@ impl<'b, 'a, 'gcx, 'tcx> Gatherer<'b, 'a, 'gcx, 'tcx> {
});
debug!("gather_init({:?}, {:?}): adding init {:?} of {:?}",
self.loc, lval, init, path);
self.loc, place, init, path);
self.builder.data.init_path_map[path].push(init);
self.builder.data.init_loc_map[self.loc].push(init);
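Condensed, the bookkeeping in `gather_move`/`gather_init` is: allocate an index, then record it under both its path and its source location. A toy version with plain maps in place of rustc's `IndexVec`s (an assumption):

use std::collections::HashMap;

#[derive(Default)]
struct MoveData {
    moves: Vec<(usize, (usize, usize))>,          // MoveOut: (path, location)
    path_map: HashMap<usize, Vec<usize>>,         // path -> moves of that path
    loc_map: HashMap<(usize, usize), Vec<usize>>, // location -> moves issued there
}

impl MoveData {
    fn gather_move(&mut self, path: usize, loc: (usize, usize)) {
        let move_out = self.moves.len();
        self.moves.push((path, loc));
        self.path_map.entry(path).or_default().push(move_out);
        self.loc_map.entry(loc).or_default().push(move_out);
    }
}

fn main() {
    let mut data = MoveData::default();
    data.gather_move(3, (0, 2)); // move of path 3 at block 0, statement 2
    assert_eq!(data.path_map[&3], [0]);
    assert_eq!(data.loc_map[&(0, 2)], [0]);
}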

View file

@ -94,7 +94,7 @@ pub struct MovePath<'tcx> {
pub next_sibling: Option<MovePathIndex>,
pub first_child: Option<MovePathIndex>,
pub parent: Option<MovePathIndex>,
pub lvalue: Place<'tcx>,
pub place: Place<'tcx>,
}
impl<'tcx> fmt::Debug for MovePath<'tcx> {
@ -109,13 +109,13 @@ impl<'tcx> fmt::Debug for MovePath<'tcx> {
if let Some(next_sibling) = self.next_sibling {
write!(w, " next_sibling: {:?}", next_sibling)?;
}
write!(w, " lvalue: {:?} }}", self.lvalue)
write!(w, " place: {:?} }}", self.place)
}
}
impl<'tcx> fmt::Display for MovePath<'tcx> {
fn fmt(&self, w: &mut fmt::Formatter) -> fmt::Result {
write!(w, "{:?}", self.lvalue)
write!(w, "{:?}", self.place)
}
}
@ -224,11 +224,11 @@ impl fmt::Debug for Init {
pub struct MovePathLookup<'tcx> {
locals: IndexVec<Local, MovePathIndex>,
/// projections are made from a base-lvalue and a projection
/// elem. The base-lvalue will have a unique MovePathIndex; we use
/// projections are made from a base-place and a projection
/// elem. The base-place will have a unique MovePathIndex; we use
/// the latter as the index into the outer vector (narrowing
/// subsequent search so that it is solely relative to that
/// base-lvalue). For the remaining lookup, we map the projection
/// base-place). For the remaining lookup, we map the projection
/// elem to the associated MovePathIndex.
projections: FxHashMap<(MovePathIndex, AbstractElem<'tcx>), MovePathIndex>
}
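A condensed model of this two-level lookup: locals resolve directly, while a projection is keyed by the `MovePathIndex` of its base plus the projection elem (toy index types and a string elem, not rustc's `AbstractElem`):

use std::collections::HashMap;

type PathIndex = usize;

struct Lookup {
    locals: Vec<PathIndex>, // Local -> its move path
    projections: HashMap<(PathIndex, &'static str), PathIndex>,
}

impl Lookup {
    fn find_local(&self, local: usize) -> PathIndex {
        self.locals[local]
    }
    // Narrow the search to paths based on `base`, then match the elem.
    fn find_projection(&self, base: PathIndex, elem: &'static str) -> Option<PathIndex> {
        self.projections.get(&(base, elem)).copied()
    }
}

fn main() {
    let mut projections = HashMap::new();
    projections.insert((0, ".0"), 2); // path of `_1.0`, keyed on path of `_1`
    let lookup = Lookup { locals: vec![0, 1], projections };
    let base = lookup.find_local(0);
    assert_eq!(lookup.find_projection(base, ".0"), Some(2));
}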
@ -246,8 +246,8 @@ impl<'tcx> MovePathLookup<'tcx> {
// alternative will *not* create a MovePath on the fly for an
// unknown l-value, but will rather return the nearest available
// parent.
pub fn find(&self, lval: &Place<'tcx>) -> LookupResult {
match *lval {
pub fn find(&self, place: &Place<'tcx>) -> LookupResult {
match *place {
Place::Local(local) => LookupResult::Exact(self.locals[local]),
Place::Static(..) => LookupResult::Parent(None),
Place::Projection(ref proj) => {

View file

@ -116,7 +116,7 @@ fn apply_adjustment<'a, 'gcx, 'tcx>(cx: &mut Cx<'a, 'gcx, 'tcx>,
},
};
overloaded_lvalue(cx, hir_expr, adjustment.target, Some(call), vec![expr.to_ref()])
overloaded_place(cx, hir_expr, adjustment.target, Some(call), vec![expr.to_ref()])
}
Adjust::Borrow(AutoBorrow::Ref(r, m)) => {
ExprKind::Borrow {
@ -335,7 +335,7 @@ fn make_mirror_unadjusted<'a, 'gcx, 'tcx>(cx: &mut Cx<'a, 'gcx, 'tcx>,
hir::ExprIndex(ref lhs, ref index) => {
if cx.tables().is_method_call(expr) {
overloaded_lvalue(cx, expr, expr_ty, None, vec![lhs.to_ref(), index.to_ref()])
overloaded_place(cx, expr, expr_ty, None, vec![lhs.to_ref(), index.to_ref()])
} else {
ExprKind::Index {
lhs: lhs.to_ref(),
@ -346,7 +346,7 @@ fn make_mirror_unadjusted<'a, 'gcx, 'tcx>(cx: &mut Cx<'a, 'gcx, 'tcx>,
hir::ExprUnary(hir::UnOp::UnDeref, ref arg) => {
if cx.tables().is_method_call(expr) {
overloaded_lvalue(cx, expr, expr_ty, None, vec![arg.to_ref()])
overloaded_place(cx, expr, expr_ty, None, vec![arg.to_ref()])
} else {
ExprKind::Deref { arg: arg.to_ref() }
}
@ -844,15 +844,15 @@ fn overloaded_operator<'a, 'gcx, 'tcx>(cx: &mut Cx<'a, 'gcx, 'tcx>,
}
}
fn overloaded_lvalue<'a, 'gcx, 'tcx>(cx: &mut Cx<'a, 'gcx, 'tcx>,
fn overloaded_place<'a, 'gcx, 'tcx>(cx: &mut Cx<'a, 'gcx, 'tcx>,
expr: &'tcx hir::Expr,
lvalue_ty: Ty<'tcx>,
place_ty: Ty<'tcx>,
custom_callee: Option<(DefId, &'tcx Substs<'tcx>)>,
args: Vec<ExprRef<'tcx>>)
-> ExprKind<'tcx> {
// For an overloaded *x or x[y] expression of type T, the method
// call returns an &T and we must add the deref so that the types
// line up (this is because `*x` and `x[y]` represent lvalues):
// line up (this is because `*x` and `x[y]` represent places):
let recv_ty = match args[0] {
ExprRef::Hair(e) => cx.tables().expr_ty_adjusted(e),
@ -864,10 +864,10 @@ fn overloaded_lvalue<'a, 'gcx, 'tcx>(cx: &mut Cx<'a, 'gcx, 'tcx>,
// `Deref(Mut)::Deref(_mut)` and `Index(Mut)::index(_mut)`.
let (region, mt) = match recv_ty.sty {
ty::TyRef(region, mt) => (region, mt),
_ => span_bug!(expr.span, "overloaded_lvalue: receiver is not a reference"),
_ => span_bug!(expr.span, "overloaded_place: receiver is not a reference"),
};
let ref_ty = cx.tcx.mk_ref(region, ty::TypeAndMut {
ty: lvalue_ty,
ty: place_ty,
mutbl: mt.mutbl,
});
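The adjustment described in the comment above is visible in surface Rust: `*x` on a `Deref` type compiles to `*Deref::deref(&x)` — the method call yields `&T`, and the inserted deref turns it back into a place of type `T`. A small demonstration (ordinary stable Rust):

use std::ops::Deref;

struct Wrapper(String);

impl Deref for Wrapper {
    type Target = String;
    fn deref(&self) -> &String { &self.0 }
}

fn main() {
    let x = Wrapper(String::from("hi"));
    // `(*x)` is compiled as `*Deref::deref(&x)`: the call returns `&String`,
    // and the extra deref makes the expression a place of type `String`.
    let n = (*x).len();
    assert_eq!(n, 2);
}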

View file

@ -384,14 +384,14 @@ impl<'a, 'tcx> CloneShimBuilder<'a, 'tcx> {
let rcvr = Place::Local(Local::new(1+0)).deref();
let ret_statement = self.make_statement(
StatementKind::Assign(
Place::Local(RETURN_POINTER),
Place::Local(RETURN_PLACE),
Rvalue::Use(Operand::Copy(rcvr))
)
);
self.block(vec![ret_statement], TerminatorKind::Return, false);
}
fn make_lvalue(&mut self, mutability: Mutability, ty: Ty<'tcx>) -> Place<'tcx> {
fn make_place(&mut self, mutability: Mutability, ty: Ty<'tcx>) -> Place<'tcx> {
let span = self.span;
Place::Local(
self.local_decls.push(temp_decl(mutability, ty, span))
@ -427,7 +427,7 @@ impl<'a, 'tcx> CloneShimBuilder<'a, 'tcx> {
},
});
let ref_loc = self.make_lvalue(
let ref_loc = self.make_place(
Mutability::Not,
tcx.mk_ref(tcx.types.re_erased, ty::TypeAndMut {
ty,
@ -435,7 +435,7 @@ impl<'a, 'tcx> CloneShimBuilder<'a, 'tcx> {
})
);
let loc = self.make_lvalue(Mutability::Not, ty);
let loc = self.make_place(Mutability::Not, ty);
// `let ref_loc: &ty = &rcvr_field;`
let statement = self.make_statement(
@ -466,7 +466,7 @@ impl<'a, 'tcx> CloneShimBuilder<'a, 'tcx> {
) {
let tcx = self.tcx;
let cond = self.make_lvalue(Mutability::Mut, tcx.types.bool);
let cond = self.make_place(Mutability::Mut, tcx.types.bool);
let compute_cond = self.make_statement(
StatementKind::Assign(
cond.clone(),
@ -502,8 +502,8 @@ impl<'a, 'tcx> CloneShimBuilder<'a, 'tcx> {
let rcvr = Place::Local(Local::new(1+0)).deref();
let beg = self.local_decls.push(temp_decl(Mutability::Mut, tcx.types.usize, span));
let end = self.make_lvalue(Mutability::Not, tcx.types.usize);
let ret = self.make_lvalue(Mutability::Mut, tcx.mk_array(ty, len));
let end = self.make_place(Mutability::Not, tcx.types.usize);
let ret = self.make_place(Mutability::Mut, tcx.mk_array(ty, len));
// BB #0
// `let mut beg = 0;`
@ -567,7 +567,7 @@ impl<'a, 'tcx> CloneShimBuilder<'a, 'tcx> {
// `return ret;`
let ret_statement = self.make_statement(
StatementKind::Assign(
Place::Local(RETURN_POINTER),
Place::Local(RETURN_PLACE),
Rvalue::Use(Operand::Move(ret.clone())),
)
);
@ -663,7 +663,7 @@ impl<'a, 'tcx> CloneShimBuilder<'a, 'tcx> {
// `return kind(returns[0], returns[1], ..., returns[tys.len() - 1]);`
let ret_statement = self.make_statement(
StatementKind::Assign(
Place::Local(RETURN_POINTER),
Place::Local(RETURN_PLACE),
Rvalue::Aggregate(
box kind,
returns.into_iter().map(Operand::Move).collect()
@ -749,8 +749,8 @@ fn build_call_shim<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
if let Some(untuple_args) = untuple_args {
args.extend(untuple_args.iter().enumerate().map(|(i, ity)| {
let arg_lv = Place::Local(Local::new(1+1));
Operand::Move(arg_lv.field(Field::new(i), *ity))
let arg_place = Place::Local(Local::new(1+1));
Operand::Move(arg_place.field(Field::new(i), *ity))
}));
} else {
args.extend((1..sig.inputs().len()).map(|i| {
@ -771,7 +771,7 @@ fn build_call_shim<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
block(&mut blocks, statements, TerminatorKind::Call {
func: callee,
args,
destination: Some((Place::Local(RETURN_POINTER),
destination: Some((Place::Local(RETURN_PLACE),
BasicBlock::new(1))),
cleanup: if let Adjustment::RefMut = rcvr_adjustment {
Some(BasicBlock::new(3))
@ -864,7 +864,7 @@ pub fn build_adt_ctor<'a, 'gcx, 'tcx>(infcx: &infer::InferCtxt<'a, 'gcx, 'tcx>,
statements: vec![Statement {
source_info,
kind: StatementKind::Assign(
Place::Local(RETURN_POINTER),
Place::Local(RETURN_PLACE),
Rvalue::Aggregate(
box AggregateKind::Adt(adt_def, variant_no, substs, None),
(1..sig.inputs().len()+1).map(|i| {

View file

@ -22,9 +22,9 @@ use transform::{MirPass, MirSource};
pub struct AddValidation;
/// Determine the "context" of the lval: Mutability and region.
fn lval_context<'a, 'tcx, D>(
lval: &Place<'tcx>,
/// Determine the "context" of the place: Mutability and region.
fn place_context<'a, 'tcx, D>(
place: &Place<'tcx>,
local_decls: &D,
tcx: TyCtxt<'a, 'tcx, 'tcx>
) -> (Option<region::Scope>, hir::Mutability)
@ -32,7 +32,7 @@ fn lval_context<'a, 'tcx, D>(
{
use rustc::mir::Place::*;
match *lval {
match *place {
Local { .. } => (None, hir::MutMutable),
Static(_) => (None, hir::MutImmutable),
Projection(ref proj) => {
@ -66,7 +66,7 @@ fn lval_context<'a, 'tcx, D>(
// This is already as restricted as it gets, no need to even recurse
context
} else {
let base_context = lval_context(&proj.base, local_decls, tcx);
let base_context = place_context(&proj.base, local_decls, tcx);
// The region of the outermost Deref is always most restrictive.
let re = context.0.or(base_context.0);
let mutbl = context.1.and(base_context.1);
@ -74,7 +74,7 @@ fn lval_context<'a, 'tcx, D>(
}
}
_ => lval_context(&proj.base, local_decls, tcx),
_ => place_context(&proj.base, local_decls, tcx),
}
}
}
@ -198,11 +198,11 @@ impl MirPass for AddValidation {
let restricted_validation = emit_validate == 1 && fn_contains_unsafe(tcx, src);
let local_decls = mir.local_decls.clone(); // FIXME: Find a way to get rid of this clone.
// Convert an lvalue to a validation operand.
let lval_to_operand = |lval: Place<'tcx>| -> ValidationOperand<'tcx, Place<'tcx>> {
let (re, mutbl) = lval_context(&lval, &local_decls, tcx);
let ty = lval.ty(&local_decls, tcx).to_ty(tcx);
ValidationOperand { lval, ty, re, mutbl }
// Convert a place to a validation operand.
let place_to_operand = |place: Place<'tcx>| -> ValidationOperand<'tcx, Place<'tcx>> {
let (re, mutbl) = place_context(&place, &local_decls, tcx);
let ty = place.ty(&local_decls, tcx).to_ty(tcx);
ValidationOperand { place, ty, re, mutbl }
};
// Emit an Acquire at the beginning of the given block. If we are in restricted emission
@ -237,7 +237,7 @@ impl MirPass for AddValidation {
};
// Gather all arguments, skip return value.
let operands = mir.local_decls.iter_enumerated().skip(1).take(mir.arg_count)
.map(|(local, _)| lval_to_operand(Place::Local(local))).collect();
.map(|(local, _)| place_to_operand(Place::Local(local))).collect();
emit_acquire(&mut mir.basic_blocks_mut()[START_BLOCK], source_info, operands);
}
@ -256,13 +256,13 @@ impl MirPass for AddValidation {
let release_stmt = Statement {
source_info,
kind: StatementKind::Validate(ValidationOp::Release,
destination.iter().map(|dest| lval_to_operand(dest.0.clone()))
destination.iter().map(|dest| place_to_operand(dest.0.clone()))
.chain(
args.iter().filter_map(|op| {
match op {
&Operand::Copy(ref lval) |
&Operand::Move(ref lval) =>
Some(lval_to_operand(lval.clone())),
&Operand::Copy(ref place) |
&Operand::Move(ref place) =>
Some(place_to_operand(place.clone())),
&Operand::Constant(..) => { None },
}
})
@ -275,16 +275,16 @@ impl MirPass for AddValidation {
returns.push((source_info, destination.0.clone(), destination.1));
}
}
Some(Terminator { kind: TerminatorKind::Drop { location: ref lval, .. },
Some(Terminator { kind: TerminatorKind::Drop { location: ref place, .. },
source_info }) |
Some(Terminator { kind: TerminatorKind::DropAndReplace { location: ref lval, .. },
Some(Terminator { kind: TerminatorKind::DropAndReplace { location: ref place, .. },
source_info }) => {
// Before the call: Release all arguments
if !restricted_validation {
let release_stmt = Statement {
source_info,
kind: StatementKind::Validate(ValidationOp::Release,
vec![lval_to_operand(lval.clone())]),
vec![place_to_operand(place.clone())]),
};
block_data.statements.push(release_stmt);
}
@ -296,11 +296,11 @@ impl MirPass for AddValidation {
}
}
// Now we go over the returns we collected to acquire the return values.
for (source_info, dest_lval, dest_block) in returns {
for (source_info, dest_place, dest_block) in returns {
emit_acquire(
&mut mir.basic_blocks_mut()[dest_block],
source_info,
vec![lval_to_operand(dest_lval)]
vec![place_to_operand(dest_place)]
);
}
@ -321,22 +321,20 @@ impl MirPass for AddValidation {
StatementKind::Assign(_, Rvalue::Ref(_, _, _)) => {
// Due to a lack of NLL, we can't capture anything directly here.
// Instead, we have to re-match and clone there.
let (dest_lval, re, src_lval) = match block_data.statements[i].kind {
StatementKind::Assign(ref dest_lval,
Rvalue::Ref(re, _, ref src_lval)) => {
(dest_lval.clone(), re, src_lval.clone())
let (dest_place, re, src_place) = match block_data.statements[i].kind {
StatementKind::Assign(ref dest_place,
Rvalue::Ref(re, _, ref src_place)) => {
(dest_place.clone(), re, src_place.clone())
},
_ => bug!("We already matched this."),
};
// So this is a ref, and we got all the data we wanted.
// Do an acquire of the result -- but only what it points to, so add a Deref
// projection.
let dest_lval = Projection { base: dest_lval, elem: ProjectionElem::Deref };
let dest_lval = Place::Projection(Box::new(dest_lval));
let acquire_stmt = Statement {
source_info: block_data.statements[i].source_info,
kind: StatementKind::Validate(ValidationOp::Acquire,
vec![lval_to_operand(dest_lval)]),
vec![place_to_operand(dest_place.deref())]),
};
block_data.statements.insert(i+1, acquire_stmt);
@ -349,7 +347,7 @@ impl MirPass for AddValidation {
};
let release_stmt = Statement {
source_info: block_data.statements[i].source_info,
kind: StatementKind::Validate(op, vec![lval_to_operand(src_lval)]),
kind: StatementKind::Validate(op, vec![place_to_operand(src_place)]),
};
block_data.statements.insert(i, release_stmt);
}
@ -360,13 +358,13 @@ impl MirPass for AddValidation {
{
// Due to a lack of NLL, we can't capture anything directly here.
// Instead, we have to re-match and clone there.
let (dest_lval, src_lval) = match block_data.statements[i].kind {
StatementKind::Assign(ref dest_lval,
Rvalue::Cast(_, Operand::Copy(ref src_lval), _)) |
StatementKind::Assign(ref dest_lval,
Rvalue::Cast(_, Operand::Move(ref src_lval), _)) =>
let (dest_place, src_place) = match block_data.statements[i].kind {
StatementKind::Assign(ref dest_place,
Rvalue::Cast(_, Operand::Copy(ref src_place), _)) |
StatementKind::Assign(ref dest_place,
Rvalue::Cast(_, Operand::Move(ref src_place), _)) =>
{
(dest_lval.clone(), src_lval.clone())
(dest_place.clone(), src_place.clone())
},
_ => bug!("We already matched this."),
};
@ -375,7 +373,7 @@ impl MirPass for AddValidation {
let acquire_stmt = Statement {
source_info: block_data.statements[i].source_info,
kind: StatementKind::Validate(ValidationOp::Acquire,
vec![lval_to_operand(dest_lval)]),
vec![place_to_operand(dest_place)]),
};
block_data.statements.insert(i+1, acquire_stmt);
@ -383,7 +381,7 @@ impl MirPass for AddValidation {
let release_stmt = Statement {
source_info: block_data.statements[i].source_info,
kind: StatementKind::Validate(ValidationOp::Release,
vec![lval_to_operand(src_lval)]),
vec![place_to_operand(src_place)]),
};
block_data.statements.insert(i, release_stmt);
}

View file

@ -135,12 +135,12 @@ impl<'a, 'tcx> Visitor<'tcx> for UnsafetyChecker<'a, 'tcx> {
self.super_rvalue(rvalue, location);
}
fn visit_lvalue(&mut self,
lvalue: &Place<'tcx>,
fn visit_place(&mut self,
place: &Place<'tcx>,
context: PlaceContext<'tcx>,
location: Location) {
if let PlaceContext::Borrow { .. } = context {
if util::is_disaligned(self.tcx, self.mir, self.param_env, lvalue) {
if util::is_disaligned(self.tcx, self.mir, self.param_env, place) {
let source_info = self.source_info;
let lint_root =
self.visibility_scope_info[source_info.scope].lint_root;
@ -152,7 +152,7 @@ impl<'a, 'tcx> Visitor<'tcx> for UnsafetyChecker<'a, 'tcx> {
}
}
match lvalue {
match place {
&Place::Projection(box Projection {
ref base, ref elem
}) => {
@ -180,7 +180,7 @@ impl<'a, 'tcx> Visitor<'tcx> for UnsafetyChecker<'a, 'tcx> {
_ => span_bug!(
self.source_info.span,
"non-field projection {:?} from union?",
lvalue)
place)
};
if elem_ty.moves_by_default(self.tcx, self.param_env,
self.source_info.span) {
@ -216,7 +216,7 @@ impl<'a, 'tcx> Visitor<'tcx> for UnsafetyChecker<'a, 'tcx> {
}
}
};
self.super_lvalue(lvalue, context, location);
self.super_place(place, context, location);
}
}

View file

@ -108,8 +108,8 @@ impl MirPass for CopyPropagation {
dest_local);
continue;
}
let dest_lvalue_def = dest_use_info.defs_not_including_drop().next().unwrap();
location = dest_lvalue_def.location;
let dest_place_def = dest_use_info.defs_not_including_drop().next().unwrap();
location = dest_place_def.location;
let basic_block = &mir[location.block];
let statement_index = location.statement_index;
@ -126,9 +126,9 @@ impl MirPass for CopyPropagation {
StatementKind::Assign(Place::Local(local), Rvalue::Use(ref operand)) if
local == dest_local => {
let maybe_action = match *operand {
Operand::Copy(ref src_lvalue) |
Operand::Move(ref src_lvalue) => {
Action::local_copy(&mir, &def_use_analysis, src_lvalue)
Operand::Copy(ref src_place) |
Operand::Move(ref src_place) => {
Action::local_copy(&mir, &def_use_analysis, src_place)
}
Operand::Constant(ref src_constant) => {
Action::constant(src_constant)
@ -202,10 +202,10 @@ enum Action<'tcx> {
}
impl<'tcx> Action<'tcx> {
fn local_copy(mir: &Mir<'tcx>, def_use_analysis: &DefUseAnalysis, src_lvalue: &Place<'tcx>)
fn local_copy(mir: &Mir<'tcx>, def_use_analysis: &DefUseAnalysis, src_place: &Place<'tcx>)
-> Option<Action<'tcx>> {
// The source must be a local.
let src_local = if let Place::Local(local) = *src_lvalue {
let src_local = if let Place::Local(local) = *src_place {
local
} else {
debug!(" Can't copy-propagate local: source is not a local");
@ -269,14 +269,14 @@ impl<'tcx> Action<'tcx> {
debug!(" Replacing all uses of {:?} with {:?} (local)",
dest_local,
src_local);
for lvalue_use in &def_use_analysis.local_info(dest_local).defs_and_uses {
if lvalue_use.context.is_storage_marker() {
mir.make_statement_nop(lvalue_use.location)
for place_use in &def_use_analysis.local_info(dest_local).defs_and_uses {
if place_use.context.is_storage_marker() {
mir.make_statement_nop(place_use.location)
}
}
for lvalue_use in &def_use_analysis.local_info(src_local).defs_and_uses {
if lvalue_use.context.is_storage_marker() {
mir.make_statement_nop(lvalue_use.location)
for place_use in &def_use_analysis.local_info(src_local).defs_and_uses {
if place_use.context.is_storage_marker() {
mir.make_statement_nop(place_use.location)
}
}
@ -297,22 +297,22 @@ impl<'tcx> Action<'tcx> {
dest_local,
src_constant);
let dest_local_info = def_use_analysis.local_info(dest_local);
for lvalue_use in &dest_local_info.defs_and_uses {
if lvalue_use.context.is_storage_marker() {
mir.make_statement_nop(lvalue_use.location)
for place_use in &dest_local_info.defs_and_uses {
if place_use.context.is_storage_marker() {
mir.make_statement_nop(place_use.location)
}
}
// Replace all uses of the destination local with the constant.
let mut visitor = ConstantPropagationVisitor::new(dest_local,
src_constant);
for dest_lvalue_use in &dest_local_info.defs_and_uses {
visitor.visit_location(mir, dest_lvalue_use.location)
for dest_place_use in &dest_local_info.defs_and_uses {
visitor.visit_location(mir, dest_place_use.location)
}
// Zap the assignment instruction if we eliminated all the uses. We won't have been
// able to do that if the destination was used in a projection, because projections
// must have lvalues on their LHS.
// must have places on their LHS.
let use_count = dest_local_info.use_count();
if visitor.uses_replaced == use_count {
debug!(" {} of {} use(s) replaced; deleting assignment",

View file

@ -92,7 +92,7 @@ impl MirPass for Deaggregator {
if adt_def.is_enum() {
let set_discriminant = Statement {
kind: StatementKind::SetDiscriminant {
lvalue: lhs.clone(),
place: lhs.clone(),
variant_index: variant,
},
source_info: src_info,

View file

@ -358,7 +358,7 @@ impl<'b, 'tcx> ElaborateDropsCtxt<'b, 'tcx> {
});
let path = self.move_data().rev_lookup.find(location);
debug!("collect_drop_flags: {:?}, lv {:?} ({:?})",
debug!("collect_drop_flags: {:?}, place {:?} ({:?})",
bb, location, path);
let path = match path {
@ -368,7 +368,7 @@ impl<'b, 'tcx> ElaborateDropsCtxt<'b, 'tcx> {
let (_maybe_live, maybe_dead) = init_data.state(parent);
if maybe_dead {
span_bug!(terminator.source_info.span,
"drop of untracked, uninitialized value {:?}, lv {:?} ({:?})",
"drop of untracked, uninitialized value {:?}, place {:?} ({:?})",
bb, location, path);
}
continue
@ -443,7 +443,7 @@ impl<'b, 'tcx> ElaborateDropsCtxt<'b, 'tcx> {
/// The desugaring drops the location if needed, and then writes
/// the value (including setting the drop flag) over it in *both* arms.
///
/// The `replace` terminator can also be called on lvalues that
/// The `replace` terminator can also be called on places that
/// are not tracked by elaboration (for example,
/// `replace x[i] <- tmp0`). The borrow checker requires that
/// these locations are initialized before the assignment,
@ -554,12 +554,12 @@ impl<'b, 'tcx> ElaborateDropsCtxt<'b, 'tcx> {
fn drop_flags_for_fn_rets(&mut self) {
for (bb, data) in self.mir.basic_blocks().iter_enumerated() {
if let TerminatorKind::Call {
destination: Some((ref lv, tgt)), cleanup: Some(_), ..
destination: Some((ref place, tgt)), cleanup: Some(_), ..
} = data.terminator().kind {
assert!(!self.patch.is_patched(bb));
let loc = Location { block: tgt, statement_index: 0 };
let path = self.move_data().rev_lookup.find(lv);
let path = self.move_data().rev_lookup.find(place);
on_lookup_result_bits(
self.tcx, self.mir, self.move_data(), path,
|child| self.set_drop_flag(loc, child, DropFlagState::Present)
@ -628,12 +628,12 @@ impl<'b, 'tcx> ElaborateDropsCtxt<'b, 'tcx> {
// so mark the return as initialized *before* the
// call.
if let TerminatorKind::Call {
destination: Some((ref lv, _)), cleanup: None, ..
destination: Some((ref place, _)), cleanup: None, ..
} = data.terminator().kind {
assert!(!self.patch.is_patched(bb));
let loc = Location { block: bb, statement_index: data.statements.len() };
let path = self.move_data().rev_lookup.find(lv);
let path = self.move_data().rev_lookup.find(place);
on_lookup_result_bits(
self.tcx, self.mir, self.move_data(), path,
|child| self.set_drop_flag(loc, child, DropFlagState::Present)

View file

@ -108,17 +108,17 @@ impl<'tcx> MutVisitor<'tcx> for DerefArgVisitor {
assert_ne!(*local, self_arg());
}
fn visit_lvalue(&mut self,
lvalue: &mut Place<'tcx>,
fn visit_place(&mut self,
place: &mut Place<'tcx>,
context: PlaceContext<'tcx>,
location: Location) {
if *lvalue == Place::Local(self_arg()) {
*lvalue = Place::Projection(Box::new(Projection {
base: lvalue.clone(),
if *place == Place::Local(self_arg()) {
*place = Place::Projection(Box::new(Projection {
base: place.clone(),
elem: ProjectionElem::Deref,
}));
} else {
self.super_lvalue(lvalue, context, location);
self.super_place(place, context, location);
}
}
}
@ -151,7 +151,7 @@ struct TransformVisitor<'a, 'tcx: 'a> {
// A list of suspension points, generated during the transform
suspension_points: Vec<SuspensionPoint>,
// The original RETURN_POINTER local
// The original RETURN_PLACE local
new_ret_local: Local,
}
@ -200,17 +200,17 @@ impl<'a, 'tcx> MutVisitor<'tcx> for TransformVisitor<'a, 'tcx> {
assert_eq!(self.remap.get(local), None);
}
fn visit_lvalue(&mut self,
lvalue: &mut Place<'tcx>,
fn visit_place(&mut self,
place: &mut Place<'tcx>,
context: PlaceContext<'tcx>,
location: Location) {
if let Place::Local(l) = *lvalue {
if let Place::Local(l) = *place {
// Replace an Local in the remap with a generator struct access
if let Some(&(ty, idx)) = self.remap.get(&l) {
*lvalue = self.make_field(idx, ty);
*place = self.make_field(idx, ty);
}
} else {
self.super_lvalue(lvalue, context, location);
self.super_place(place, context, location);
}
}
@ -244,7 +244,7 @@ impl<'a, 'tcx> MutVisitor<'tcx> for TransformVisitor<'a, 'tcx> {
// We must assign the value first in case it gets declared dead below
data.statements.push(Statement {
source_info,
kind: StatementKind::Assign(Place::Local(RETURN_POINTER),
kind: StatementKind::Assign(Place::Local(RETURN_PLACE),
self.make_state(state_idx, v)),
});
let state = if let Some(resume) = resume { // Yield
@ -310,7 +310,7 @@ fn replace_result_variable<'tcx>(ret_ty: Ty<'tcx>,
mir.local_decls.swap(0, new_ret_local.index());
RenameLocalVisitor {
from: RETURN_POINTER,
from: RETURN_PLACE,
to: new_ret_local,
}.visit_mir(mir);
@ -557,7 +557,7 @@ fn create_generator_drop_shim<'a, 'tcx>(
}
// Replace the return variable
mir.local_decls[RETURN_POINTER] = LocalDecl {
mir.local_decls[RETURN_PLACE] = LocalDecl {
mutability: Mutability::Mut,
ty: tcx.mk_nil(),
name: None,
@ -783,8 +783,8 @@ impl MirPass for StateTransform {
Kind::from(mir.return_ty())].iter());
let ret_ty = tcx.mk_adt(state_adt_ref, state_substs);
// We rename RETURN_POINTER which has type mir.return_ty to new_ret_local
// RETURN_POINTER then is a fresh unused local with type ret_ty.
// We rename RETURN_PLACE which has type mir.return_ty to new_ret_local
// RETURN_PLACE then is a fresh unused local with type ret_ty.
let new_ret_local = replace_result_variable(ret_ty, mir);
// Extract locals which are live across suspension point into `layout`

View file

@ -406,9 +406,9 @@ impl<'a, 'tcx> Inliner<'a, 'tcx> {
// `i : &mut usize`, then just duplicating the `a[*i]`
// Place could result in two different locations if `f`
// writes to `i`. To prevent this we need to create a temporary
// borrow of the lvalue and pass the destination as `*temp` instead.
fn dest_needs_borrow(lval: &Place) -> bool {
match *lval {
// borrow of the place and pass the destination as `*temp` instead.
fn dest_needs_borrow(place: &Place) -> bool {
match *place {
Place::Projection(ref p) => {
match p.elem {
ProjectionElem::Deref |
@ -456,8 +456,8 @@ impl<'a, 'tcx> Inliner<'a, 'tcx> {
// needs to generate the cast.
// FIXME: we should probably just generate correct MIR in the first place...
let arg = if let Operand::Move(ref lval) = args[0] {
lval.clone()
let arg = if let Operand::Move(ref place) = args[0] {
place.clone()
} else {
bug!("Constant arg to \"box_free\"");
};
@ -715,13 +715,13 @@ impl<'a, 'tcx> MutVisitor<'tcx> for Integrator<'a, 'tcx> {
local: &mut Local,
_ctxt: PlaceContext<'tcx>,
_location: Location) {
if *local == RETURN_POINTER {
if *local == RETURN_PLACE {
match self.destination {
Place::Local(l) => {
*local = l;
return;
},
ref lval => bug!("Return lvalue is {:?}, not local", lval)
ref place => bug!("Return place is {:?}, not local", place)
}
}
let idx = local.index() - 1;
@ -732,15 +732,15 @@ impl<'a, 'tcx> MutVisitor<'tcx> for Integrator<'a, 'tcx> {
*local = self.local_map[Local::new(idx - self.args.len())];
}
fn visit_lvalue(&mut self,
lvalue: &mut Place<'tcx>,
fn visit_place(&mut self,
place: &mut Place<'tcx>,
_ctxt: PlaceContext<'tcx>,
_location: Location) {
if let Place::Local(RETURN_POINTER) = *lvalue {
// Return pointer; update the lvalue itself
*lvalue = self.destination.clone();
if let Place::Local(RETURN_PLACE) = *place {
// Return pointer; update the place itself
*place = self.destination.clone();
} else {
self.super_lvalue(lvalue, _ctxt, _location);
self.super_place(place, _ctxt, _location);
}
}
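The aliasing hazard that `dest_needs_borrow` guards against, sketched at the source level (hypothetical, simplified): if the call destination is `a[*i]` and the inlined body writes `i`, re-evaluating the destination per store would pick a different slot, so the inliner takes one borrow up front and writes through it:

fn main() {
    let mut a = [0u32; 4];
    let mut i = 0usize;

    // What the inliner effectively emits for destination `a[*i]`:
    let tmp: &mut u32 = &mut a[i]; // borrow the destination once, up front
    i += 1;                        // the "inlined body" mutates `i`
    *tmp = 42;                     // still writes the slot chosen before the body ran
    assert_eq!(a, [42, 0, 0, 0]);
}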

View file

@ -52,14 +52,14 @@ impl<'tcx> MutVisitor<'tcx> for InstCombineVisitor<'tcx> {
fn visit_rvalue(&mut self, rvalue: &mut Rvalue<'tcx>, location: Location) {
if self.optimizations.and_stars.remove(&location) {
debug!("Replacing `&*`: {:?}", rvalue);
let new_lvalue = match *rvalue {
let new_place = match *rvalue {
Rvalue::Ref(_, _, Place::Projection(ref mut projection)) => {
// Replace with dummy
mem::replace(&mut projection.base, Place::Local(Local::new(0)))
}
_ => bug!("Detected `&*` but didn't find `&*`!"),
};
*rvalue = Rvalue::Use(Operand::Copy(new_lvalue))
*rvalue = Rvalue::Use(Operand::Copy(new_place))
}
if let Some(constant) = self.optimizations.arrays_lengths.remove(&location) {
@ -98,9 +98,9 @@ impl<'b, 'a, 'tcx> Visitor<'tcx> for OptimizationFinder<'b, 'a, 'tcx> {
}
}
if let Rvalue::Len(ref lvalue) = *rvalue {
let lvalue_ty = lvalue.ty(&self.mir.local_decls, self.tcx).to_ty(self.tcx);
if let TypeVariants::TyArray(_, len) = lvalue_ty.sty {
if let Rvalue::Len(ref place) = *rvalue {
let place_ty = place.ty(&self.mir.local_decls, self.tcx).to_ty(self.tcx);
if let TypeVariants::TyArray(_, len) = place_ty.sty {
let span = self.mir.source_info(location).span;
let ty = self.tcx.types.usize;
let literal = Literal::Value { value: len };

View file

@ -75,19 +75,19 @@ impl Lower128Bit {
};
let bin_statement = block.statements.pop().unwrap();
let (source_info, lvalue, lhs, mut rhs) = match bin_statement {
let (source_info, place, lhs, mut rhs) = match bin_statement {
Statement {
source_info,
kind: StatementKind::Assign(
lvalue,
place,
Rvalue::BinaryOp(_, lhs, rhs))
} => (source_info, lvalue, lhs, rhs),
} => (source_info, place, lhs, rhs),
Statement {
source_info,
kind: StatementKind::Assign(
lvalue,
place,
Rvalue::CheckedBinaryOp(_, lhs, rhs))
} => (source_info, lvalue, lhs, rhs),
} => (source_info, place, lhs, rhs),
_ => bug!("Statement doesn't match pattern any more?"),
};
@ -109,7 +109,7 @@ impl Lower128Bit {
}
let call_did = check_lang_item_type(
lang_item, &lvalue, &lhs, &rhs, local_decls, tcx);
lang_item, &place, &lhs, &rhs, local_decls, tcx);
let bb = BasicBlock::new(cur_len + new_blocks.len());
new_blocks.push(after_call);
@ -121,7 +121,7 @@ impl Lower128Bit {
func: Operand::function_handle(tcx, call_did,
Slice::empty(), source_info.span),
args: vec![lhs, rhs],
destination: Some((lvalue, bb)),
destination: Some((place, bb)),
cleanup: None,
},
});
@ -134,7 +134,7 @@ impl Lower128Bit {
fn check_lang_item_type<'a, 'tcx, D>(
lang_item: LangItem,
lvalue: &Place<'tcx>,
place: &Place<'tcx>,
lhs: &Operand<'tcx>,
rhs: &Operand<'tcx>,
local_decls: &D,
@ -147,8 +147,8 @@ fn check_lang_item_type<'a, 'tcx, D>(
let sig = tcx.no_late_bound_regions(&poly_sig).unwrap();
let lhs_ty = lhs.ty(local_decls, tcx);
let rhs_ty = rhs.ty(local_decls, tcx);
let lvalue_ty = lvalue.ty(local_decls, tcx).to_ty(tcx);
let expected = [lhs_ty, rhs_ty, lvalue_ty];
let place_ty = place.ty(local_decls, tcx).to_ty(tcx);
let expected = [lhs_ty, rhs_ty, place_ty];
assert_eq!(sig.inputs_and_output[..], expected,
"lang item {}", tcx.def_symbol_name(did));
did

View file

@ -189,9 +189,9 @@ impl<'cx, 'gcx, 'tcx> ConstraintGeneration<'cx, 'gcx, 'tcx> {
&mut self,
location: Location,
borrow_region: ty::Region<'tcx>,
borrowed_lv: &Place<'tcx>,
borrowed_place: &Place<'tcx>,
) {
if let Projection(ref proj) = *borrowed_lv {
if let Projection(ref proj) = *borrowed_place {
let PlaceProjection { ref base, ref elem } = **proj;
if let ProjectionElem::Deref = *elem {
@ -232,8 +232,8 @@ impl<'cx, 'gcx, 'tcx> Visitor<'tcx> for ConstraintGeneration<'cx, 'gcx, 'tcx> {
// where L is the path that is borrowed. In that case, we have
// to add the reborrow constraints (which don't fall out
// naturally from the type-checker).
if let Rvalue::Ref(region, _bk, ref borrowed_lv) = *rvalue {
self.add_reborrow_constraint(location, region, borrowed_lv);
if let Rvalue::Ref(region, _bk, ref borrowed_place) = *rvalue {
self.add_reborrow_constraint(location, region, borrowed_place);
}
self.super_rvalue(rvalue, location);

View file

@ -316,7 +316,7 @@ impl<'a, 'tcx> Promoter<'a, 'tcx> {
statement_index: usize::MAX
});
self.assign(RETURN_POINTER, rvalue, span);
self.assign(RETURN_PLACE, rvalue, span);
self.source.promoted.push(self.promoted);
}
}
@ -373,8 +373,8 @@ pub fn promote_candidates<'a, 'tcx>(mir: &mut Mir<'tcx>,
}
};
// Declare return pointer local
let initial_locals = iter::once(LocalDecl::new_return_pointer(ty, span))
// Declare return place local
let initial_locals = iter::once(LocalDecl::new_return_place(ty, span))
.collect();
let mut promoter = Promoter {

View file

@ -51,7 +51,7 @@ bitflags! {
// Function argument.
const FN_ARGUMENT = 1 << 2;
// Static lvalue or move from a static.
// Static place or move from a static.
const STATIC = 1 << 3;
// Reference to a static.
@ -261,7 +261,7 @@ impl<'a, 'tcx> Qualifier<'a, 'tcx, 'tcx> {
store(&mut self.temp_qualif[index])
}
Place::Local(index) if self.mir.local_kind(index) == LocalKind::ReturnPointer => {
debug!("store to return pointer {:?}", index);
debug!("store to return place {:?}", index);
store(&mut self.return_qualif)
}
@ -280,7 +280,7 @@ impl<'a, 'tcx> Qualifier<'a, 'tcx, 'tcx> {
// This must be an explicit assignment.
_ => {
// Catch more errors in the destination.
self.visit_lvalue(dest, PlaceContext::Store, location);
self.visit_place(dest, PlaceContext::Store, location);
self.statement_like();
}
}
@ -438,11 +438,11 @@ impl<'a, 'tcx> Visitor<'tcx> for Qualifier<'a, 'tcx, 'tcx> {
}
}
fn visit_lvalue(&mut self,
lvalue: &Place<'tcx>,
fn visit_place(&mut self,
place: &Place<'tcx>,
context: PlaceContext<'tcx>,
location: Location) {
match *lvalue {
match *place {
Place::Local(ref local) => self.visit_local(local, context, location),
Place::Static(ref global) => {
self.add(Qualif::STATIC);
@ -467,7 +467,7 @@ impl<'a, 'tcx> Visitor<'tcx> for Qualifier<'a, 'tcx, 'tcx> {
}
Place::Projection(ref proj) => {
self.nest(|this| {
this.super_lvalue(lvalue, context, location);
this.super_place(place, context, location);
match proj.elem {
ProjectionElem::Deref => {
if !this.try_consume() {
@ -502,7 +502,7 @@ impl<'a, 'tcx> Visitor<'tcx> for Qualifier<'a, 'tcx, 'tcx> {
"cannot refer to the interior of another \
static, use a constant instead");
}
let ty = lvalue.ty(this.mir, this.tcx).to_ty(this.tcx);
let ty = place.ty(this.mir, this.tcx).to_ty(this.tcx);
this.qualif.restrict(ty, this.tcx, this.param_env);
}
@ -519,15 +519,15 @@ impl<'a, 'tcx> Visitor<'tcx> for Qualifier<'a, 'tcx, 'tcx> {
fn visit_operand(&mut self, operand: &Operand<'tcx>, location: Location) {
match *operand {
Operand::Copy(ref lvalue) |
Operand::Move(ref lvalue) => {
Operand::Copy(ref place) |
Operand::Move(ref place) => {
self.nest(|this| {
this.super_operand(operand, location);
this.try_consume();
});
// Mark the consumed locals to indicate later drops are noops.
if let Place::Local(local) = *lvalue {
if let Place::Local(local) = *place {
self.local_needs_drop[local] = None;
}
}
@ -555,7 +555,7 @@ impl<'a, 'tcx> Visitor<'tcx> for Qualifier<'a, 'tcx, 'tcx> {
}
fn visit_rvalue(&mut self, rvalue: &Rvalue<'tcx>, location: Location) {
// Recurse through operands and lvalues.
// Recurse through operands and places.
self.super_rvalue(rvalue, location);
match *rvalue {
@ -572,20 +572,20 @@ impl<'a, 'tcx> Visitor<'tcx> for Qualifier<'a, 'tcx, 'tcx> {
Rvalue::Discriminant(..) => {}
Rvalue::Len(_) => {
// Static lvalues in consts would have errored already,
// Static places in consts would have errored already,
// don't treat length checks as reads from statics.
self.qualif = self.qualif - Qualif::STATIC;
}
Rvalue::Ref(_, kind, ref lvalue) => {
// Static lvalues in consts would have errored already,
Rvalue::Ref(_, kind, ref place) => {
// Static places in consts would have errored already,
// only keep track of references to them here.
if self.qualif.intersects(Qualif::STATIC) {
self.qualif = self.qualif - Qualif::STATIC;
self.add(Qualif::STATIC_REF);
}
let ty = lvalue.ty(self.mir, self.tcx).to_ty(self.tcx);
let ty = place.ty(self.mir, self.tcx).to_ty(self.tcx);
if kind == BorrowKind::Mut {
// In theory, any zero-sized value could be borrowed
// mutably without consequences. However, only &mut []
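The comment is cut off by the hunk boundary, but the special case it leads into is the empty-array borrow. A hedged sketch of the one mutable borrow this still permits:

```rust
// `[]` is zero-sized, so this mutable borrow can be promoted to
// 'static; a non-empty array literal here would be rejected.
fn empty() -> &'static mut [i32] {
    &mut []
}
```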
@ -635,7 +635,7 @@ impl<'a, 'tcx> Visitor<'tcx> for Qualifier<'a, 'tcx, 'tcx> {
let candidate = Candidate::Ref(location);
if !self.qualif.intersects(Qualif::NEVER_PROMOTE) {
// We can only promote direct borrows of temps.
if let Place::Local(local) = *lvalue {
if let Place::Local(local) = *place {
if self.mir.local_kind(local) == LocalKind::Temp {
self.promotion_candidates.push(candidate);
}
@ -829,14 +829,14 @@ impl<'a, 'tcx> Visitor<'tcx> for Qualifier<'a, 'tcx, 'tcx> {
}
self.assign(dest, location);
}
} else if let TerminatorKind::Drop { location: ref lvalue, .. } = *kind {
} else if let TerminatorKind::Drop { location: ref place, .. } = *kind {
self.super_terminator_kind(bb, kind, location);
// Deny *any* live drops anywhere other than functions.
if self.mode != Mode::Fn {
// HACK(eddyb) Emulate a bit of dataflow analysis,
// conservatively, that drop elaboration will do.
let needs_drop = if let Place::Local(local) = *lvalue {
let needs_drop = if let Place::Local(local) = *place {
self.local_needs_drop[local]
} else {
None
@ -844,7 +844,7 @@ impl<'a, 'tcx> Visitor<'tcx> for Qualifier<'a, 'tcx, 'tcx> {
if let Some(span) = needs_drop {
// Double-check the type being dropped, to minimize false positives.
let ty = lvalue.ty(self.mir, self.tcx).to_ty(self.tcx);
let ty = place.ty(self.mir, self.tcx).to_ty(self.tcx);
if ty.needs_drop(self.tcx, self.param_env) {
struct_span_err!(self.tcx.sess, span, E0493,
"destructors cannot be evaluated at compile-time")
@ -905,8 +905,8 @@ impl<'a, 'tcx> Visitor<'tcx> for Qualifier<'a, 'tcx, 'tcx> {
self.nest(|this| {
this.visit_source_info(&statement.source_info);
match statement.kind {
StatementKind::Assign(ref lvalue, ref rvalue) => {
this.visit_assign(bb, lvalue, rvalue, location);
StatementKind::Assign(ref place, ref rvalue) => {
this.visit_assign(bb, place, rvalue, location);
}
StatementKind::SetDiscriminant { .. } |
StatementKind::StorageLive(_) |


@ -123,13 +123,13 @@ fn each_block<'a, 'tcx, O>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
None => return,
};
assert!(args.len() == 1);
let peek_arg_lval = match args[0] {
mir::Operand::Copy(ref lval @ mir::Place::Local(_)) |
mir::Operand::Move(ref lval @ mir::Place::Local(_)) => Some(lval),
let peek_arg_place = match args[0] {
mir::Operand::Copy(ref place @ mir::Place::Local(_)) |
mir::Operand::Move(ref place @ mir::Place::Local(_)) => Some(place),
_ => None,
};
let peek_arg_lval = match peek_arg_lval {
let peek_arg_place = match peek_arg_place {
Some(arg) => arg,
None => {
tcx.sess.diagnostic().span_err(
@ -143,8 +143,8 @@ fn each_block<'a, 'tcx, O>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
let mut kill = results.0.sets.kill_set_for(bb.index()).to_owned();
// Emulate effect of all statements in the block up to (but not
// including) the borrow within `peek_arg_lval`. Do *not* include
// call to `peek_arg_lval` itself (since we are peeking the state
// including) the borrow within `peek_arg_place`. Do *not* include
// call to `peek_arg_place` itself (since we are peeking the state
// of the argument at the time immediately preceding the Call to
// `rustc_peek`).
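The shape this check scans for comes from rustc's own dataflow tests; a sketch using the unstable intrinsic and attribute those tests relied on at the time:

```rust
#![feature(core_intrinsics, rustc_attrs)]
use std::intrinsics::rustc_peek;

#[rustc_mir(rustc_peek_maybe_init)]
fn foo(x: i32) {
    // In MIR this becomes `_tmp = &x` feeding `rustc_peek(move _tmp)`,
    // exactly the `place == peek_arg_place` pattern matched below.
    unsafe { rustc_peek(&x) };
}
```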
@ -154,9 +154,9 @@ fn each_block<'a, 'tcx, O>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
for (j, stmt) in statements.iter().enumerate() {
debug!("rustc_peek: ({:?},{}) {:?}", bb, j, stmt);
let (lvalue, rvalue) = match stmt.kind {
mir::StatementKind::Assign(ref lvalue, ref rvalue) => {
(lvalue, rvalue)
let (place, rvalue) = match stmt.kind {
mir::StatementKind::Assign(ref place, ref rvalue) => {
(place, rvalue)
}
mir::StatementKind::StorageLive(_) |
mir::StatementKind::StorageDead(_) |
@ -169,14 +169,14 @@ fn each_block<'a, 'tcx, O>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
"sanity_check should run before Deaggregator inserts SetDiscriminant"),
};
if lvalue == peek_arg_lval {
if let mir::Rvalue::Ref(_, mir::BorrowKind::Shared, ref peeking_at_lval) = *rvalue {
if place == peek_arg_place {
if let mir::Rvalue::Ref(_, mir::BorrowKind::Shared, ref peeking_at_place) = *rvalue {
// Okay, our search is over.
match move_data.rev_lookup.find(peeking_at_lval) {
match move_data.rev_lookup.find(peeking_at_place) {
LookupResult::Exact(peek_mpi) => {
let bit_state = sets.on_entry.contains(&peek_mpi);
debug!("rustc_peek({:?} = &{:?}) bit_state: {}",
lvalue, peeking_at_lval, bit_state);
place, peeking_at_place, bit_state);
if !bit_state {
tcx.sess.span_err(span, "rustc_peek: bit not set");
}
@ -196,10 +196,10 @@ fn each_block<'a, 'tcx, O>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
}
}
let lhs_mpi = move_data.rev_lookup.find(lvalue);
let lhs_mpi = move_data.rev_lookup.find(place);
debug!("rustc_peek: computing effect on lvalue: {:?} ({:?}) in stmt: {:?}",
lvalue, lhs_mpi, stmt);
debug!("rustc_peek: computing effect on place: {:?} ({:?}) in stmt: {:?}",
place, lhs_mpi, stmt);
// reset GEN and KILL sets before emulating their effect.
for e in sets.gen_set.words_mut() { *e = 0; }
for e in sets.kill_set.words_mut() { *e = 0; }


@ -104,13 +104,13 @@ impl<'a, 'b, 'gcx, 'tcx> Visitor<'tcx> for TypeVerifier<'a, 'b, 'gcx, 'tcx> {
}
}
fn visit_lvalue(
fn visit_place(
&mut self,
lvalue: &Place<'tcx>,
place: &Place<'tcx>,
context: PlaceContext,
location: Location,
) {
self.sanitize_lvalue(lvalue, location, context);
self.sanitize_place(place, location, context);
}
fn visit_constant(&mut self, constant: &Constant<'tcx>, location: Location) {
@ -164,18 +164,18 @@ impl<'a, 'b, 'gcx, 'tcx> TypeVerifier<'a, 'b, 'gcx, 'tcx> {
}
}
fn sanitize_lvalue(&mut self,
lvalue: &Place<'tcx>,
fn sanitize_place(&mut self,
place: &Place<'tcx>,
location: Location,
context: PlaceContext)
-> PlaceTy<'tcx> {
debug!("sanitize_lvalue: {:?}", lvalue);
let lvalue_ty = match *lvalue {
debug!("sanitize_place: {:?}", place);
let place_ty = match *place {
Place::Local(index) => PlaceTy::Ty {
ty: self.mir.local_decls[index].ty,
},
Place::Static(box Static { def_id, ty: sty }) => {
let sty = self.sanitize_type(lvalue, sty);
let sty = self.sanitize_type(place, sty);
let ty = self.tcx().type_of(def_id);
let ty = self.cx.normalize(&ty, location);
if let Err(terr) = self.cx
@ -183,7 +183,7 @@ impl<'a, 'b, 'gcx, 'tcx> TypeVerifier<'a, 'b, 'gcx, 'tcx> {
{
span_mirbug!(
self,
lvalue,
place,
"bad static type ({:?}: {:?}): {:?}",
ty,
sty,
@ -198,7 +198,7 @@ impl<'a, 'b, 'gcx, 'tcx> TypeVerifier<'a, 'b, 'gcx, 'tcx> {
} else {
PlaceContext::Projection(Mutability::Not)
};
let base_ty = self.sanitize_lvalue(&proj.base, location, base_context);
let base_ty = self.sanitize_place(&proj.base, location, base_context);
if let PlaceTy::Ty { ty } = base_ty {
if ty.references_error() {
assert!(self.errors_reported);
@ -207,27 +207,27 @@ impl<'a, 'b, 'gcx, 'tcx> TypeVerifier<'a, 'b, 'gcx, 'tcx> {
};
}
}
self.sanitize_projection(base_ty, &proj.elem, lvalue, location)
self.sanitize_projection(base_ty, &proj.elem, place, location)
}
};
if let PlaceContext::Copy = context {
let ty = lvalue_ty.to_ty(self.tcx());
let ty = place_ty.to_ty(self.tcx());
if self.cx.infcx.type_moves_by_default(self.cx.param_env, ty, DUMMY_SP) {
span_mirbug!(self, lvalue,
span_mirbug!(self, place,
"attempted copy of non-Copy type ({:?})", ty);
}
}
lvalue_ty
place_ty
}
fn sanitize_projection(
&mut self,
base: PlaceTy<'tcx>,
pi: &PlaceElem<'tcx>,
lvalue: &Place<'tcx>,
place: &Place<'tcx>,
location: Location,
) -> PlaceTy<'tcx> {
debug!("sanitize_projection: {:?} {:?} {:?}", base, pi, lvalue);
debug!("sanitize_projection: {:?} {:?} {:?}", base, pi, place);
let tcx = self.tcx();
let base_ty = base.to_ty(tcx);
let span = self.last_span;
@ -236,7 +236,7 @@ impl<'a, 'b, 'gcx, 'tcx> TypeVerifier<'a, 'b, 'gcx, 'tcx> {
let deref_ty = base_ty.builtin_deref(true, ty::LvaluePreference::NoPreference);
PlaceTy::Ty {
ty: deref_ty.map(|t| t.ty).unwrap_or_else(|| {
span_mirbug_and_err!(self, lvalue, "deref of non-pointer {:?}", base_ty)
span_mirbug_and_err!(self, place, "deref of non-pointer {:?}", base_ty)
}),
}
}
@ -249,7 +249,7 @@ impl<'a, 'b, 'gcx, 'tcx> TypeVerifier<'a, 'b, 'gcx, 'tcx> {
} else {
PlaceTy::Ty {
ty: base_ty.builtin_index().unwrap_or_else(|| {
span_mirbug_and_err!(self, lvalue, "index of non-array {:?}", base_ty)
span_mirbug_and_err!(self, place, "index of non-array {:?}", base_ty)
}),
}
}
@ -258,7 +258,7 @@ impl<'a, 'b, 'gcx, 'tcx> TypeVerifier<'a, 'b, 'gcx, 'tcx> {
// consider verifying in-bounds
PlaceTy::Ty {
ty: base_ty.builtin_index().unwrap_or_else(|| {
span_mirbug_and_err!(self, lvalue, "index of non-array {:?}", base_ty)
span_mirbug_and_err!(self, place, "index of non-array {:?}", base_ty)
}),
}
}
@ -272,14 +272,14 @@ impl<'a, 'b, 'gcx, 'tcx> TypeVerifier<'a, 'b, 'gcx, 'tcx> {
} else {
span_mirbug_and_err!(
self,
lvalue,
place,
"taking too-small slice of {:?}",
base_ty
)
}
}
ty::TySlice(..) => base_ty,
_ => span_mirbug_and_err!(self, lvalue, "slice of non-array {:?}", base_ty),
_ => span_mirbug_and_err!(self, place, "slice of non-array {:?}", base_ty),
},
},
ProjectionElem::Downcast(adt_def1, index) => match base_ty.sty {
@ -288,7 +288,7 @@ impl<'a, 'b, 'gcx, 'tcx> TypeVerifier<'a, 'b, 'gcx, 'tcx> {
PlaceTy::Ty {
ty: span_mirbug_and_err!(
self,
lvalue,
place,
"cast to variant #{:?} but enum only has {:?}",
index,
adt_def.variants.len()
@ -305,7 +305,7 @@ impl<'a, 'b, 'gcx, 'tcx> TypeVerifier<'a, 'b, 'gcx, 'tcx> {
_ => PlaceTy::Ty {
ty: span_mirbug_and_err!(
self,
lvalue,
place,
"can't downcast {:?} as {:?}",
base_ty,
adt_def1
@ -313,13 +313,13 @@ impl<'a, 'b, 'gcx, 'tcx> TypeVerifier<'a, 'b, 'gcx, 'tcx> {
},
},
ProjectionElem::Field(field, fty) => {
let fty = self.sanitize_type(lvalue, fty);
match self.field_ty(lvalue, base, field, location) {
let fty = self.sanitize_type(place, fty);
match self.field_ty(place, base, field, location) {
Ok(ty) => {
if let Err(terr) = self.cx.eq_types(span, ty, fty, location.at_self()) {
span_mirbug!(
self,
lvalue,
place,
"bad field access ({:?}: {:?}): {:?}",
ty,
fty,
@ -329,7 +329,7 @@ impl<'a, 'b, 'gcx, 'tcx> TypeVerifier<'a, 'b, 'gcx, 'tcx> {
}
Err(FieldAccessError::OutOfRange { field_count }) => span_mirbug!(
self,
lvalue,
place,
"accessed field #{} but variant only has {}",
field.index(),
field_count
@ -551,17 +551,17 @@ impl<'a, 'gcx, 'tcx> TypeChecker<'a, 'gcx, 'tcx> {
debug!("check_stmt: {:?}", stmt);
let tcx = self.tcx();
match stmt.kind {
StatementKind::Assign(ref lv, ref rv) => {
let lv_ty = lv.ty(mir, tcx).to_ty(tcx);
StatementKind::Assign(ref place, ref rv) => {
let place_ty = place.ty(mir, tcx).to_ty(tcx);
let rv_ty = rv.ty(mir, tcx);
if let Err(terr) =
self.sub_types(rv_ty, lv_ty, location.at_successor_within_block())
self.sub_types(rv_ty, place_ty, location.at_successor_within_block())
{
span_mirbug!(
self,
stmt,
"bad assignment ({:?} = {:?}): {:?}",
lv_ty,
place_ty,
rv_ty,
terr
);
@ -569,17 +569,17 @@ impl<'a, 'gcx, 'tcx> TypeChecker<'a, 'gcx, 'tcx> {
self.check_rvalue(mir, rv, location);
}
StatementKind::SetDiscriminant {
ref lvalue,
ref place,
variant_index,
} => {
let lvalue_type = lvalue.ty(mir, tcx).to_ty(tcx);
let adt = match lvalue_type.sty {
let place_type = place.ty(mir, tcx).to_ty(tcx);
let adt = match place_type.sty {
TypeVariants::TyAdt(adt, _) if adt.is_enum() => adt,
_ => {
span_bug!(
stmt.source_info.span,
"bad set discriminant ({:?} = {:?}): lhs is not an enum",
lvalue,
place,
variant_index
);
}
@ -588,7 +588,7 @@ impl<'a, 'gcx, 'tcx> TypeChecker<'a, 'gcx, 'tcx> {
span_bug!(
stmt.source_info.span,
"bad set discriminant ({:?} = {:?}): value of of range",
lvalue,
place,
variant_index
);
};
@ -627,19 +627,19 @@ impl<'a, 'gcx, 'tcx> TypeChecker<'a, 'gcx, 'tcx> {
target,
unwind,
} => {
let lv_ty = location.ty(mir, tcx).to_ty(tcx);
let place_ty = location.ty(mir, tcx).to_ty(tcx);
let rv_ty = value.ty(mir, tcx);
let locations = Locations {
from_location: term_location,
at_location: target.start_location(),
};
if let Err(terr) = self.sub_types(rv_ty, lv_ty, locations) {
if let Err(terr) = self.sub_types(rv_ty, place_ty, locations) {
span_mirbug!(
self,
term,
"bad DropAndReplace ({:?} = {:?}): {:?}",
lv_ty,
place_ty,
rv_ty,
terr
);
@ -653,12 +653,12 @@ impl<'a, 'gcx, 'tcx> TypeChecker<'a, 'gcx, 'tcx> {
from_location: term_location,
at_location: unwind.start_location(),
};
if let Err(terr) = self.sub_types(rv_ty, lv_ty, locations) {
if let Err(terr) = self.sub_types(rv_ty, place_ty, locations) {
span_mirbug!(
self,
term,
"bad DropAndReplace ({:?} = {:?}): {:?}",
lv_ty,
place_ty,
rv_ty,
terr
);
@ -1209,8 +1209,8 @@ trait AtLocation {
/// its successor within the block is the at-location. This means
/// that any required region relationships must hold only upon
/// **exiting** the statement/terminator indicated by `self`. This
/// is for example used when you have a `lv = rv` statement: it
/// indicates that the `typeof(rv) <: typeof(lv)` as of the
/// is for example used when you have a `place = rv` statement: it
/// indicates that `typeof(rv) <: typeof(place)` must hold as of the
/// **next** statement.
fn at_successor_within_block(self) -> Locations;
}
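In surface terms, the `place = rv` rule above is ordinary subtyping at an assignment; a minimal sketch:

```rust
// `typeof(rv)` must be a subtype of `typeof(place)` from the *next*
// statement on: a longer-lived reference may flow into a shorter-lived
// slot, never the reverse.
fn demo<'short>(global: &'static str, mut slot: &'short str) {
    slot = global; // &'static str <: &'short str
    let _ = slot;
}
```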


@ -12,32 +12,32 @@
use rustc::ty::{self, TyCtxt};
use rustc::mir::*;
/// Return `true` if this lvalue is allowed to be less aligned
/// Return `true` if this place is allowed to be less aligned
/// than its containing struct (because it is within a packed
/// struct).
pub fn is_disaligned<'a, 'tcx, L>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
local_decls: &L,
param_env: ty::ParamEnv<'tcx>,
lvalue: &Place<'tcx>)
place: &Place<'tcx>)
-> bool
where L: HasLocalDecls<'tcx>
{
debug!("is_disaligned({:?})", lvalue);
if !is_within_packed(tcx, local_decls, lvalue) {
debug!("is_disaligned({:?}) - not within packed", lvalue);
debug!("is_disaligned({:?})", place);
if !is_within_packed(tcx, local_decls, place) {
debug!("is_disaligned({:?}) - not within packed", place);
return false
}
let ty = lvalue.ty(local_decls, tcx).to_ty(tcx);
let ty = place.ty(local_decls, tcx).to_ty(tcx);
match tcx.layout_raw(param_env.and(ty)) {
Ok(layout) if layout.align.abi() == 1 => {
// if the alignment is 1, the type can't be further
// disaligned.
debug!("is_disaligned({:?}) - align = 1", lvalue);
debug!("is_disaligned({:?}) - align = 1", place);
false
}
_ => {
debug!("is_disaligned({:?}) - true", lvalue);
debug!("is_disaligned({:?}) - true", place);
true
}
}
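A small example of the situation `is_disaligned` detects (plain user code, not part of this diff):

```rust
// In a packed struct, `b` sits at offset 1 with alignment 1, below
// u32's natural alignment: the place `p.b` is "disaligned" and must be
// read with an unaligned load rather than referenced directly.
#[repr(packed)]
struct P {
    a: u8,
    b: u32,
}

fn read(p: &P) -> u32 {
    p.b
}
```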
@ -45,14 +45,14 @@ pub fn is_disaligned<'a, 'tcx, L>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
fn is_within_packed<'a, 'tcx, L>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
local_decls: &L,
lvalue: &Place<'tcx>)
place: &Place<'tcx>)
-> bool
where L: HasLocalDecls<'tcx>
{
let mut lvalue = lvalue;
let mut place = place;
while let &Place::Projection(box Projection {
ref base, ref elem
}) = lvalue {
}) = place {
match *elem {
// encountered a Deref, which is ABI-aligned
ProjectionElem::Deref => break,
@ -67,7 +67,7 @@ fn is_within_packed<'a, 'tcx, L>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
}
_ => {}
}
lvalue = base;
place = base;
}
false


@ -64,10 +64,10 @@ impl<'tcx> DefUseAnalysis<'tcx> {
where F: for<'a> FnMut(&'a mut Local,
PlaceContext<'tcx>,
Location) {
for lvalue_use in &self.info[local].defs_and_uses {
for place_use in &self.info[local].defs_and_uses {
MutateUseVisitor::new(local,
&mut callback,
mir).visit_location(mir, lvalue_use.location)
mir).visit_location(mir, place_use.location)
}
}
@ -108,7 +108,7 @@ impl<'tcx> Info<'tcx> {
}
pub fn def_count(&self) -> usize {
self.defs_and_uses.iter().filter(|lvalue_use| lvalue_use.context.is_mutating_use()).count()
self.defs_and_uses.iter().filter(|place_use| place_use.context.is_mutating_use()).count()
}
pub fn def_count_not_including_drop(&self) -> usize {
@ -118,14 +118,14 @@ impl<'tcx> Info<'tcx> {
pub fn defs_not_including_drop(
&self,
) -> iter::Filter<slice::Iter<Use<'tcx>>, fn(&&Use<'tcx>) -> bool> {
self.defs_and_uses.iter().filter(|lvalue_use| {
lvalue_use.context.is_mutating_use() && !lvalue_use.context.is_drop()
self.defs_and_uses.iter().filter(|place_use| {
place_use.context.is_mutating_use() && !place_use.context.is_drop()
})
}
pub fn use_count(&self) -> usize {
self.defs_and_uses.iter().filter(|lvalue_use| {
lvalue_use.context.is_nonmutating_use()
self.defs_and_uses.iter().filter(|place_use| {
place_use.context.is_nonmutating_use()
}).count()
}
}


@ -105,7 +105,7 @@ struct DropCtxt<'l, 'b: 'l, 'tcx: 'b, D>
source_info: SourceInfo,
lvalue: &'l Place<'tcx>,
place: &'l Place<'tcx>,
path: D::Path,
succ: BasicBlock,
unwind: Unwind,
@ -114,7 +114,7 @@ struct DropCtxt<'l, 'b: 'l, 'tcx: 'b, D>
pub fn elaborate_drop<'b, 'tcx, D>(
elaborator: &mut D,
source_info: SourceInfo,
lvalue: &Place<'tcx>,
place: &Place<'tcx>,
path: D::Path,
succ: BasicBlock,
unwind: Unwind,
@ -122,15 +122,15 @@ pub fn elaborate_drop<'b, 'tcx, D>(
where D: DropElaborator<'b, 'tcx>
{
DropCtxt {
elaborator, source_info, lvalue, path, succ, unwind
elaborator, source_info, place, path, succ, unwind
}.elaborate_drop(bb)
}
impl<'l, 'b, 'tcx, D> DropCtxt<'l, 'b, 'tcx, D>
where D: DropElaborator<'b, 'tcx>
{
fn lvalue_ty(&self, lvalue: &Place<'tcx>) -> Ty<'tcx> {
lvalue.ty(self.elaborator.mir(), self.tcx()).to_ty(self.tcx())
fn place_ty(&self, place: &Place<'tcx>) -> Ty<'tcx> {
place.ty(self.elaborator.mir(), self.tcx()).to_ty(self.tcx())
}
fn tcx(&self) -> TyCtxt<'b, 'tcx, 'tcx> {
@ -169,7 +169,7 @@ impl<'l, 'b, 'tcx, D> DropCtxt<'l, 'b, 'tcx, D>
let loc = self.terminator_loc(bb);
self.elaborator.clear_drop_flag(loc, self.path, DropFlagMode::Deep);
self.elaborator.patch().patch_terminator(bb, TerminatorKind::Drop {
location: self.lvalue.clone(),
location: self.place.clone(),
target: self.succ,
unwind: self.unwind.into_option(),
});
@ -191,10 +191,10 @@ impl<'l, 'b, 'tcx, D> DropCtxt<'l, 'b, 'tcx, D>
}
}
/// Return the lvalue and move path for each field of `variant`,
/// Return the place and move path for each field of `variant`,
/// (the move path is `None` if the field is a rest field).
fn move_paths_for_fields(&self,
base_lv: &Place<'tcx>,
base_place: &Place<'tcx>,
variant_path: D::Path,
variant: &'tcx ty::VariantDef,
substs: &'tcx Substs<'tcx>)
@ -209,32 +209,32 @@ impl<'l, 'b, 'tcx, D> DropCtxt<'l, 'b, 'tcx, D>
&f.ty(self.tcx(), substs),
self.elaborator.param_env()
);
(base_lv.clone().field(field, field_ty), subpath)
(base_place.clone().field(field, field_ty), subpath)
}).collect()
}
fn drop_subpath(&mut self,
lvalue: &Place<'tcx>,
place: &Place<'tcx>,
path: Option<D::Path>,
succ: BasicBlock,
unwind: Unwind)
-> BasicBlock
{
if let Some(path) = path {
debug!("drop_subpath: for std field {:?}", lvalue);
debug!("drop_subpath: for std field {:?}", place);
DropCtxt {
elaborator: self.elaborator,
source_info: self.source_info,
path, lvalue, succ, unwind,
path, place, succ, unwind,
}.elaborated_drop_block()
} else {
debug!("drop_subpath: for rest field {:?}", lvalue);
debug!("drop_subpath: for rest field {:?}", place);
DropCtxt {
elaborator: self.elaborator,
source_info: self.source_info,
lvalue, succ, unwind,
place, succ, unwind,
// Using `self.path` here to condition the drop on
// our own drop flag.
path: self.path
@ -256,8 +256,8 @@ impl<'l, 'b, 'tcx, D> DropCtxt<'l, 'b, 'tcx, D>
{
Some(succ).into_iter().chain(
fields.iter().rev().zip(unwind_ladder)
.map(|(&(ref lv, path), &unwind_succ)| {
succ = self.drop_subpath(lv, path, succ, unwind_succ);
.map(|(&(ref place, path), &unwind_succ)| {
succ = self.drop_subpath(place, path, succ, unwind_succ);
succ
})
).collect()
@ -302,8 +302,8 @@ impl<'l, 'b, 'tcx, D> DropCtxt<'l, 'b, 'tcx, D>
debug!("drop_ladder({:?}, {:?})", self, fields);
let mut fields = fields;
fields.retain(|&(ref lvalue, _)| {
self.lvalue_ty(lvalue).needs_drop(self.tcx(), self.elaborator.param_env())
fields.retain(|&(ref place, _)| {
self.place_ty(place).needs_drop(self.tcx(), self.elaborator.param_env())
});
debug!("drop_ladder - fields needing drop: {:?}", fields);
@ -328,7 +328,7 @@ impl<'l, 'b, 'tcx, D> DropCtxt<'l, 'b, 'tcx, D>
debug!("open_drop_for_tuple({:?}, {:?})", self, tys);
let fields = tys.iter().enumerate().map(|(i, &ty)| {
(self.lvalue.clone().field(Field::new(i), ty),
(self.place.clone().field(Field::new(i), ty),
self.elaborator.field_subpath(self.path, Field::new(i)))
}).collect();
@ -340,7 +340,7 @@ impl<'l, 'b, 'tcx, D> DropCtxt<'l, 'b, 'tcx, D>
{
debug!("open_drop_for_box({:?}, {:?})", self, ty);
let interior = self.lvalue.clone().deref();
let interior = self.place.clone().deref();
let interior_path = self.elaborator.deref_subpath(self.path);
let succ = self.succ; // FIXME(#6393)
@ -386,7 +386,7 @@ impl<'l, 'b, 'tcx, D> DropCtxt<'l, 'b, 'tcx, D>
let (succ, unwind) = self.drop_ladder_bottom();
if !adt.is_enum() {
let fields = self.move_paths_for_fields(
self.lvalue,
self.place,
self.path,
&adt.variants[0],
substs
@ -416,11 +416,11 @@ impl<'l, 'b, 'tcx, D> DropCtxt<'l, 'b, 'tcx, D>
let subpath = self.elaborator.downcast_subpath(
self.path, variant_index);
if let Some(variant_path) = subpath {
let base_lv = self.lvalue.clone().elem(
let base_place = self.place.clone().elem(
ProjectionElem::Downcast(adt, variant_index)
);
let fields = self.move_paths_for_fields(
&base_lv,
&base_place,
variant_path,
&adt.variants[variant_index],
substs);
@ -492,7 +492,7 @@ impl<'l, 'b, 'tcx, D> DropCtxt<'l, 'b, 'tcx, D>
// way lies only trouble.
let discr_ty = adt.repr.discr_type().to_ty(self.tcx());
let discr = Place::Local(self.new_temp(discr_ty));
let discr_rv = Rvalue::Discriminant(self.lvalue.clone());
let discr_rv = Rvalue::Discriminant(self.place.clone());
let switch_block = BasicBlockData {
statements: vec![self.assign(&discr, discr_rv)],
terminator: Some(Terminator {
@ -517,26 +517,26 @@ impl<'l, 'b, 'tcx, D> DropCtxt<'l, 'b, 'tcx, D>
let tcx = self.tcx();
let drop_trait = tcx.lang_items().drop_trait().unwrap();
let drop_fn = tcx.associated_items(drop_trait).next().unwrap();
let ty = self.lvalue_ty(self.lvalue);
let ty = self.place_ty(self.place);
let substs = tcx.mk_substs(iter::once(Kind::from(ty)));
let ref_ty = tcx.mk_ref(tcx.types.re_erased, ty::TypeAndMut {
ty,
mutbl: hir::Mutability::MutMutable
});
let ref_lvalue = self.new_temp(ref_ty);
let ref_place = self.new_temp(ref_ty);
let unit_temp = Place::Local(self.new_temp(tcx.mk_nil()));
let result = BasicBlockData {
statements: vec![self.assign(
&Place::Local(ref_lvalue),
Rvalue::Ref(tcx.types.re_erased, BorrowKind::Mut, self.lvalue.clone())
&Place::Local(ref_place),
Rvalue::Ref(tcx.types.re_erased, BorrowKind::Mut, self.place.clone())
)],
terminator: Some(Terminator {
kind: TerminatorKind::Call {
func: Operand::function_handle(tcx, drop_fn.def_id, substs,
self.source_info.span),
args: vec![Operand::Move(Place::Local(ref_lvalue))],
args: vec![Operand::Move(Place::Local(ref_place))],
destination: Some((unit_temp, succ)),
cleanup: unwind.into_option(),
},
@ -572,8 +572,8 @@ impl<'l, 'b, 'tcx, D> DropCtxt<'l, 'b, 'tcx, D>
ptr_based: bool)
-> BasicBlock
{
let copy = |lv: &Place<'tcx>| Operand::Copy(lv.clone());
let move_ = |lv: &Place<'tcx>| Operand::Move(lv.clone());
let copy = |place: &Place<'tcx>| Operand::Copy(place.clone());
let move_ = |place: &Place<'tcx>| Operand::Move(place.clone());
let tcx = self.tcx();
let ref_ty = tcx.mk_ref(tcx.types.re_erased, ty::TypeAndMut {
@ -591,7 +591,7 @@ impl<'l, 'b, 'tcx, D> DropCtxt<'l, 'b, 'tcx, D>
(Rvalue::Ref(
tcx.types.re_erased,
BorrowKind::Mut,
self.lvalue.clone().index(cur)),
self.place.clone().index(cur)),
Rvalue::BinaryOp(BinOp::Add, copy(&Place::Local(cur)), one))
};
@ -643,7 +643,7 @@ impl<'l, 'b, 'tcx, D> DropCtxt<'l, 'b, 'tcx, D>
let tcx = self.tcx();
let move_ = |lv: &Place<'tcx>| Operand::Move(lv.clone());
let move_ = |place: &Place<'tcx>| Operand::Move(place.clone());
let size = &Place::Local(self.new_temp(tcx.types.usize));
let size_is_zero = &Place::Local(self.new_temp(tcx.types.bool));
let base_block = BasicBlockData {
@ -667,7 +667,7 @@ impl<'l, 'b, 'tcx, D> DropCtxt<'l, 'b, 'tcx, D>
self.elaborator.patch().new_block(base_block)
}
// create a pair of drop-loops of `lvalue`, which drops its contents
// create a pair of drop-loops of `place`, which drops its contents
// even in the case of 1 panic. If `ptr_based`, create a pointer loop,
// otherwise create an index loop.
fn drop_loop_pair(&mut self, ety: Ty<'tcx>, ptr_based: bool) -> BasicBlock {
@ -708,15 +708,15 @@ impl<'l, 'b, 'tcx, D> DropCtxt<'l, 'b, 'tcx, D>
let cur = Place::Local(cur);
let zero = self.constant_usize(0);
let mut drop_block_stmts = vec![];
drop_block_stmts.push(self.assign(&length, Rvalue::Len(self.lvalue.clone())));
drop_block_stmts.push(self.assign(&length, Rvalue::Len(self.place.clone())));
if ptr_based {
let tmp_ty = tcx.mk_mut_ptr(self.lvalue_ty(self.lvalue));
let tmp_ty = tcx.mk_mut_ptr(self.place_ty(self.place));
let tmp = Place::Local(self.new_temp(tmp_ty));
// tmp = &LV;
// cur = tmp as *mut T;
// end = Offset(cur, len);
drop_block_stmts.push(self.assign(&tmp, Rvalue::Ref(
tcx.types.re_erased, BorrowKind::Mut, self.lvalue.clone()
tcx.types.re_erased, BorrowKind::Mut, self.place.clone()
)));
drop_block_stmts.push(self.assign(&cur, Rvalue::Cast(
CastKind::Misc, Operand::Move(tmp.clone()), iter_ty
@ -752,7 +752,7 @@ impl<'l, 'b, 'tcx, D> DropCtxt<'l, 'b, 'tcx, D>
/// This creates a "drop ladder" that drops the needed fields of the
/// ADT, both in the success case and if one of the destructors fails.
fn open_drop<'a>(&mut self) -> BasicBlock {
let ty = self.lvalue_ty(self.lvalue);
let ty = self.place_ty(self.place);
match ty.sty {
ty::TyClosure(def_id, substs) |
// Note that `elaborate_drops` only drops the upvars of a generator,
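The ladder that the `open_drop` comment describes has an observable effect in user code; a hedged illustration:

```rust
struct Noisy(&'static str);
impl Drop for Noisy {
    fn drop(&mut self) {
        println!("dropping {}", self.0);
        if self.0 == "first" {
            panic!("boom");
        }
    }
}

struct Pair(Noisy, Noisy);

fn main() {
    let _ = std::panic::catch_unwind(|| {
        let _p = Pair(Noisy("first"), Noisy("second"));
    });
    // "dropping second" is still printed: the drop ladder runs the
    // remaining field drops on the unwind path even though dropping the
    // first field panicked.
}
```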
@ -786,13 +786,13 @@ impl<'l, 'b, 'tcx, D> DropCtxt<'l, 'b, 'tcx, D>
}
}
/// Return a basic block that drop an lvalue using the context
/// Return a basic block that drops a place using the context
/// and path in `c`. If `mode` is something, also clear `c`
/// according to it.
///
/// if FLAG(self.path)
/// if let Some(mode) = mode: FLAG(self.path)[mode] = false
/// drop(self.lv)
/// drop(self.place)
fn complete_drop<'a>(&mut self,
drop_mode: Option<DropFlagMode>,
succ: BasicBlock,
@ -855,7 +855,7 @@ impl<'l, 'b, 'tcx, D> DropCtxt<'l, 'b, 'tcx, D>
let call = TerminatorKind::Call {
func: Operand::function_handle(tcx, free_func, substs, self.source_info.span),
args: vec![Operand::Move(self.lvalue.clone())],
args: vec![Operand::Move(self.place.clone())],
destination: Some((unit_temp, target)),
cleanup: None
}; // FIXME(#6393)
@ -868,7 +868,7 @@ impl<'l, 'b, 'tcx, D> DropCtxt<'l, 'b, 'tcx, D>
fn drop_block<'a>(&mut self, target: BasicBlock, unwind: Unwind) -> BasicBlock {
let block = TerminatorKind::Drop {
location: self.lvalue.clone(),
location: self.place.clone(),
target,
unwind: unwind.into_option()
};


@ -127,8 +127,8 @@ impl<'tcx> MirPatch<'tcx> {
self.new_statements.push((loc, stmt));
}
pub fn add_assign(&mut self, loc: Location, lv: Place<'tcx>, rv: Rvalue<'tcx>) {
self.add_statement(loc, StatementKind::Assign(lv, rv));
pub fn add_assign(&mut self, loc: Location, place: Place<'tcx>, rv: Rvalue<'tcx>) {
self.add_statement(loc, StatementKind::Assign(place, rv));
}
pub fn apply(self, mir: &mut Mir<'tcx>) {


@ -344,12 +344,12 @@ pub fn write_mir_intro<'a, 'gcx, 'tcx>(tcx: TyCtxt<'a, 'gcx, 'tcx>,
}
}
// Print return pointer
// Print return place
let indented_retptr = format!("{}let mut {:?}: {};",
INDENT,
RETURN_POINTER,
mir.local_decls[RETURN_POINTER].ty);
writeln!(w, "{0:1$} // return pointer",
RETURN_PLACE,
mir.local_decls[RETURN_PLACE].ty);
writeln!(w, "{0:1$} // return place",
indented_retptr,
ALIGN)?;
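For a trivial function, the renamed constant and comment change the dump roughly as follows (a sketch; the exact column layout comes from `INDENT` and `ALIGN` above):

```rust
fn answer() -> i32 {
    42
}

// MIR dump of `answer` (sketch):
//     let mut _0: i32;                 // return place
```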


@ -188,41 +188,41 @@ impl<'a, 'tcx> mir_visit::Visitor<'tcx> for StatCollector<'a, 'tcx> {
self.super_operand(operand, location);
}
fn visit_lvalue(&mut self,
lvalue: &Place<'tcx>,
fn visit_place(&mut self,
place: &Place<'tcx>,
context: mir_visit::PlaceContext<'tcx>,
location: Location) {
self.record("Place", lvalue);
self.record(match *lvalue {
self.record("Place", place);
self.record(match *place {
Place::Local(..) => "Place::Local",
Place::Static(..) => "Place::Static",
Place::Projection(..) => "Place::Projection",
}, lvalue);
self.super_lvalue(lvalue, context, location);
}, place);
self.super_place(place, context, location);
}
fn visit_projection(&mut self,
lvalue: &PlaceProjection<'tcx>,
place: &PlaceProjection<'tcx>,
context: mir_visit::PlaceContext<'tcx>,
location: Location) {
self.record("PlaceProjection", lvalue);
self.super_projection(lvalue, context, location);
self.record("PlaceProjection", place);
self.super_projection(place, context, location);
}
fn visit_projection_elem(&mut self,
lvalue: &PlaceElem<'tcx>,
place: &PlaceElem<'tcx>,
context: mir_visit::PlaceContext<'tcx>,
location: Location) {
self.record("PlaceElem", lvalue);
self.record(match *lvalue {
self.record("PlaceElem", place);
self.record(match *place {
ProjectionElem::Deref => "PlaceElem::Deref",
ProjectionElem::Subslice { .. } => "PlaceElem::Subslice",
ProjectionElem::Field(..) => "PlaceElem::Field",
ProjectionElem::Index(..) => "PlaceElem::Index",
ProjectionElem::ConstantIndex { .. } => "PlaceElem::ConstantIndex",
ProjectionElem::Downcast(..) => "PlaceElem::Downcast",
}, lvalue);
self.super_projection_elem(lvalue, context, location);
}, place);
self.super_projection_elem(place, context, location);
}
fn visit_constant(&mut self,


@ -30,7 +30,7 @@ use cabi_sparc64;
use cabi_nvptx;
use cabi_nvptx64;
use cabi_hexagon;
use mir::lvalue::{Alignment, PlaceRef};
use mir::place::{Alignment, PlaceRef};
use mir::operand::OperandValue;
use type_::Type;
use type_of::{LayoutLlvmExt, PointerKind};
@ -545,14 +545,14 @@ impl<'a, 'tcx> ArgType<'tcx> {
self.mode == PassMode::Ignore
}
/// Get the LLVM type for an lvalue of the original Rust type of
/// Get the LLVM type for a place of the original Rust type of
/// this argument/return, i.e. the result of `type_of::type_of`.
pub fn memory_ty(&self, ccx: &CrateContext<'a, 'tcx>) -> Type {
self.layout.llvm_type(ccx)
}
/// Store a direct/indirect value described by this ArgType into a
/// lvalue for the original Rust type of this argument/return.
/// place for the original Rust type of this argument/return.
/// Can be used both for storing formal arguments into Rust variables
/// and for storing results of call/invoke instructions into their destinations.
pub fn store(&self, bcx: &Builder<'a, 'tcx>, val: ValueRef, dst: PlaceRef<'tcx>) {


@ -18,7 +18,7 @@ use builder::Builder;
use rustc::hir;
use mir::lvalue::PlaceRef;
use mir::place::PlaceRef;
use mir::operand::OperandValue;
use std::ffi::CString;
@ -37,15 +37,15 @@ pub fn trans_inline_asm<'a, 'tcx>(
// Prepare the output operands
let mut indirect_outputs = vec![];
for (i, (out, lvalue)) in ia.outputs.iter().zip(&outputs).enumerate() {
for (i, (out, place)) in ia.outputs.iter().zip(&outputs).enumerate() {
if out.is_rw {
inputs.push(lvalue.load(bcx).immediate());
inputs.push(place.load(bcx).immediate());
ext_constraints.push(i.to_string());
}
if out.is_indirect {
indirect_outputs.push(lvalue.load(bcx).immediate());
indirect_outputs.push(place.load(bcx).immediate());
} else {
output_types.push(lvalue.layout.llvm_type(bcx.ccx));
output_types.push(place.layout.llvm_type(bcx.ccx));
}
}
if !indirect_outputs.is_empty() {
@ -100,9 +100,9 @@ pub fn trans_inline_asm<'a, 'tcx>(
// Again, based on how many outputs we have
let outputs = ia.outputs.iter().zip(&outputs).filter(|&(ref o, _)| !o.is_indirect);
for (i, (_, &lvalue)) in outputs.enumerate() {
for (i, (_, &place)) in outputs.enumerate() {
let v = if num_outputs == 1 { r } else { bcx.extract_value(r, i as u64) };
OperandValue::Immediate(v).store(bcx, lvalue);
OperandValue::Immediate(v).store(bcx, place);
}
// Store mark in a metadata node so we can map LLVM errors


@ -50,7 +50,7 @@ use rustc::session::config::{self, NoDebugInfo};
use rustc::session::Session;
use rustc_incremental;
use allocator;
use mir::lvalue::PlaceRef;
use mir::place::PlaceRef;
use attributes;
use builder::Builder;
use callee;


@ -14,7 +14,7 @@ use intrinsics::{self, Intrinsic};
use llvm;
use llvm::{ValueRef};
use abi::{Abi, FnType, PassMode};
use mir::lvalue::{PlaceRef, Alignment};
use mir::place::{PlaceRef, Alignment};
use mir::operand::{OperandRef, OperandValue};
use base::*;
use common::*;


@ -22,7 +22,7 @@ use rustc::ty::layout::LayoutOf;
use type_of::LayoutLlvmExt;
use super::MirContext;
pub fn lvalue_locals<'a, 'tcx>(mircx: &MirContext<'a, 'tcx>) -> BitVector {
pub fn memory_locals<'a, 'tcx>(mircx: &MirContext<'a, 'tcx>) -> BitVector {
let mir = mircx.mir;
let mut analyzer = LocalAnalyzer::new(mircx);
@ -44,16 +44,16 @@ pub fn lvalue_locals<'a, 'tcx>(mircx: &MirContext<'a, 'tcx>) -> BitVector {
// (e.g. structs) into an alloca unconditionally, just so
// that we don't have to deal with having two pathways
// (gep vs extractvalue etc).
analyzer.mark_as_lvalue(mir::Local::new(index));
analyzer.mark_as_memory(mir::Local::new(index));
}
}
analyzer.lvalue_locals
analyzer.memory_locals
}
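In surface-Rust terms, the split this analysis computes looks like the following (hedged illustration, not compiler code):

```rust
fn f() -> i32 {
    let x = 1i32;        // scalar, never borrowed: can stay an SSA operand
    let arr = [7u8; 16]; // aggregate: marked as a memory local (alloca)
    let p = &x;          // taking `&x` forces `x` into memory as well
    *p + arr[0] as i32
}
```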
struct LocalAnalyzer<'mir, 'a: 'mir, 'tcx: 'a> {
cx: &'mir MirContext<'a, 'tcx>,
lvalue_locals: BitVector,
memory_locals: BitVector,
seen_assigned: BitVector
}
@ -61,7 +61,7 @@ impl<'mir, 'a, 'tcx> LocalAnalyzer<'mir, 'a, 'tcx> {
fn new(mircx: &'mir MirContext<'a, 'tcx>) -> LocalAnalyzer<'mir, 'a, 'tcx> {
let mut analyzer = LocalAnalyzer {
cx: mircx,
lvalue_locals: BitVector::new(mircx.mir.local_decls.len()),
memory_locals: BitVector::new(mircx.mir.local_decls.len()),
seen_assigned: BitVector::new(mircx.mir.local_decls.len())
};
@ -73,14 +73,14 @@ impl<'mir, 'a, 'tcx> LocalAnalyzer<'mir, 'a, 'tcx> {
analyzer
}
fn mark_as_lvalue(&mut self, local: mir::Local) {
debug!("marking {:?} as lvalue", local);
self.lvalue_locals.insert(local.index());
fn mark_as_memory(&mut self, local: mir::Local) {
debug!("marking {:?} as memory", local);
self.memory_locals.insert(local.index());
}
fn mark_assigned(&mut self, local: mir::Local) {
if !self.seen_assigned.insert(local.index()) {
self.mark_as_lvalue(local);
self.mark_as_memory(local);
}
}
}
@ -88,18 +88,18 @@ impl<'mir, 'a, 'tcx> LocalAnalyzer<'mir, 'a, 'tcx> {
impl<'mir, 'a, 'tcx> Visitor<'tcx> for LocalAnalyzer<'mir, 'a, 'tcx> {
fn visit_assign(&mut self,
block: mir::BasicBlock,
lvalue: &mir::Place<'tcx>,
place: &mir::Place<'tcx>,
rvalue: &mir::Rvalue<'tcx>,
location: Location) {
debug!("visit_assign(block={:?}, lvalue={:?}, rvalue={:?})", block, lvalue, rvalue);
debug!("visit_assign(block={:?}, place={:?}, rvalue={:?})", block, place, rvalue);
if let mir::Place::Local(index) = *lvalue {
if let mir::Place::Local(index) = *place {
self.mark_assigned(index);
if !self.cx.rvalue_creates_operand(rvalue) {
self.mark_as_lvalue(index);
self.mark_as_memory(index);
}
} else {
self.visit_lvalue(lvalue, PlaceContext::Store, location);
self.visit_place(place, PlaceContext::Store, location);
}
self.visit_rvalue(rvalue, location);
@ -121,8 +121,8 @@ impl<'mir, 'a, 'tcx> Visitor<'tcx> for LocalAnalyzer<'mir, 'a, 'tcx> {
// box_free(x) shares with `drop x` the property that it
// is not guaranteed to be statically dominated by the
// definition of x, so x must always be in an alloca.
if let mir::Operand::Move(ref lvalue) = args[0] {
self.visit_lvalue(lvalue, PlaceContext::Drop, location);
if let mir::Operand::Move(ref place) = args[0] {
self.visit_place(place, PlaceContext::Drop, location);
}
}
_ => {}
@ -131,14 +131,14 @@ impl<'mir, 'a, 'tcx> Visitor<'tcx> for LocalAnalyzer<'mir, 'a, 'tcx> {
self.super_terminator_kind(block, kind, location);
}
fn visit_lvalue(&mut self,
lvalue: &mir::Place<'tcx>,
fn visit_place(&mut self,
place: &mir::Place<'tcx>,
context: PlaceContext<'tcx>,
location: Location) {
debug!("visit_lvalue(lvalue={:?}, context={:?})", lvalue, context);
debug!("visit_place(place={:?}, context={:?})", place, context);
let ccx = self.cx.ccx;
if let mir::Place::Projection(ref proj) = *lvalue {
if let mir::Place::Projection(ref proj) = *place {
// Allow uses of projections that are ZSTs or from scalar fields.
let is_consume = match context {
PlaceContext::Copy | PlaceContext::Move => true,
@ -160,20 +160,20 @@ impl<'mir, 'a, 'tcx> Visitor<'tcx> for LocalAnalyzer<'mir, 'a, 'tcx> {
if layout.is_llvm_immediate() || layout.is_llvm_scalar_pair() {
// Recurse with the same context, instead of `Projection`,
// potentially stopping at non-operand projections,
// which would trigger `mark_as_lvalue` on locals.
self.visit_lvalue(&proj.base, context, location);
// which would trigger `mark_as_memory` on locals.
self.visit_place(&proj.base, context, location);
return;
}
}
}
// A deref projection only reads the pointer, never needs the lvalue.
// A deref projection only reads the pointer, never needs the place.
if let mir::ProjectionElem::Deref = proj.elem {
return self.visit_lvalue(&proj.base, PlaceContext::Copy, location);
return self.visit_place(&proj.base, PlaceContext::Copy, location);
}
}
self.super_lvalue(lvalue, context, location);
self.super_place(place, context, location);
}
fn visit_local(&mut self,
@ -195,16 +195,16 @@ impl<'mir, 'a, 'tcx> Visitor<'tcx> for LocalAnalyzer<'mir, 'a, 'tcx> {
PlaceContext::Store |
PlaceContext::Borrow { .. } |
PlaceContext::Projection(..) => {
self.mark_as_lvalue(index);
self.mark_as_memory(index);
}
PlaceContext::Drop => {
let ty = mir::Place::Local(index).ty(self.cx.mir, self.cx.ccx.tcx());
let ty = self.cx.monomorphize(&ty.to_ty(self.cx.ccx.tcx()));
// Only need the lvalue if we're actually dropping it.
// Only need the place if we're actually dropping it.
if self.cx.ccx.shared().type_needs_drop(ty) {
self.mark_as_lvalue(index);
self.mark_as_memory(index);
}
}
}


@ -31,7 +31,7 @@ use syntax_pos::Pos;
use super::{MirContext, LocalRef};
use super::constant::Const;
use super::lvalue::{Alignment, PlaceRef};
use super::place::{Alignment, PlaceRef};
use super::operand::OperandRef;
use super::operand::OperandValue::{Pair, Ref, Immediate};
@ -214,7 +214,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
}
PassMode::Direct(_) | PassMode::Pair(..) => {
let op = self.trans_consume(&bcx, &mir::Place::Local(mir::RETURN_POINTER));
let op = self.trans_consume(&bcx, &mir::Place::Local(mir::RETURN_PLACE));
if let Ref(llval, align) = op.val {
bcx.load(llval, align.non_abi())
} else {
@ -223,13 +223,13 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
}
PassMode::Cast(cast_ty) => {
let op = match self.locals[mir::RETURN_POINTER] {
let op = match self.locals[mir::RETURN_PLACE] {
LocalRef::Operand(Some(op)) => op,
LocalRef::Operand(None) => bug!("use of return before def"),
LocalRef::Place(tr_lvalue) => {
LocalRef::Place(tr_place) => {
OperandRef {
val: Ref(tr_lvalue.llval, tr_lvalue.alignment),
layout: tr_lvalue.layout
val: Ref(tr_place.llval, tr_place.alignment),
layout: tr_place.layout
}
}
};
@ -241,7 +241,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
}
Ref(llval, align) => {
assert_eq!(align, Alignment::AbiAligned,
"return pointer is unaligned!");
"return place is unaligned!");
llval
}
};
@ -268,9 +268,9 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
return
}
let lvalue = self.trans_lvalue(&bcx, location);
let mut args: &[_] = &[lvalue.llval, lvalue.llextra];
args = &args[..1 + lvalue.has_extra() as usize];
let place = self.trans_place(&bcx, location);
let mut args: &[_] = &[place.llval, place.llextra];
args = &args[..1 + place.has_extra() as usize];
let (drop_fn, fn_ty) = match ty.sty {
ty::TyDynamic(..) => {
let fn_ty = common::instance_ty(bcx.ccx.tcx(), &drop_fn);
@ -278,7 +278,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
let sig = bcx.tcx().erase_late_bound_regions_and_normalize(&sig);
let fn_ty = FnType::new_vtable(bcx.ccx, sig, &[]);
args = &args[..1];
(meth::DESTRUCTOR.get_fn(&bcx, lvalue.llextra, &fn_ty), fn_ty)
(meth::DESTRUCTOR.get_fn(&bcx, place.llextra, &fn_ty), fn_ty)
}
_ => {
(callee::get_fn(bcx.ccx, drop_fn),
@ -792,7 +792,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
match self.locals[index] {
LocalRef::Place(dest) => dest,
LocalRef::Operand(None) => {
// Handle temporary lvalues, specifically Operand ones, as
// Handle temporary places, specifically Operand ones, as
// they don't have allocas
return if fn_ret.is_indirect() {
// Odd, but possible, case: we have an operand temporary,
@ -813,11 +813,11 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
};
}
LocalRef::Operand(Some(_)) => {
bug!("lvalue local already assigned to");
bug!("place local already assigned to");
}
}
} else {
self.trans_lvalue(bcx, dest)
self.trans_place(bcx, dest)
};
if fn_ret.is_indirect() {
match dest.alignment {
@ -845,15 +845,15 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
dst: &mir::Place<'tcx>) {
if let mir::Place::Local(index) = *dst {
match self.locals[index] {
LocalRef::Place(lvalue) => self.trans_transmute_into(bcx, src, lvalue),
LocalRef::Place(place) => self.trans_transmute_into(bcx, src, place),
LocalRef::Operand(None) => {
let dst_layout = bcx.ccx.layout_of(self.monomorphized_lvalue_ty(dst));
let dst_layout = bcx.ccx.layout_of(self.monomorphized_place_ty(dst));
assert!(!dst_layout.ty.has_erasable_regions());
let lvalue = PlaceRef::alloca(bcx, dst_layout, "transmute_temp");
lvalue.storage_live(bcx);
self.trans_transmute_into(bcx, src, lvalue);
let op = lvalue.load(bcx);
lvalue.storage_dead(bcx);
let place = PlaceRef::alloca(bcx, dst_layout, "transmute_temp");
place.storage_live(bcx);
self.trans_transmute_into(bcx, src, place);
let op = place.load(bcx);
place.storage_dead(bcx);
self.locals[index] = LocalRef::Operand(Some(op));
}
LocalRef::Operand(Some(op)) => {
@ -862,7 +862,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
}
}
} else {
let dst = self.trans_lvalue(bcx, dst);
let dst = self.trans_place(bcx, dst);
self.trans_transmute_into(bcx, src, dst);
}
}
@ -918,8 +918,8 @@ enum ReturnDest<'tcx> {
Nothing,
// Store the return value to the pointer
Store(PlaceRef<'tcx>),
// Stores an indirect return value to an operand local lvalue
// Stores an indirect return value to an operand local place
IndirectOperand(PlaceRef<'tcx>, mir::Local),
// Stores a direct return value to an operand local lvalue
// Stores a direct return value to an operand local place
DirectOperand(mir::Local)
}


@ -42,7 +42,7 @@ use syntax::ast;
use std::fmt;
use std::ptr;
use super::lvalue::Alignment;
use super::place::Alignment;
use super::operand::{OperandRef, OperandValue};
use super::MirContext;
@ -156,7 +156,7 @@ impl<'a, 'tcx> Const<'tcx> {
self.get_pair(ccx)
}
fn as_lvalue(&self) -> ConstPlace<'tcx> {
fn as_place(&self) -> ConstPlace<'tcx> {
ConstPlace {
base: Base::Value(self.llval),
llextra: ptr::null_mut(),
@ -210,7 +210,7 @@ enum Base {
Static(ValueRef)
}
/// An lvalue as seen from a constant.
/// A place as seen from a constant.
#[derive(Copy, Clone)]
struct ConstPlace<'tcx> {
base: Base,
@ -348,7 +348,7 @@ impl<'a, 'tcx> MirConstContext<'a, 'tcx> {
mir::TerminatorKind::Goto { target } => target,
mir::TerminatorKind::Return => {
failure?;
return self.locals[mir::RETURN_POINTER].clone().unwrap_or_else(|| {
return self.locals[mir::RETURN_PLACE].clone().unwrap_or_else(|| {
span_bug!(span, "no returned value in constant");
});
}
@ -437,17 +437,17 @@ impl<'a, 'tcx> MirConstContext<'a, 'tcx> {
}
}
fn const_lvalue(&self, lvalue: &mir::Place<'tcx>, span: Span)
fn const_place(&self, place: &mir::Place<'tcx>, span: Span)
-> Result<ConstPlace<'tcx>, ConstEvalErr<'tcx>> {
let tcx = self.ccx.tcx();
if let mir::Place::Local(index) = *lvalue {
if let mir::Place::Local(index) = *place {
return self.locals[index].clone().unwrap_or_else(|| {
span_bug!(span, "{:?} not initialized", lvalue)
}).map(|v| v.as_lvalue());
span_bug!(span, "{:?} not initialized", place)
}).map(|v| v.as_place());
}
let lvalue = match *lvalue {
let place = match *place {
mir::Place::Local(_) => bug!(), // handled above
mir::Place::Static(box mir::Static { def_id, ty }) => {
ConstPlace {
@ -457,7 +457,7 @@ impl<'a, 'tcx> MirConstContext<'a, 'tcx> {
}
}
mir::Place::Projection(ref projection) => {
let tr_base = self.const_lvalue(&projection.base, span)?;
let tr_base = self.const_place(&projection.base, span)?;
let projected_ty = PlaceTy::Ty { ty: tr_base.ty }
.projection_ty(tcx, &projection.elem);
let base = tr_base.to_const(span);
@ -533,16 +533,16 @@ impl<'a, 'tcx> MirConstContext<'a, 'tcx> {
}
}
};
Ok(lvalue)
Ok(place)
}
fn const_operand(&self, operand: &mir::Operand<'tcx>, span: Span)
-> Result<Const<'tcx>, ConstEvalErr<'tcx>> {
debug!("const_operand({:?} @ {:?})", operand, span);
let result = match *operand {
mir::Operand::Copy(ref lvalue) |
mir::Operand::Move(ref lvalue) => {
Ok(self.const_lvalue(lvalue, span)?.to_const(span))
mir::Operand::Copy(ref place) |
mir::Operand::Move(ref place) => {
Ok(self.const_place(place, span)?.to_const(span))
}
mir::Operand::Constant(ref constant) => {
@ -779,14 +779,14 @@ impl<'a, 'tcx> MirConstContext<'a, 'tcx> {
Const::new(val, cast_ty)
}
mir::Rvalue::Ref(_, bk, ref lvalue) => {
let tr_lvalue = self.const_lvalue(lvalue, span)?;
mir::Rvalue::Ref(_, bk, ref place) => {
let tr_place = self.const_place(place, span)?;
let ty = tr_lvalue.ty;
let ty = tr_place.ty;
let ref_ty = tcx.mk_ref(tcx.types.re_erased,
ty::TypeAndMut { ty: ty, mutbl: bk.to_mutbl_lossy() });
let base = match tr_lvalue.base {
let base = match tr_place.base {
Base::Value(llval) => {
// FIXME: may be wrong for &*(&simd_vec as &fmt::Debug)
let align = if self.ccx.shared().type_is_sized(ty) {
@ -807,14 +807,14 @@ impl<'a, 'tcx> MirConstContext<'a, 'tcx> {
let ptr = if self.ccx.shared().type_is_sized(ty) {
base
} else {
C_fat_ptr(self.ccx, base, tr_lvalue.llextra)
C_fat_ptr(self.ccx, base, tr_place.llextra)
};
Const::new(ptr, ref_ty)
}
mir::Rvalue::Len(ref lvalue) => {
let tr_lvalue = self.const_lvalue(lvalue, span)?;
Const::new(tr_lvalue.len(self.ccx), tcx.types.usize)
mir::Rvalue::Len(ref place) => {
let tr_place = self.const_place(place, span)?;
Const::new(tr_place.len(self.ccx), tcx.types.usize)
}
mir::Rvalue::BinaryOp(op, ref lhs, ref rhs) => {


@ -35,7 +35,7 @@ use rustc_data_structures::indexed_vec::{IndexVec, Idx};
pub use self::constant::trans_static_initializer;
use self::analyze::CleanupKind;
use self::lvalue::{Alignment, PlaceRef};
use self::place::{Alignment, PlaceRef};
use rustc::mir::traversal;
use self::operand::{OperandRef, OperandValue};
@ -87,7 +87,7 @@ pub struct MirContext<'a, 'tcx:'a> {
/// - the type of the local must be judged "immediate" by `is_llvm_immediate`
/// - the operand must never be referenced indirectly
/// - we should not take its address using the `&` operator
/// - nor should it appear in an lvalue path like `tmp.a`
/// - nor should it appear in a place path like `tmp.a`
/// - the operand must be defined by an rvalue that can generate immediate
/// values
///
@ -244,11 +244,11 @@ pub fn trans_mir<'a, 'tcx: 'a>(
},
};
let lvalue_locals = analyze::lvalue_locals(&mircx);
let memory_locals = analyze::memory_locals(&mircx);
// Allocate variable and temp allocas
mircx.locals = {
let args = arg_local_refs(&bcx, &mircx, &mircx.scopes, &lvalue_locals);
let args = arg_local_refs(&bcx, &mircx, &mircx.scopes, &memory_locals);
let mut allocate_local = |local| {
let decl = &mir.local_decls[local];
@ -260,30 +260,30 @@ pub fn trans_mir<'a, 'tcx: 'a>(
let debug_scope = mircx.scopes[decl.source_info.scope];
let dbg = debug_scope.is_valid() && bcx.sess().opts.debuginfo == FullDebugInfo;
if !lvalue_locals.contains(local.index()) && !dbg {
if !memory_locals.contains(local.index()) && !dbg {
debug!("alloc: {:?} ({}) -> operand", local, name);
return LocalRef::new_operand(bcx.ccx, layout);
}
debug!("alloc: {:?} ({}) -> lvalue", local, name);
let lvalue = PlaceRef::alloca(&bcx, layout, &name.as_str());
debug!("alloc: {:?} ({}) -> place", local, name);
let place = PlaceRef::alloca(&bcx, layout, &name.as_str());
if dbg {
let (scope, span) = mircx.debug_loc(decl.source_info);
declare_local(&bcx, &mircx.debug_context, name, layout.ty, scope,
VariableAccess::DirectVariable { alloca: lvalue.llval },
VariableAccess::DirectVariable { alloca: place.llval },
VariableKind::LocalVariable, span);
}
LocalRef::Place(lvalue)
LocalRef::Place(place)
} else {
// Temporary or return pointer
if local == mir::RETURN_POINTER && mircx.fn_ty.ret.is_indirect() {
debug!("alloc: {:?} (return pointer) -> lvalue", local);
// Temporary or return place
if local == mir::RETURN_PLACE && mircx.fn_ty.ret.is_indirect() {
debug!("alloc: {:?} (return place) -> place", local);
let llretptr = llvm::get_param(llfn, 0);
LocalRef::Place(PlaceRef::new_sized(llretptr,
layout,
Alignment::AbiAligned))
} else if lvalue_locals.contains(local.index()) {
debug!("alloc: {:?} -> lvalue", local);
} else if memory_locals.contains(local.index()) {
debug!("alloc: {:?} -> place", local);
LocalRef::Place(PlaceRef::alloca(&bcx, layout, &format!("{:?}", local)))
} else {
// If this is an immediate local, we do not create an
@ -295,7 +295,7 @@ pub fn trans_mir<'a, 'tcx: 'a>(
}
};
let retptr = allocate_local(mir::RETURN_POINTER);
let retptr = allocate_local(mir::RETURN_PLACE);
iter::once(retptr)
.chain(args.into_iter())
.chain(mir.vars_and_temps_iter().map(allocate_local))
@ -355,12 +355,12 @@ fn create_funclets<'a, 'tcx>(
}
/// Produce, for each argument, a `ValueRef` pointing at the
/// argument's value. As arguments are lvalues, these are always
/// argument's value. As arguments are places, these are always
/// indirect.
fn arg_local_refs<'a, 'tcx>(bcx: &Builder<'a, 'tcx>,
mircx: &MirContext<'a, 'tcx>,
scopes: &IndexVec<mir::VisibilityScope, debuginfo::MirDebugScope>,
lvalue_locals: &BitVector)
memory_locals: &BitVector)
-> Vec<LocalRef<'tcx>> {
let mir = mircx.mir;
let tcx = bcx.tcx();
@ -400,18 +400,18 @@ fn arg_local_refs<'a, 'tcx>(bcx: &Builder<'a, 'tcx>,
_ => bug!("spread argument isn't a tuple?!")
};
let lvalue = PlaceRef::alloca(bcx, bcx.ccx.layout_of(arg_ty), &name);
let place = PlaceRef::alloca(bcx, bcx.ccx.layout_of(arg_ty), &name);
for i in 0..tupled_arg_tys.len() {
let arg = &mircx.fn_ty.args[idx];
idx += 1;
arg.store_fn_arg(bcx, &mut llarg_idx, lvalue.project_field(bcx, i));
arg.store_fn_arg(bcx, &mut llarg_idx, place.project_field(bcx, i));
}
// Now that we have one alloca that contains the aggregate value,
// we can create one debuginfo entry for the argument.
arg_scope.map(|scope| {
let variable_access = VariableAccess::DirectVariable {
alloca: lvalue.llval
alloca: place.llval
};
declare_local(
bcx,
@ -424,7 +424,7 @@ fn arg_local_refs<'a, 'tcx>(bcx: &Builder<'a, 'tcx>,
);
});
return LocalRef::Place(lvalue);
return LocalRef::Place(place);
}
let arg = &mircx.fn_ty.args[idx];
@ -433,7 +433,7 @@ fn arg_local_refs<'a, 'tcx>(bcx: &Builder<'a, 'tcx>,
llarg_idx += 1;
}
if arg_scope.is_none() && !lvalue_locals.contains(local.index()) {
if arg_scope.is_none() && !memory_locals.contains(local.index()) {
// We don't have to cast or keep the argument in the alloca.
// FIXME(eddyb): We should figure out how to use llvm.dbg.value instead
// of putting everything in allocas just so we can use llvm.dbg.declare.
@ -467,7 +467,7 @@ fn arg_local_refs<'a, 'tcx>(bcx: &Builder<'a, 'tcx>,
}
}
let lvalue = if arg.is_indirect() {
let place = if arg.is_indirect() {
// Don't copy an indirect argument to an alloca, the caller
// already put it in a temporary alloca and gave it up.
// FIXME: lifetimes
@ -487,13 +487,13 @@ fn arg_local_refs<'a, 'tcx>(bcx: &Builder<'a, 'tcx>,
// need to insert a deref here, but the C ABI uses a pointer and a copy using the
// byval attribute, for which LLVM does the deref itself, so we must not add it.
let mut variable_access = VariableAccess::DirectVariable {
alloca: lvalue.llval
alloca: place.llval
};
if let PassMode::Indirect(ref attrs) = arg.mode {
if !attrs.contains(ArgAttribute::ByVal) {
variable_access = VariableAccess::IndirectVariable {
alloca: lvalue.llval,
alloca: place.llval,
address_operations: &deref_op,
};
}
@ -535,10 +535,10 @@ fn arg_local_refs<'a, 'tcx>(bcx: &Builder<'a, 'tcx>,
let alloc = PlaceRef::alloca(bcx,
bcx.ccx.layout_of(tcx.mk_mut_ptr(arg.layout.ty)),
"__debuginfo_env_ptr");
bcx.store(lvalue.llval, alloc.llval, None);
bcx.store(place.llval, alloc.llval, None);
alloc.llval
} else {
lvalue.llval
place.llval
};
for (i, (decl, ty)) in mir.upvar_decls.iter().zip(upvar_tys).enumerate() {
@ -580,14 +580,14 @@ fn arg_local_refs<'a, 'tcx>(bcx: &Builder<'a, 'tcx>,
);
}
});
LocalRef::Place(lvalue)
LocalRef::Place(place)
}).collect()
}
mod analyze;
mod block;
mod constant;
pub mod lvalue;
pub mod place;
pub mod operand;
mod rvalue;
mod statement;


@ -25,7 +25,7 @@ use std::fmt;
use std::ptr;
use super::{MirContext, LocalRef};
use super::lvalue::{Alignment, PlaceRef};
use super::place::{Alignment, PlaceRef};
/// The representation of a Rust value. The enum variant is in fact
/// uniquely determined by the value's type, but is kept as a
@ -243,20 +243,20 @@ impl<'a, 'tcx> OperandValue {
impl<'a, 'tcx> MirContext<'a, 'tcx> {
fn maybe_trans_consume_direct(&mut self,
bcx: &Builder<'a, 'tcx>,
lvalue: &mir::Place<'tcx>)
place: &mir::Place<'tcx>)
-> Option<OperandRef<'tcx>>
{
debug!("maybe_trans_consume_direct(lvalue={:?})", lvalue);
debug!("maybe_trans_consume_direct(place={:?})", place);
// watch out for locals that do not have an
// alloca; they are handled somewhat differently
if let mir::Place::Local(index) = *lvalue {
if let mir::Place::Local(index) = *place {
match self.locals[index] {
LocalRef::Operand(Some(o)) => {
return Some(o);
}
LocalRef::Operand(None) => {
bug!("use of {:?} before def", lvalue);
bug!("use of {:?} before def", place);
}
LocalRef::Place(..) => {
// use path below
@ -265,7 +265,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
}
// Moves out of scalar and scalar pair fields are trivial.
if let &mir::Place::Projection(ref proj) = lvalue {
if let &mir::Place::Projection(ref proj) = place {
if let mir::ProjectionElem::Field(ref f, _) = proj.elem {
if let Some(o) = self.maybe_trans_consume_direct(bcx, &proj.base) {
return Some(o.extract_field(bcx, f.index()));
@ -278,12 +278,12 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
pub fn trans_consume(&mut self,
bcx: &Builder<'a, 'tcx>,
lvalue: &mir::Place<'tcx>)
place: &mir::Place<'tcx>)
-> OperandRef<'tcx>
{
debug!("trans_consume(lvalue={:?})", lvalue);
debug!("trans_consume(place={:?})", place);
let ty = self.monomorphized_lvalue_ty(lvalue);
let ty = self.monomorphized_place_ty(place);
let layout = bcx.ccx.layout_of(ty);
// ZSTs don't require any actual memory access.
@ -291,13 +291,13 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
return OperandRef::new_zst(bcx.ccx, layout);
}
if let Some(o) = self.maybe_trans_consume_direct(bcx, lvalue) {
if let Some(o) = self.maybe_trans_consume_direct(bcx, place) {
return o;
}
// for most lvalues, to consume them we just load them
// for most places, to consume them we just load them
// out from their home
self.trans_lvalue(bcx, lvalue).load(bcx)
self.trans_place(bcx, place).load(bcx)
}
pub fn trans_operand(&mut self,
@ -308,9 +308,9 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
debug!("trans_operand(operand={:?})", operand);
match *operand {
mir::Operand::Copy(ref lvalue) |
mir::Operand::Move(ref lvalue) => {
self.trans_consume(bcx, lvalue)
mir::Operand::Copy(ref place) |
mir::Operand::Move(ref place) => {
self.trans_consume(bcx, place)
}
mir::Operand::Constant(ref constant) => {


@ -74,16 +74,16 @@ impl Alignment {
#[derive(Copy, Clone, Debug)]
pub struct PlaceRef<'tcx> {
/// Pointer to the contents of the lvalue
/// Pointer to the contents of the place
pub llval: ValueRef,
/// This lvalue's extra data if it is unsized, or null
/// This place's extra data if it is unsized, or null
pub llextra: ValueRef,
/// Monomorphized type of this lvalue, including variant information
/// Monomorphized type of this place, including variant information
pub layout: TyLayout<'tcx>,
/// Whether this lvalue is known to be aligned according to its layout
/// Whether this place is known to be aligned according to its layout
pub alignment: Alignment,
}
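The `llextra` field mirrors Rust's fat pointers; a hedged surface-level analogue:

```rust
use std::mem::{size_of, size_of_val};

// For unsized places the pointer is "fat": a slice carries its length
// and a trait object its vtable, the same extra word that `PlaceRef`
// keeps in `llextra` (and leaves null for sized places).
fn fat_pointers(s: &[u8], d: &dyn std::fmt::Debug) {
    assert_eq!(size_of_val(&s), 2 * size_of::<usize>());
    assert_eq!(size_of_val(&d), 2 * size_of::<usize>());
}
```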
@ -444,27 +444,27 @@ impl<'a, 'tcx> PlaceRef<'tcx> {
}
impl<'a, 'tcx> MirContext<'a, 'tcx> {
pub fn trans_lvalue(&mut self,
pub fn trans_place(&mut self,
bcx: &Builder<'a, 'tcx>,
lvalue: &mir::Place<'tcx>)
place: &mir::Place<'tcx>)
-> PlaceRef<'tcx> {
debug!("trans_lvalue(lvalue={:?})", lvalue);
debug!("trans_place(place={:?})", place);
let ccx = bcx.ccx;
let tcx = ccx.tcx();
if let mir::Place::Local(index) = *lvalue {
if let mir::Place::Local(index) = *place {
match self.locals[index] {
-LocalRef::Place(lvalue) => {
-return lvalue;
+LocalRef::Place(place) => {
+return place;
}
LocalRef::Operand(..) => {
bug!("using operand local {:?} as lvalue", lvalue);
bug!("using operand local {:?} as place", place);
}
}
}
-let result = match *lvalue {
+let result = match *place {
mir::Place::Local(_) => bug!(), // handled above
mir::Place::Static(box mir::Static { def_id, ty }) => {
PlaceRef::new_sized(consts::get_static(ccx, def_id),
@@ -479,7 +479,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
self.trans_consume(bcx, base).deref(bcx.ccx)
}
mir::Place::Projection(ref projection) => {
-let tr_base = self.trans_lvalue(bcx, &projection.base);
+let tr_base = self.trans_place(bcx, &projection.base);
match projection.elem {
mir::ProjectionElem::Deref => bug!(),
@@ -519,7 +519,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
C_usize(bcx.ccx, (from as u64) + (to as u64)));
}
-// Cast the lvalue pointer type to the new
+// Cast the place pointer type to the new
// array or slice type (*[%_; new_len]).
subslice.llval = bcx.pointercast(subslice.llval,
subslice.layout.llvm_type(bcx.ccx).ptr_to());
@@ -532,14 +532,14 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
}
}
};
debug!("trans_lvalue(lvalue={:?}) => {:?}", lvalue, result);
debug!("trans_place(place={:?}) => {:?}", place, result);
result
}
-pub fn monomorphized_lvalue_ty(&self, lvalue: &mir::Place<'tcx>) -> Ty<'tcx> {
+pub fn monomorphized_place_ty(&self, place: &mir::Place<'tcx>) -> Ty<'tcx> {
let tcx = self.ccx.tcx();
-let lvalue_ty = lvalue.ty(self.mir, tcx);
-self.monomorphize(&lvalue_ty.to_ty(tcx))
+let place_ty = place.ty(self.mir, tcx);
+self.monomorphize(&place_ty.to_ty(tcx))
}
}

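The llval/llextra pair documented in this file is, in spirit, a fat pointer: a sized value needs only the data pointer, while an unsized one carries an extra word (e.g. a slice's element count, which is what a length query reads back). A hedged sketch under that reading, with invented names (FatPlace, place_for_slice, place_for_sized), not rustc's types:

// Toy analogue of PlaceRef's two-word layout.
struct FatPlace {
    data: *const u8, // plays the role of llval
    extra: usize,    // plays the role of llextra: element count for slices
}

fn place_for_slice(s: &[u8]) -> FatPlace {
    // An unsized slice needs both words, like the unsized case above.
    FatPlace { data: s.as_ptr(), extra: s.len() }
}

fn place_for_sized(x: &u64) -> FatPlace {
    // A sized value needs only the data pointer; extra is unused.
    FatPlace { data: (x as *const u64).cast(), extra: 0 }
}

fn main() {
    let bytes = [1u8, 2, 3];
    let slice_place = place_for_slice(&bytes[..]);
    assert_eq!(slice_place.extra, 3); // the length carried with the place

    let word = 9u64;
    let sized_place = place_for_sized(&word);
    assert!(!sized_place.data.is_null());
}
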
View file

@@ -32,7 +32,7 @@ use value::Value;
use super::{MirContext, LocalRef};
use super::constant::const_scalar_checked_binop;
use super::operand::{OperandRef, OperandValue};
-use super::lvalue::PlaceRef;
+use super::place::PlaceRef;
impl<'a, 'tcx> MirContext<'a, 'tcx> {
pub fn trans_rvalue(&mut self,
@@ -334,17 +334,17 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
})
}
-mir::Rvalue::Ref(_, bk, ref lvalue) => {
-let tr_lvalue = self.trans_lvalue(&bcx, lvalue);
+mir::Rvalue::Ref(_, bk, ref place) => {
+let tr_place = self.trans_place(&bcx, place);
-let ty = tr_lvalue.layout.ty;
+let ty = tr_place.layout.ty;
-// Note: lvalues are indirect, so storing the `llval` into the
+// Note: places are indirect, so storing the `llval` into the
// destination effectively creates a reference.
let val = if !bcx.ccx.shared().type_has_metadata(ty) {
-OperandValue::Immediate(tr_lvalue.llval)
+OperandValue::Immediate(tr_place.llval)
} else {
-OperandValue::Pair(tr_lvalue.llval, tr_lvalue.llextra)
+OperandValue::Pair(tr_place.llval, tr_place.llextra)
};
(bcx, OperandRef {
val,
@@ -355,8 +355,8 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
})
}
-mir::Rvalue::Len(ref lvalue) => {
-let size = self.evaluate_array_len(&bcx, lvalue);
+mir::Rvalue::Len(ref place) => {
+let size = self.evaluate_array_len(&bcx, place);
let operand = OperandRef {
val: OperandValue::Immediate(size),
layout: bcx.ccx.layout_of(bcx.tcx().types.usize),
@@ -424,9 +424,9 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
})
}
-mir::Rvalue::Discriminant(ref lvalue) => {
+mir::Rvalue::Discriminant(ref place) => {
let discr_ty = rvalue.ty(&*self.mir, bcx.tcx());
-let discr = self.trans_lvalue(&bcx, lvalue)
+let discr = self.trans_place(&bcx, place)
.trans_get_discr(&bcx, discr_ty);
(bcx, OperandRef {
val: OperandValue::Immediate(discr),
@@ -486,11 +486,11 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
fn evaluate_array_len(&mut self,
bcx: &Builder<'a, 'tcx>,
-lvalue: &mir::Place<'tcx>) -> ValueRef
+place: &mir::Place<'tcx>) -> ValueRef
{
// ZST are passed as operands and require special handling
-// because trans_lvalue() panics if Local is operand.
-if let mir::Place::Local(index) = *lvalue {
+// because trans_place() panics if Local is operand.
+if let mir::Place::Local(index) = *place {
if let LocalRef::Operand(Some(op)) = self.locals[index] {
if let ty::TyArray(_, n) = op.layout.ty.sty {
let n = n.val.to_const_int().unwrap().to_u64().unwrap();
@@ -499,7 +499,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
}
}
// use common size calculation for non zero-sized types
-let tr_value = self.trans_lvalue(&bcx, lvalue);
+let tr_value = self.trans_place(&bcx, place);
return tr_value.len(bcx.ccx);
}

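The Len case in this file has two sources of truth: a statically known length when the array local exists only as an operand (the ZST special case in evaluate_array_len), and a length carried alongside the memory-backed place otherwise. A small stand-in sketch of that split, with all names invented:

// Stand-in for the two ways a length is obtained (not rustc's types).
enum ArrayVal<'a> {
    // Operand-only case (e.g. a ZST [T; N] local with no alloca):
    // the length is known statically from the type.
    OperandOnly { static_len: u64 },
    // Memory-backed case: the length travels with the place,
    // like the extra word of a fat pointer.
    InMemory(&'a [u8]),
}

fn evaluate_array_len(v: &ArrayVal) -> u64 {
    match v {
        ArrayVal::OperandOnly { static_len } => *static_len,
        ArrayVal::InMemory(s) => s.len() as u64,
    }
}

fn main() {
    assert_eq!(evaluate_array_len(&ArrayVal::OperandOnly { static_len: 4 }), 4);
    assert_eq!(evaluate_array_len(&ArrayVal::InMemory(&[0u8, 0, 0][..])), 3);
}
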
View file

@@ -25,8 +25,8 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
self.set_debug_loc(&bcx, statement.source_info);
match statement.kind {
-mir::StatementKind::Assign(ref lvalue, ref rvalue) => {
-if let mir::Place::Local(index) = *lvalue {
+mir::StatementKind::Assign(ref place, ref rvalue) => {
+if let mir::Place::Local(index) = *place {
match self.locals[index] {
LocalRef::Place(tr_dest) => {
self.trans_rvalue(bcx, tr_dest, rvalue)
@@ -49,30 +49,30 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
}
}
} else {
-let tr_dest = self.trans_lvalue(&bcx, lvalue);
+let tr_dest = self.trans_place(&bcx, place);
self.trans_rvalue(bcx, tr_dest, rvalue)
}
}
-mir::StatementKind::SetDiscriminant{ref lvalue, variant_index} => {
-self.trans_lvalue(&bcx, lvalue)
+mir::StatementKind::SetDiscriminant{ref place, variant_index} => {
+self.trans_place(&bcx, place)
.trans_set_discr(&bcx, variant_index);
bcx
}
mir::StatementKind::StorageLive(local) => {
-if let LocalRef::Place(tr_lval) = self.locals[local] {
-tr_lval.storage_live(&bcx);
+if let LocalRef::Place(tr_place) = self.locals[local] {
+tr_place.storage_live(&bcx);
}
bcx
}
mir::StatementKind::StorageDead(local) => {
-if let LocalRef::Place(tr_lval) = self.locals[local] {
-tr_lval.storage_dead(&bcx);
+if let LocalRef::Place(tr_place) = self.locals[local] {
+tr_place.storage_dead(&bcx);
}
bcx
}
mir::StatementKind::InlineAsm { ref asm, ref outputs, ref inputs } => {
let outputs = outputs.iter().map(|output| {
-self.trans_lvalue(&bcx, output)
+self.trans_place(&bcx, output)
}).collect();
let input_vals = inputs.iter().map(|input| {