rustc: encode scalar pairs in layout ABI.

Eduard-Mihai Burtescu 2017-10-06 10:25:35 +03:00
parent f1b7cd9925
commit cdeb4b0d25
15 changed files with 267 additions and 138 deletions
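In short: values made of exactly two scalar components, such as fat pointers, were previously classified as a generic Abi::Aggregate; this commit adds a dedicated Abi::ScalarPair variant so trans can handle them as two immediates. A minimal sketch of the kind of type this targets (illustrative, not part of the diff):

// A slice reference is exactly two pointer-sized scalars: (data, len).
fn main() {
    use std::mem::size_of;
    assert_eq!(size_of::<&[u8]>(), 2 * size_of::<usize>()); // fat pointer
    assert_eq!(size_of::<&u8>(), size_of::<usize>());       // thin pointer
}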

View file

@@ -757,6 +757,7 @@ impl FieldPlacement {
 pub enum Abi {
     Uninhabited,
     Scalar(Scalar),
+    ScalarPair(Scalar, Scalar),
     Vector,
     Aggregate {
         /// If true, the size is exact, otherwise it's only a lower bound.
@@ -769,7 +770,10 @@ impl Abi {
     /// Returns true if the layout corresponds to an unsized type.
     pub fn is_unsized(&self) -> bool {
         match *self {
-            Abi::Uninhabited | Abi::Scalar(_) | Abi::Vector => false,
+            Abi::Uninhabited |
+            Abi::Scalar(_) |
+            Abi::ScalarPair(..) |
+            Abi::Vector => false,
             Abi::Aggregate { sized, .. } => !sized
         }
     }
@@ -777,7 +781,10 @@ impl Abi {
     /// Returns true if the fields of the layout are packed.
     pub fn is_packed(&self) -> bool {
         match *self {
-            Abi::Uninhabited | Abi::Scalar(_) | Abi::Vector => false,
+            Abi::Uninhabited |
+            Abi::Scalar(_) |
+            Abi::ScalarPair(..) |
+            Abi::Vector => false,
             Abi::Aggregate { packed, .. } => packed
         }
     }
@@ -905,13 +912,32 @@ impl<'a, 'tcx> CachedLayout {
                   -> Result<&'tcx Self, LayoutError<'tcx>> {
         let cx = (tcx, param_env);
         let dl = cx.data_layout();
-        let scalar = |value: Primitive| {
+        let scalar_unit = |value: Primitive| {
             let bits = value.size(dl).bits();
             assert!(bits <= 128);
-            tcx.intern_layout(CachedLayout::scalar(cx, Scalar {
+            Scalar {
                 value,
                 valid_range: 0..=(!0 >> (128 - bits))
-            }))
+            }
+        };
+        let scalar = |value: Primitive| {
+            tcx.intern_layout(CachedLayout::scalar(cx, scalar_unit(value)))
+        };
+        let scalar_pair = |a: Scalar, b: Scalar| {
+            let align = a.value.align(dl).max(b.value.align(dl)).max(dl.aggregate_align);
+            let b_offset = a.value.size(dl).abi_align(b.value.align(dl));
+            let size = (b_offset + b.value.size(dl)).abi_align(align);
+            CachedLayout {
+                variants: Variants::Single { index: 0 },
+                fields: FieldPlacement::Arbitrary {
+                    offsets: vec![Size::from_bytes(0), b_offset],
+                    memory_index: vec![0, 1]
+                },
+                abi: Abi::ScalarPair(a, b),
+                align,
+                primitive_align: align,
+                size
+            }
         };

         #[derive(Copy, Clone, Debug)]
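For intuition, here is the `scalar_pair` arithmetic above worked through for a (u32, u64)-style pair on a 64-bit target (a standalone sketch with a hypothetical helper, not compiler code; the real closure also folds `dl.aggregate_align` into the alignment):

fn abi_align(size: u64, align: u64) -> u64 {
    // Round `size` up to a multiple of `align`, like Size::abi_align above.
    (size + align - 1) / align * align
}

fn main() {
    let (a_size, a_align) = (4u64, 4u64); // first scalar, e.g. i32
    let (b_size, b_align) = (8u64, 8u64); // second scalar, e.g. i64
    let align = a_align.max(b_align);               // pair alignment: 8
    let b_offset = abi_align(a_size, b_align);      // second field at offset 8
    let size = abi_align(b_offset + b_size, align); // total size: 16
    assert_eq!((b_offset, align, size), (8, 8, 16));
}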
@@ -1049,19 +1075,54 @@ impl<'a, 'tcx> CachedLayout {
                 memory_index = inverse_memory_index;
             }

+            let size = min_size.abi_align(align);
+            let mut abi = Abi::Aggregate {
+                sized,
+                packed
+            };
+
+            // Look for a scalar pair, as an ABI optimization.
+            // FIXME(eddyb) ignore extra ZST fields and field ordering.
+            if sized && !packed && fields.len() == 2 {
+                match (&fields[0].abi, &fields[1].abi) {
+                    (&Abi::Scalar(ref a), &Abi::Scalar(ref b)) => {
+                        let pair = scalar_pair(a.clone(), b.clone());
+                        let pair_offsets = match pair.fields {
+                            FieldPlacement::Arbitrary {
+                                ref offsets,
+                                ref memory_index
+                            } => {
+                                assert_eq!(memory_index, &[0, 1]);
+                                offsets
+                            }
+                            _ => bug!()
+                        };
+                        if offsets[0] == pair_offsets[0] &&
+                           offsets[1] == pair_offsets[1] &&
+                           memory_index[0] == 0 &&
+                           memory_index[1] == 1 &&
+                           align == pair.align &&
+                           primitive_align == pair.primitive_align &&
+                           size == pair.size {
+                            // We can use `ScalarPair` only when it matches our
+                            // already computed layout (including `#[repr(C)]`).
+                            abi = pair.abi;
+                        }
+                    }
+                    _ => {}
+                }
+            }
+
             Ok(CachedLayout {
                 variants: Variants::Single { index: 0 },
                 fields: FieldPlacement::Arbitrary {
                     offsets,
                     memory_index
                 },
-                abi: Abi::Aggregate {
-                    sized,
-                    packed
-                },
+                abi,
                 align,
                 primitive_align,
-                size: min_size.abi_align(align)
+                size
             })
         };

         let univariant = |fields: &[TyLayout], repr: &ReprOptions, kind| {
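The effect of the check above, sketched on concrete types (classifications are my reading of the diff, not taken from it): a type must have exactly two scalar fields, placed exactly as `scalar_pair` would place them, to be promoted.

struct Pair(u32, u64);        // two scalars: promoted to Abi::ScalarPair
struct Triple(u32, u32, u64); // three fields: stays Abi::Aggregate
#[repr(packed)]
struct Packed(u32, u64);      // packed: stays Abi::Aggregate
struct WithZst(u32, (), u64); // extra ZST field: stays Aggregate, per the FIXME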
@@ -1070,45 +1131,34 @@ impl<'a, 'tcx> CachedLayout {
         assert!(!ty.has_infer_types());

         let ptr_layout = |pointee: Ty<'tcx>| {
+            let mut data_ptr = scalar_unit(Pointer);
+            if !ty.is_unsafe_ptr() {
+                data_ptr.valid_range.start = 1;
+            }
+
             let pointee = tcx.normalize_associated_type_in_env(&pointee, param_env);
             if pointee.is_sized(tcx, param_env, DUMMY_SP) {
-                let non_zero = !ty.is_unsafe_ptr();
-                let bits = Pointer.size(dl).bits();
-                return Ok(tcx.intern_layout(CachedLayout::scalar(cx, Scalar {
-                    value: Pointer,
-                    valid_range: (non_zero as u128)..=(!0 >> (128 - bits))
-                })));
+                return Ok(tcx.intern_layout(CachedLayout::scalar(cx, data_ptr)));
             }

             let unsized_part = tcx.struct_tail(pointee);
             let metadata = match unsized_part.sty {
-                ty::TyForeign(..) => return Ok(scalar(Pointer)),
+                ty::TyForeign(..) => {
+                    return Ok(tcx.intern_layout(CachedLayout::scalar(cx, data_ptr)));
+                }
                 ty::TySlice(_) | ty::TyStr => {
-                    Int(dl.ptr_sized_integer(), false)
+                    scalar_unit(Int(dl.ptr_sized_integer(), false))
+                }
+                ty::TyDynamic(..) => {
+                    let mut vtable = scalar_unit(Pointer);
+                    vtable.valid_range.start = 1;
+                    vtable
                 }
-                ty::TyDynamic(..) => Pointer,
                 _ => return Err(LayoutError::Unknown(unsized_part))
             };

             // Effectively a (ptr, meta) tuple.
-            let align = Pointer.align(dl).max(metadata.align(dl));
-            let meta_offset = Pointer.size(dl);
-            assert_eq!(meta_offset, meta_offset.abi_align(metadata.align(dl)));
-            let fields = FieldPlacement::Arbitrary {
-                offsets: vec![Size::from_bytes(0), meta_offset],
-                memory_index: vec![0, 1]
-            };
-            Ok(tcx.intern_layout(CachedLayout {
-                variants: Variants::Single { index: 0 },
-                fields,
-                abi: Abi::Aggregate {
-                    sized: true,
-                    packed: false
-                },
-                align,
-                primitive_align: align,
-                size: (meta_offset + metadata.size(dl)).abi_align(align)
-            }))
+            Ok(tcx.intern_layout(scalar_pair(data_ptr, metadata)))
         };

         Ok(match ty.sty {
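The `valid_range.start = 1` adjustments above record pointer non-nullness in the scalar itself, which is what enum layout uses for the null-pointer niche. A quick illustration of the observable effect (not part of the diff):

fn main() {
    use std::mem::size_of;
    // References are non-null scalars, so Option can reuse 0 as its tag.
    assert_eq!(size_of::<Option<&u8>>(), size_of::<&u8>());
    // Raw pointers keep a valid_range starting at 0, so Option needs a tag.
    assert!(size_of::<Option<*const u8>>() > size_of::<*const u8>());
}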
@@ -1134,11 +1184,9 @@ impl<'a, 'tcx> CachedLayout {
             ty::TyFloat(FloatTy::F32) => scalar(F32),
             ty::TyFloat(FloatTy::F64) => scalar(F64),
             ty::TyFnPtr(_) => {
-                let bits = Pointer.size(dl).bits();
-                tcx.intern_layout(CachedLayout::scalar(cx, Scalar {
-                    value: Pointer,
-                    valid_range: 1..=(!0 >> (128 - bits))
-                }))
+                let mut ptr = scalar_unit(Pointer);
+                ptr.valid_range.start = 1;
+                tcx.intern_layout(CachedLayout::scalar(cx, ptr))
             }

             // The never type.
@@ -2194,7 +2242,7 @@ impl<'a, 'tcx> TyLayout<'tcx> {
     pub fn is_zst(&self) -> bool {
         match self.abi {
             Abi::Uninhabited => true,
-            Abi::Scalar(_) => false,
+            Abi::Scalar(_) | Abi::ScalarPair(..) => false,
             Abi::Vector => self.size.bytes() == 0,
             Abi::Aggregate { sized, .. } => sized && self.size.bytes() == 0
         }
@@ -2347,6 +2395,10 @@ impl<'gcx> HashStable<StableHashingContext<'gcx>> for Abi {
             Scalar(ref value) => {
                 value.hash_stable(hcx, hasher);
             }
+            ScalarPair(ref a, ref b) => {
+                a.hash_stable(hcx, hasher);
+                b.hash_stable(hcx, hasher);
+            }
             Vector => {}
             Aggregate { packed, sized } => {
                 packed.hash_stable(hcx, hasher);

View file

@@ -311,6 +311,7 @@ impl<'tcx> LayoutExt<'tcx> for TyLayout<'tcx> {
             layout::Abi::Uninhabited |
             layout::Abi::Scalar(_) |
             layout::Abi::Vector => false,
+            layout::Abi::ScalarPair(..) |
             layout::Abi::Aggregate { .. } => true
         }
     }
@@ -340,6 +341,7 @@ impl<'tcx> LayoutExt<'tcx> for TyLayout<'tcx> {
                 })
             }

+            layout::Abi::ScalarPair(..) |
             layout::Abi::Aggregate { .. } => {
                 let mut total = Size::from_bytes(0);
                 let mut result = None;
@@ -745,10 +747,13 @@ impl<'a, 'tcx> FnType<'tcx> {
                     arg.attrs.set(ArgAttribute::NonNull);
                 }
             }
-            _ => {}
+            _ => {
+                // Nothing to do for non-pointer types.
+                return;
+            }
         }

-        if let Some(pointee) = arg.layout.pointee_info(ccx) {
+        if let Some(pointee) = arg.layout.pointee_info_at(ccx, Size::from_bytes(0)) {
            if let Some(kind) = pointee.safe {
                arg.attrs.pointee_size = pointee.size;
                arg.attrs.pointee_align = Some(pointee.align);
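With the offset-aware `pointee_info_at`, attributes for the common thin-pointer argument are derived by querying offset 0. An illustrative effect on a simple function (hedged to the attributes this code actually derives: non-nullness plus pointee size and alignment):

#[no_mangle]
pub fn read(p: &i32) -> i32 {
    // `p` gets IR attributes along the lines of:
    //   i32* nonnull align 4 dereferenceable(4)
    *p
}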

View file

@@ -88,6 +88,7 @@ fn classify_arg<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, arg: &ArgType<'tcx>)
             }
         }

+        layout::Abi::ScalarPair(..) |
         layout::Abi::Aggregate { .. } => {
             match layout.variants {
                 layout::Variants::Single { .. } => {

View file

@@ -18,6 +18,7 @@ pub fn compute_abi_info(fty: &mut FnType) {
     let fixup = |a: &mut ArgType| {
         match a.layout.abi {
             layout::Abi::Uninhabited => {}
+            layout::Abi::ScalarPair(..) |
             layout::Abi::Aggregate { .. } => {
                 match a.layout.size.bits() {
                     8 => a.cast_to(Reg::i8()),

View file

@@ -232,16 +232,9 @@ pub fn C_str_slice(cx: &CrateContext, s: InternedString) -> ValueRef {
 }

 pub fn C_fat_ptr(cx: &CrateContext, ptr: ValueRef, meta: ValueRef) -> ValueRef {
-    let empty = C_array(Type::i8(cx), &[]);
     assert_eq!(abi::FAT_PTR_ADDR, 0);
     assert_eq!(abi::FAT_PTR_EXTRA, 1);
-    C_struct(cx, &[
-        empty,
-        ptr,
-        empty,
-        meta,
-        empty
-    ], false)
+    C_struct(cx, &[ptr, meta], false)
 }

 pub fn C_struct(cx: &CrateContext, elts: &[ValueRef], packed: bool) -> ValueRef {

View file

@@ -31,7 +31,7 @@ use rustc::middle::trans::Stats;
 use rustc_data_structures::stable_hasher::StableHashingContextProvider;
 use rustc::session::config::{self, NoDebugInfo};
 use rustc::session::Session;
-use rustc::ty::layout::{LayoutError, LayoutOf, TyLayout};
+use rustc::ty::layout::{LayoutError, LayoutOf, Size, TyLayout};
 use rustc::ty::{self, Ty, TyCtxt};
 use rustc::util::nodemap::FxHashMap;
 use rustc_trans_utils;
@@ -103,7 +103,7 @@ pub struct LocalCrateContext<'a, 'tcx: 'a> {
     lltypes: RefCell<FxHashMap<(Ty<'tcx>, Option<usize>), Type>>,
     scalar_lltypes: RefCell<FxHashMap<Ty<'tcx>, Type>>,
-    pointee_infos: RefCell<FxHashMap<Ty<'tcx>, Option<PointeeInfo>>>,
+    pointee_infos: RefCell<FxHashMap<(Ty<'tcx>, Size), Option<PointeeInfo>>>,
     isize_ty: Type,

     dbg_cx: Option<debuginfo::CrateDebugContext<'tcx>>,
@@ -516,7 +516,8 @@ impl<'b, 'tcx> CrateContext<'b, 'tcx> {
         &self.local().scalar_lltypes
     }

-    pub fn pointee_infos<'a>(&'a self) -> &'a RefCell<FxHashMap<Ty<'tcx>, Option<PointeeInfo>>> {
+    pub fn pointee_infos<'a>(&'a self)
+                             -> &'a RefCell<FxHashMap<(Ty<'tcx>, Size), Option<PointeeInfo>>> {
         &self.local().pointee_infos
     }

View file

@@ -35,7 +35,7 @@ pub fn lvalue_locals<'a, 'tcx>(mircx: &MirContext<'a, 'tcx>) -> BitVector {
         if layout.is_llvm_immediate() {
             // These sorts of types are immediates that we can store
             // in an ValueRef without an alloca.
-        } else if layout.is_llvm_scalar_pair(mircx.ccx) {
+        } else if layout.is_llvm_scalar_pair() {
             // We allow pairs and uses of any of their 2 fields.
         } else {
             // These sorts of types require an alloca. Note that
@@ -146,7 +146,7 @@ impl<'mir, 'a, 'tcx> Visitor<'tcx> for LocalAnalyzer<'mir, 'a, 'tcx> {
                 let ty = self.cx.monomorphize(&ty.to_ty(self.cx.ccx.tcx()));
                 let layout = self.cx.ccx.layout_of(ty);
-                if layout.is_llvm_scalar_pair(self.cx.ccx) {
+                if layout.is_llvm_scalar_pair() {
                     return;
                 }
             }

View file

@@ -117,7 +117,12 @@ impl<'a, 'tcx> Const<'tcx> {
     }

     fn get_field(&self, ccx: &CrateContext<'a, 'tcx>, i: usize) -> ValueRef {
-        const_get_elt(self.llval, ccx.layout_of(self.ty).llvm_field_index(i))
+        let layout = ccx.layout_of(self.ty);
+        if let layout::Abi::ScalarPair(..) = layout.abi {
+            const_get_elt(self.llval, i as u64)
+        } else {
+            const_get_elt(self.llval, layout.llvm_field_index(i))
+        }
     }

     fn get_pair(&self, ccx: &CrateContext<'a, 'tcx>) -> (ValueRef, ValueRef) {
@@ -143,7 +148,7 @@ impl<'a, 'tcx> Const<'tcx> {
         let llty = layout.immediate_llvm_type(ccx);
         let llvalty = val_ty(self.llval);

-        let val = if llty == llvalty && layout.is_llvm_scalar_pair(ccx) {
+        let val = if llty == llvalty && layout.is_llvm_scalar_pair() {
             let (a, b) = self.get_pair(ccx);
             OperandValue::Pair(a, b)
         } else if llty == llvalty && layout.is_llvm_immediate() {
@@ -1174,6 +1179,14 @@ fn build_const_struct<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
                                 -> Const<'tcx> {
     assert_eq!(vals.len(), layout.fields.count());

+    if let layout::Abi::ScalarPair(..) = layout.abi {
+        assert_eq!(vals.len(), 2);
+        return Const::new(C_struct(ccx, &[
+            vals[0].llval,
+            vals[1].llval,
+        ], false), layout.ty);
+    }
+
     // offset of current value
     let mut offset = Size::from_bytes(0);
     let mut cfields = Vec::new();
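Constants whose layout is a `ScalarPair` now short-circuit to a plain two-field struct instead of the padded-aggregate path below. For example (illustrative; assumes a tuple constant like this reaches `build_const_struct`):

// Would be emitted as a two-element LLVM struct { i32 1, i64 2 },
// with no zero-length padding fields.
const PAIR: (u32, u64) = (1, 2);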

View file

@@ -175,10 +175,13 @@ impl<'a, 'tcx> LvalueRef<'tcx> {
                 load
             };
             OperandValue::Immediate(base::to_immediate(bcx, llval, self.layout))
-        } else if self.layout.is_llvm_scalar_pair(bcx.ccx) {
-            OperandValue::Pair(
-                self.project_field(bcx, 0).load(bcx).immediate(),
-                self.project_field(bcx, 1).load(bcx).immediate())
+        } else if self.layout.is_llvm_scalar_pair() {
+            let load = |i| {
+                let x = self.project_field(bcx, i).load(bcx).immediate();
+                // HACK(eddyb) have to bitcast pointers until LLVM removes pointee types.
+                bcx.bitcast(x, self.layout.scalar_pair_element_llvm_type(bcx.ccx, i))
+            };
+            OperandValue::Pair(load(0), load(1))
         } else {
             OperandValue::Ref(self.llval, self.alignment)
         };
@@ -190,17 +193,23 @@ impl<'a, 'tcx> LvalueRef<'tcx> {
     pub fn project_field(self, bcx: &Builder<'a, 'tcx>, ix: usize) -> LvalueRef<'tcx> {
         let ccx = bcx.ccx;
         let field = self.layout.field(ccx, ix);
-        let offset = self.layout.fields.offset(ix).bytes();
+        let offset = self.layout.fields.offset(ix);
         let alignment = self.alignment | Alignment::from(self.layout);

         let simple = || {
+            // Unions and newtypes only use an offset of 0.
+            let llval = if offset.bytes() == 0 {
+                self.llval
+            } else if let layout::Abi::ScalarPair(ref a, ref b) = self.layout.abi {
+                // Offsets have to match either first or second field.
+                assert_eq!(offset, a.value.size(ccx).abi_align(b.value.align(ccx)));
+                bcx.struct_gep(self.llval, 1)
+            } else {
+                bcx.struct_gep(self.llval, self.layout.llvm_field_index(ix))
+            };
             LvalueRef {
-                // Unions and newtypes only use an offset of 0.
-                llval: if offset == 0 {
-                    bcx.pointercast(self.llval, field.llvm_type(ccx).ptr_to())
-                } else {
-                    bcx.struct_gep(self.llval, self.layout.llvm_field_index(ix))
-                },
+                // HACK(eddyb) have to bitcast pointers until LLVM removes pointee types.
+                llval: bcx.pointercast(llval, field.llvm_type(ccx).ptr_to()),
                 llextra: if ccx.shared().type_has_metadata(field.ty) {
                     self.llextra
                 } else {
@@ -249,7 +258,7 @@ impl<'a, 'tcx> LvalueRef<'tcx> {
         let meta = self.llextra;

-        let unaligned_offset = C_usize(ccx, offset);
+        let unaligned_offset = C_usize(ccx, offset.bytes());

         // Get the alignment of the field
         let (_, align) = glue::size_and_align_of_dst(bcx, field.ty, meta);

View file

@@ -123,11 +123,8 @@ impl<'a, 'tcx> OperandRef<'tcx> {
                    self, llty);
             // Reconstruct the immediate aggregate.
             let mut llpair = C_undef(llty);
-            let elems = [a, b];
-            for i in 0..2 {
-                let elem = base::from_immediate(bcx, elems[i]);
-                llpair = bcx.insert_value(llpair, elem, self.layout.llvm_field_index(i));
-            }
+            llpair = bcx.insert_value(llpair, a, 0);
+            llpair = bcx.insert_value(llpair, b, 1);
             llpair
         } else {
             self.immediate()
@@ -139,18 +136,13 @@ impl<'a, 'tcx> OperandRef<'tcx> {
                        llval: ValueRef,
                        layout: TyLayout<'tcx>)
                        -> OperandRef<'tcx> {
-        let val = if layout.is_llvm_scalar_pair(bcx.ccx) {
+        let val = if layout.is_llvm_scalar_pair() {
             debug!("Operand::from_immediate_or_packed_pair: unpacking {:?} @ {:?}",
                    llval, layout);

             // Deconstruct the immediate aggregate.
-            let a = bcx.extract_value(llval, layout.llvm_field_index(0));
-            let a = base::to_immediate(bcx, a, layout.field(bcx.ccx, 0));
-            let b = bcx.extract_value(llval, layout.llvm_field_index(1));
-            let b = base::to_immediate(bcx, b, layout.field(bcx.ccx, 1));
-            OperandValue::Pair(a, b)
+            OperandValue::Pair(bcx.extract_value(llval, 0),
+                               bcx.extract_value(llval, 1))
         } else {
             OperandValue::Immediate(llval)
         };
@@ -175,8 +167,11 @@ impl<'a, 'tcx> OperandValue {
             }
             OperandValue::Pair(a, b) => {
                 for (i, &x) in [a, b].iter().enumerate() {
-                    OperandValue::Immediate(x)
-                        .store(bcx, dest.project_field(bcx, i));
+                    let field = dest.project_field(bcx, i);
+                    // HACK(eddyb) have to bitcast pointers until LLVM removes pointee types.
+                    let x = bcx.bitcast(x, field.layout.immediate_llvm_type(bcx.ccx));
+                    bcx.store(base::from_immediate(bcx, x),
+                              field.llval, field.alignment.non_abi());
                 }
             }
         }
@@ -214,10 +209,15 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
                 match (o.val, &proj.elem) {
                     (OperandValue::Pair(a, b),
                      &mir::ProjectionElem::Field(ref f, ty)) => {
+                        let layout = bcx.ccx.layout_of(self.monomorphize(&ty));
                         let llval = [a, b][f.index()];
+                        // HACK(eddyb) have to bitcast pointers
+                        // until LLVM removes pointee types.
+                        let llval = bcx.bitcast(llval,
+                            layout.immediate_llvm_type(bcx.ccx));
                         return OperandRef {
                             val: OperandValue::Immediate(llval),
-                            layout: bcx.ccx.layout_of(self.monomorphize(&ty))
+                            layout
                         };
                     }
                     _ => {}

View file

@@ -29,6 +29,12 @@ fn uncached_llvm_type<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
             return Type::vector(&layout.field(ccx, 0).llvm_type(ccx),
                                 layout.fields.count() as u64);
         }
+        layout::Abi::ScalarPair(..) => {
+            return Type::struct_(ccx, &[
+                layout.scalar_pair_element_llvm_type(ccx, 0),
+                layout.scalar_pair_element_llvm_type(ccx, 1),
+            ], false);
+        }
         layout::Abi::Uninhabited |
         layout::Abi::Aggregate { .. } => {}
     }
@@ -174,12 +180,15 @@ pub struct PointeeInfo {
 pub trait LayoutLlvmExt<'tcx> {
     fn is_llvm_immediate(&self) -> bool;
-    fn is_llvm_scalar_pair<'a>(&self, ccx: &CrateContext<'a, 'tcx>) -> bool;
+    fn is_llvm_scalar_pair<'a>(&self) -> bool;
     fn llvm_type<'a>(&self, ccx: &CrateContext<'a, 'tcx>) -> Type;
     fn immediate_llvm_type<'a>(&self, ccx: &CrateContext<'a, 'tcx>) -> Type;
+    fn scalar_pair_element_llvm_type<'a>(&self, ccx: &CrateContext<'a, 'tcx>,
+                                         index: usize) -> Type;
     fn over_align(&self) -> Option<Align>;
     fn llvm_field_index(&self, index: usize) -> u64;
-    fn pointee_info<'a>(&self, ccx: &CrateContext<'a, 'tcx>) -> Option<PointeeInfo>;
+    fn pointee_info_at<'a>(&self, ccx: &CrateContext<'a, 'tcx>, offset: Size)
+                           -> Option<PointeeInfo>;
 }

 impl<'tcx> LayoutLlvmExt<'tcx> for TyLayout<'tcx> {
@@ -188,26 +197,18 @@ impl<'tcx> LayoutLlvmExt<'tcx> for TyLayout<'tcx> {
             layout::Abi::Uninhabited |
             layout::Abi::Scalar(_) |
             layout::Abi::Vector => true,
+            layout::Abi::ScalarPair(..) => false,
             layout::Abi::Aggregate { .. } => self.is_zst()
         }
     }

-    fn is_llvm_scalar_pair<'a>(&self, ccx: &CrateContext<'a, 'tcx>) -> bool {
-        match self.fields {
-            layout::FieldPlacement::Arbitrary { .. } => {
-                // There must be only 2 fields.
-                if self.fields.count() != 2 {
-                    return false;
-                }
-
-                // The two fields must be both scalars.
-                match (&self.field(ccx, 0).abi, &self.field(ccx, 1).abi) {
-                    (&layout::Abi::Scalar(_), &layout::Abi::Scalar(_)) => true,
-                    _ => false
-                }
-            }
-            _ => false
+    fn is_llvm_scalar_pair<'a>(&self) -> bool {
+        match self.abi {
+            layout::Abi::ScalarPair(..) => true,
+            layout::Abi::Uninhabited |
+            layout::Abi::Scalar(_) |
+            layout::Abi::Vector |
+            layout::Abi::Aggregate { .. } => false
         }
     }
@@ -248,7 +249,7 @@ impl<'tcx> LayoutLlvmExt<'tcx> for TyLayout<'tcx> {
             }
             _ => {
                 // If we know the alignment, pick something better than i8.
-                if let Some(pointee) = self.pointee_info(ccx) {
+                if let Some(pointee) = self.pointee_info_at(ccx, Size::from_bytes(0)) {
                     Type::pointee_for_abi_align(ccx, pointee.align)
                 } else {
                     Type::i8(ccx)
@@ -310,6 +311,59 @@ impl<'tcx> LayoutLlvmExt<'tcx> for TyLayout<'tcx> {
         self.llvm_type(ccx)
     }

+    fn scalar_pair_element_llvm_type<'a>(&self, ccx: &CrateContext<'a, 'tcx>,
+                                         index: usize) -> Type {
+        // HACK(eddyb) special-case fat pointers until LLVM removes
+        // pointee types, to avoid bitcasting every `OperandRef::deref`.
+        match self.ty.sty {
+            ty::TyRef(..) |
+            ty::TyRawPtr(_) => {
+                return self.field(ccx, index).llvm_type(ccx);
+            }
+            ty::TyAdt(def, _) if def.is_box() => {
+                return self.field(ccx, index).llvm_type(ccx);
+            }
+            _ => {}
+        }
+
+        let (a, b) = match self.abi {
+            layout::Abi::ScalarPair(ref a, ref b) => (a, b),
+            _ => bug!("TyLayout::scalar_pair_element_llty({:?}): not applicable", self)
+        };
+        let scalar = [a, b][index];
+
+        // Make sure to return the same type `immediate_llvm_type` would,
+        // to avoid dealing with two types and the associated conversions.
+        // This means that `(bool, bool)` is represented as `{i1, i1}`,
+        // both in memory and as an immediate, while `bool` is typically
+        // `i8` in memory and only `i1` when immediate. While we need to
+        // load/store `bool` as `i8` to avoid crippling LLVM optimizations,
+        // `i1` in a LLVM aggregate is valid and mostly equivalent to `i8`.
+        if scalar.is_bool() {
+            return Type::i1(ccx);
+        }
+
+        match scalar.value {
+            layout::Int(i, _) => Type::from_integer(ccx, i),
+            layout::F32 => Type::f32(ccx),
+            layout::F64 => Type::f64(ccx),
+            layout::Pointer => {
+                // If we know the alignment, pick something better than i8.
+                let offset = if index == 0 {
+                    Size::from_bytes(0)
+                } else {
+                    a.value.size(ccx).abi_align(b.value.align(ccx))
+                };
+                let pointee = if let Some(pointee) = self.pointee_info_at(ccx, offset) {
+                    Type::pointee_for_abi_align(ccx, pointee.align)
+                } else {
+                    Type::i8(ccx)
+                };
+                pointee.ptr_to()
+            }
+        }
+    }
+
     fn over_align(&self) -> Option<Align> {
         if self.align != self.primitive_align {
             Some(self.align)
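The long comment above is the subtle point: a pair element uses its immediate type, so a bool inside a ScalarPair stays `i1`. A sketch of what that means at a function boundary (hedged, simplified):

#[no_mangle]
pub fn swap(p: (bool, bool)) -> (bool, bool) {
    // The pair's immediate LLVM type is { i1, i1 }, per the rule above;
    // a lone bool would be i8 in memory and i1 only as an immediate.
    (p.1, p.0)
}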
@@ -319,8 +373,12 @@ impl<'tcx> LayoutLlvmExt<'tcx> for TyLayout<'tcx> {
     }

     fn llvm_field_index(&self, index: usize) -> u64 {
-        if let layout::Abi::Scalar(_) = self.abi {
-            bug!("TyLayout::llvm_field_index({:?}): not applicable", self);
+        match self.abi {
+            layout::Abi::Scalar(_) |
+            layout::Abi::ScalarPair(..) => {
+                bug!("TyLayout::llvm_field_index({:?}): not applicable", self)
+            }
+            _ => {}
         }
         match self.fields {
             layout::FieldPlacement::Union(_) => {
@@ -337,20 +395,15 @@ impl<'tcx> LayoutLlvmExt<'tcx> for TyLayout<'tcx> {
         }
     }

-    fn pointee_info<'a>(&self, ccx: &CrateContext<'a, 'tcx>) -> Option<PointeeInfo> {
-        // We only handle thin pointers here.
-        match self.abi {
-            layout::Abi::Scalar(layout::Scalar { value: layout::Pointer, .. }) => {}
-            _ => return None
-        }
-
-        if let Some(&pointee) = ccx.pointee_infos().borrow().get(&self.ty) {
+    fn pointee_info_at<'a>(&self, ccx: &CrateContext<'a, 'tcx>, offset: Size)
+                           -> Option<PointeeInfo> {
+        if let Some(&pointee) = ccx.pointee_infos().borrow().get(&(self.ty, offset)) {
             return pointee;
         }

         let mut result = None;
         match self.ty.sty {
-            ty::TyRawPtr(mt) => {
+            ty::TyRawPtr(mt) if offset.bytes() == 0 => {
                 let (size, align) = ccx.size_and_align_of(mt.ty);
                 result = Some(PointeeInfo {
                     size,
@@ -359,7 +412,7 @@ impl<'tcx> LayoutLlvmExt<'tcx> for TyLayout<'tcx> {
                 });
             }

-            ty::TyRef(_, mt) => {
+            ty::TyRef(_, mt) if offset.bytes() == 0 => {
                 let (size, align) = ccx.size_and_align_of(mt.ty);

                 let kind = match mt.mutbl {
@@ -385,7 +438,7 @@ impl<'tcx> LayoutLlvmExt<'tcx> for TyLayout<'tcx> {
                 });
             }

-            ty::TyAdt(def, _) if def.is_box() => {
+            ty::TyAdt(def, _) if def.is_box() && offset.bytes() == 0 => {
                 let (size, align) = ccx.size_and_align_of(self.ty.boxed_ty());
                 result = Some(PointeeInfo {
                     size,
@@ -408,7 +461,7 @@ impl<'tcx> LayoutLlvmExt<'tcx> for TyLayout<'tcx> {
                         // to work as long as we don't start using more
                         // niches than just null (e.g. the first page
                         // of the address space, or unaligned pointers).
-                        if self.fields.offset(0).bytes() == 0 {
+                        if self.fields.offset(0) == offset {
                             Some(self.for_variant(ccx, dataful_variant))
                         } else {
                             None
@@ -425,12 +478,16 @@ impl<'tcx> LayoutLlvmExt<'tcx> for TyLayout<'tcx> {
             }

             if let Some(variant) = data_variant {
+                let ptr_end = offset + layout::Pointer.size(ccx);
                 for i in 0..variant.fields.count() {
-                    let field = variant.field(ccx, i);
-                    if field.size == self.size {
-                        // We found the pointer field, use its information.
-                        result = field.pointee_info(ccx);
-                        break;
+                    let field_start = variant.fields.offset(i);
+                    if field_start <= offset {
+                        let field = variant.field(ccx, i);
+                        if ptr_end <= field_start + field.size {
+                            // We found the right field, look inside it.
+                            result = field.pointee_info_at(ccx, offset - field_start);
+                            break;
+                        }
                     }
                 }
             }
@@ -447,7 +504,7 @@ impl<'tcx> LayoutLlvmExt<'tcx> for TyLayout<'tcx> {
             }
         }

-        ccx.pointee_infos().borrow_mut().insert(self.ty, result);
+        ccx.pointee_infos().borrow_mut().insert((self.ty, offset), result);
         result
     }
 }
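The variant-field walk added above generalizes the old "field with the same size" heuristic: it looks for the field that fully contains the pointer-sized window at `offset` and recurses with a field-relative offset. A standalone sketch of that containment logic (simplified types, not rustc's API):

struct Field { start: u64, size: u64, name: &'static str }

// Find the field containing [offset, offset + ptr_size) and return its
// name with the offset rebased to the field's start, like pointee_info_at.
fn field_containing(fields: &[Field], offset: u64, ptr_size: u64)
                    -> Option<(&'static str, u64)> {
    let ptr_end = offset + ptr_size;
    fields.iter()
        .find(|f| f.start <= offset && ptr_end <= f.start + f.size)
        .map(|f| (f.name, offset - f.start))
}

fn main() {
    // A fat pointer on a 64-bit target: data pointer at 0, metadata at 8.
    let fields = [Field { start: 0, size: 8, name: "data" },
                  Field { start: 8, size: 8, name: "meta" }];
    assert_eq!(field_containing(&fields, 0, 8), Some(("data", 0)));
    assert_eq!(field_containing(&fields, 8, 8), Some(("meta", 0)));
}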

View file

@@ -24,9 +24,9 @@ pub fn helper(_: usize) {
 pub fn no_op_slice_adjustment(x: &[u8]) -> &[u8] {
     // We used to generate an extra alloca and memcpy for the block's trailing expression value, so
     // check that we copy directly to the return value slot
-    // CHECK: %0 = insertvalue { [0 x i8], [0 x i8]*, [0 x i8], [[USIZE]], [0 x i8] } undef, [0 x i8]* %x.0, 1
-    // CHECK: %1 = insertvalue { [0 x i8], [0 x i8]*, [0 x i8], [[USIZE]], [0 x i8] } %0, [[USIZE]] %x.1, 3
-    // CHECK: ret { [0 x i8], [0 x i8]*, [0 x i8], [[USIZE]], [0 x i8] } %1
+    // CHECK: %0 = insertvalue { [0 x i8]*, [[USIZE]] } undef, [0 x i8]* %x.0, 0
+    // CHECK: %1 = insertvalue { [0 x i8]*, [[USIZE]] } %0, [[USIZE]] %x.1, 1
+    // CHECK: ret { [0 x i8]*, [[USIZE]] } %1
     { x }
 }

View file

@@ -133,7 +133,7 @@ pub fn trait_borrow(_: &Drop) {
 pub fn trait_box(_: Box<Drop>) {
 }

-// CHECK: { [0 x i8], [0 x i16]*, [0 x i8], [[USIZE]], [0 x i8] } @return_slice([0 x i16]* noalias nonnull readonly %x.0, [[USIZE]] %x.1)
+// CHECK: { [0 x i16]*, [[USIZE]] } @return_slice([0 x i16]* noalias nonnull readonly %x.0, [[USIZE]] %x.1)
 #[no_mangle]
 pub fn return_slice(x: &[u16]) -> &[u16] {
     x

View file

@@ -54,9 +54,6 @@ pub struct PackedPair(u8, u32);
 // CHECK-LABEL: @pkd_pair
 #[no_mangle]
 pub fn pkd_pair(pair1: &mut PackedPair, pair2: &mut PackedPair) {
-    // CHECK: [[V1:%[a-z0-9]+]] = load i8, i8* %{{.*}}, align 1
-    // CHECK: [[V2:%[a-z0-9]+]] = load i32, i32* %{{.*}}, align 1
-    // CHECK: store i8 [[V1]], i8* {{.*}}, align 1
-    // CHECK: store i32 [[V2]], i32* {{.*}}, align 1
+    // CHECK: call void @llvm.memcpy.{{.*}}(i8* %{{.*}}, i8* %{{.*}}, i{{[0-9]+}} 5, i32 1, i1 false)
     *pair2 = *pair1;
 }

View file

@@ -24,9 +24,9 @@ pub fn helper(_: usize) {
 pub fn ref_dst(s: &[u8]) {
     // We used to generate an extra alloca and memcpy to ref the dst, so check that we copy
     // directly to the alloca for "x"
-    // CHECK: [[X0:%[0-9]+]] = bitcast { [0 x i8], [0 x i8]*, [0 x i8], [[USIZE]], [0 x i8] }* %x to [0 x i8]**
+    // CHECK: [[X0:%[0-9]+]] = bitcast { [0 x i8]*, [[USIZE]] }* %x to [0 x i8]**
     // CHECK: store [0 x i8]* %s.0, [0 x i8]** [[X0]]
-    // CHECK: [[X1:%[0-9]+]] = getelementptr {{.*}} { [0 x i8], [0 x i8]*, [0 x i8], [[USIZE]], [0 x i8] }* %x, i32 0, i32 3
+    // CHECK: [[X1:%[0-9]+]] = getelementptr {{.*}} { [0 x i8]*, [[USIZE]] }* %x, i32 0, i32 1
     // CHECK: store [[USIZE]] %s.1, [[USIZE]]* [[X1]]
     let x = &*s;