Rename load_value_pair to load_scalar_pair and fix dynamic dispatch with arbitrary self types

This commit is contained in:
bjorn3 2019-03-02 21:09:28 +01:00
parent 57feadcc13
commit 2ce5387b7c
8 changed files with 135 additions and 30 deletions

View file

@ -0,0 +1,83 @@
// Adapted from rustc run-pass test suite
#![feature(no_core, arbitrary_self_types, box_syntax)]
#![feature(rustc_attrs)]
#![feature(start, lang_items)]
#![no_core]
extern crate mini_core;
use mini_core::*;
use mini_core::libc::*;
// Minimal stand-in for std's `assert_eq!`: under `#![no_core]` the real macro
// is unavailable, so a failed comparison reports through `mini_core`'s `panic`
// helper with the stringified expression and source location.
macro_rules! assert_eq {
    ($l:expr, $r: expr) => {
        if $l != $r {
            panic(&(stringify!($l != $r), file!(), line!(), 0));
        }
    }
}
// `Ptr<T>`: a custom smart pointer (a thin wrapper around `Box<T>`) used as an
// arbitrary `self` receiver type in the dynamic-dispatch tests below.
struct Ptr<T: ?Sized>(Box<T>);

impl<T: ?Sized> Deref for Ptr<T> {
    type Target = T;
    fn deref(&self) -> &T {
        // Reborrow through the owned `Box` to hand out `&T`.
        &*self.0
    }
}

// Unsizing + dispatch support so e.g. `Ptr<Wrapper<i32>>` can coerce to
// `Ptr<Wrapper<dyn Trait>>` and still be usable as a method receiver.
impl<T: Unsize<U> + ?Sized, U: ?Sized> CoerceUnsized<Ptr<U>> for Ptr<T> {}
impl<T: Unsize<U> + ?Sized, U: ?Sized> DispatchFromDyn<Ptr<U>> for Ptr<T> {}
// `Wrapper<T>`: a by-value newtype (not itself a pointer) layered around the
// receiver in the tests below.
struct Wrapper<T: ?Sized>(T);

impl<T: ?Sized> Deref for Wrapper<T> {
    type Target = T;
    fn deref(&self) -> &T {
        &self.0
    }
}

// Unlike `Ptr`, these impls forward coercion/dispatch through the wrapped
// type's own `CoerceUnsized`/`DispatchFromDyn` impls rather than requiring
// `T: Unsize<U>` directly.
impl<T: CoerceUnsized<U>, U> CoerceUnsized<Wrapper<U>> for Wrapper<T> {}
impl<T: DispatchFromDyn<U>, U> DispatchFromDyn<Wrapper<U>> for Wrapper<T> {}
/// Object-safe trait exercising nested pointer/wrapper `self` types for
/// dynamic dispatch with `arbitrary_self_types`.
trait Trait {
    // This method isn't object-safe yet. Unsized by-value `self` is object-safe (but not callable
    // without unsized_locals), but wrappers around `Self` currently are not.
    // FIXME (mikeyhew) uncomment this when unsized rvalues object-safety is implemented
    // fn wrapper(self: Wrapper<Self>) -> i32;
    fn ptr_wrapper(self: Ptr<Wrapper<Self>>) -> i32;
    fn wrapper_ptr(self: Wrapper<Ptr<Self>>) -> i32;
    fn wrapper_ptr_wrapper(self: Wrapper<Ptr<Wrapper<Self>>>) -> i32;
}
// Concrete impl: each method simply derefs through every wrapping layer
// (one `*` per `Ptr`/`Wrapper` level) to return the underlying `i32`.
impl Trait for i32 {
    fn ptr_wrapper(self: Ptr<Wrapper<Self>>) -> i32 {
        **self
    }
    fn wrapper_ptr(self: Wrapper<Ptr<Self>>) -> i32 {
        **self
    }
    fn wrapper_ptr_wrapper(self: Wrapper<Ptr<Wrapper<Self>>>) -> i32 {
        ***self
    }
}
// `#[start]` entry point: there is no std `main` under `#![no_core]`.
#[start]
fn main(_: isize, _: *const *const u8) -> isize {
    // For each receiver shape: build the concrete value, unsize-coerce it to
    // its `dyn Trait` form, then call through the vtable.
    let pw = Ptr(box Wrapper(5)) as Ptr<Wrapper<dyn Trait>>;
    assert_eq!(pw.ptr_wrapper(), 5);

    let wp = Wrapper(Ptr(box 6)) as Wrapper<Ptr<dyn Trait>>;
    assert_eq!(wp.wrapper_ptr(), 6);

    let wpw = Wrapper(Ptr(box Wrapper(7))) as Wrapper<Ptr<Wrapper<dyn Trait>>>;
    assert_eq!(wpw.wrapper_ptr_wrapper(), 7);

    // Exit code 0 = success.
    0
}

View file

@ -27,6 +27,7 @@ impl<'a, T: ?Sized+Unsize<U>, U: ?Sized> DispatchFromDyn<&'a mut U> for &'a mut
// *const T -> *const U
impl<T: ?Sized+Unsize<U>, U: ?Sized> DispatchFromDyn<*const U> for *const T {}
// *mut T -> *mut U
impl<T: ?Sized+Unsize<U>, U: ?Sized> DispatchFromDyn<*mut U> for *mut T {}
// Box<T> -> Box<U>; added so `Box` (and wrappers built on it) can serve as a
// dyn-dispatch method receiver in this commit.
impl<T: ?Sized + Unsize<U>, U: ?Sized> DispatchFromDyn<Box<U>> for Box<T> {}

// Marker lang-item trait for types permitted as method receivers (`self` types).
#[lang = "receiver"]
pub trait Receiver {}
@ -216,6 +217,15 @@ impl PartialEq for usize {
}
}
// `mini_core` has no blanket impls, so equality for `i32` is spelled out by
// hand. The `==` on the dereferenced values below is the compiler's built-in
// primitive comparison, not a recursive trait call.
impl PartialEq for i32 {
    fn eq(&self, other: &i32) -> bool {
        *self == *other
    }
    fn ne(&self, other: &i32) -> bool {
        // Inequality is the negation of the built-in primitive equality.
        !(*self == *other)
    }
}
impl PartialEq for isize {
fn eq(&self, other: &isize) -> bool {
(*self) == (*other)
@ -306,6 +316,13 @@ pub unsafe fn drop_in_place<T: ?Sized>(to_drop: *mut T) {
drop_in_place(to_drop);
}
// `Deref` lang item: enables the unary `*` operator and deref coercion for
// user-defined types (used by the pointer/wrapper receiver tests).
#[lang = "deref"]
pub trait Deref {
    type Target: ?Sized;
    fn deref(&self) -> &Self::Target;
}

// `Box` lang item: an owning heap pointer, represented as a single raw `*mut T`.
#[lang = "owned_box"]
pub struct Box<T: ?Sized>(*mut T);
@ -317,6 +334,14 @@ impl<T: ?Sized> Drop for Box<T> {
}
}
/// `Deref` for `Box` so boxed values participate in `*` and autoderef
/// (sized `T` only in this minimal core).
impl<T> Deref for Box<T> {
    type Target = T;
    fn deref(&self) -> &Self::Target {
        // SAFETY: a live `Box` owns a valid allocation of `T` (its raw
        // pointer field), so reborrowing it for `&self`'s lifetime is sound.
        unsafe { &*self.0 }
    }
}
#[lang = "exchange_malloc"]
// Make it available to jited mini_core_hello_world
// FIXME remove next line when jit supports linking rlibs

View file

@ -537,7 +537,7 @@ fn trans_stmt<'a, 'tcx: 'a>(
lval.write_cvalue(fx, operand.unchecked_cast_to(dest_layout));
} else {
// fat-ptr -> thin-ptr
let (ptr, _extra) = operand.load_value_pair(fx);
let (ptr, _extra) = operand.load_scalar_pair(fx);
lval.write_cvalue(fx, CValue::ByVal(ptr, dest_layout))
}
} else if let ty::Adt(adt_def, _substs) = from_ty.sty {
@ -1101,8 +1101,8 @@ fn trans_ptr_binop<'a, 'tcx: 'a>(
Offset (_) bug; // Handled above
}
} else {
let (lhs_ptr, lhs_extra) = lhs.load_value_pair(fx);
let (rhs_ptr, rhs_extra) = rhs.load_value_pair(fx);
let (lhs_ptr, lhs_extra) = lhs.load_scalar_pair(fx);
let (rhs_ptr, rhs_extra) = rhs.load_scalar_pair(fx);
let res = match bin_op {
BinOp::Eq => {
let ptr_eq = fx.bcx.ins().icmp(IntCC::Equal, lhs_ptr, rhs_ptr);

View file

@ -166,33 +166,33 @@ impl<'tcx> CValue<'tcx> {
fx.bcx.ins().load(clif_ty, MemFlags::new(), addr, 0)
}
CValue::ByVal(value, _layout) => value,
CValue::ByValPair(_, _, _layout) => bug!("Please use load_value_pair for ByValPair"),
CValue::ByValPair(_, _, _layout) => bug!("Please use load_scalar_pair for ByValPair"),
}
}
pub fn load_value_pair<'a>(self, fx: &mut FunctionCx<'a, 'tcx, impl Backend>) -> (Value, Value)
/// Load a value pair with layout.abi of scalar pair
pub fn load_scalar_pair<'a>(self, fx: &mut FunctionCx<'a, 'tcx, impl Backend>) -> (Value, Value)
where
'tcx: 'a,
{
match self {
CValue::ByRef(addr, layout) => {
assert_eq!(
layout.size.bytes(),
fx.tcx.data_layout.pointer_size.bytes() * 2
let (a, b) = match &layout.abi {
layout::Abi::ScalarPair(a, b) => (a.clone(), b.clone()),
_ => unreachable!(),
};
let clif_ty1 = crate::abi::scalar_to_clif_type(fx.tcx, a.clone());
let clif_ty2 = crate::abi::scalar_to_clif_type(fx.tcx, b);
let val1 = fx.bcx.ins().load(clif_ty1, MemFlags::new(), addr, 0);
let val2 = fx.bcx.ins().load(
clif_ty2,
MemFlags::new(),
addr,
a.value.size(&fx.tcx).bytes() as i32,
);
let val1_offset = layout.fields.offset(0).bytes() as i32;
let val2_offset = layout.fields.offset(1).bytes() as i32;
let val1 = fx
.bcx
.ins()
.load(fx.pointer_type, MemFlags::new(), addr, val1_offset);
let val2 = fx
.bcx
.ins()
.load(fx.pointer_type, MemFlags::new(), addr, val2_offset);
(val1, val2)
}
CValue::ByVal(_, _layout) => bug!("Please use load_value for ByVal"),
CValue::ByVal(_, _layout) => bug!("Please use load_scalar for ByVal"),
CValue::ByValPair(val1, val2, _layout) => (val1, val2),
}
}

View file

@ -156,7 +156,7 @@ pub fn codegen_intrinsic_call<'a, 'tcx: 'a>(
size_of_val, <T> (c ptr) {
let layout = fx.layout_of(T);
let size = if layout.is_unsized() {
let (_ptr, info) = ptr.load_value_pair(fx);
let (_ptr, info) = ptr.load_scalar_pair(fx);
let (size, _align) = crate::unsize::size_and_align_of_dst(fx, layout.ty, info);
size
} else {
@ -175,7 +175,7 @@ pub fn codegen_intrinsic_call<'a, 'tcx: 'a>(
min_align_of_val, <T> (c ptr) {
let layout = fx.layout_of(T);
let align = if layout.is_unsized() {
let (_ptr, info) = ptr.load_value_pair(fx);
let (_ptr, info) = ptr.load_scalar_pair(fx);
let (_size, align) = crate::unsize::size_and_align_of_dst(fx, layout.ty, info);
align
} else {

View file

@ -96,7 +96,7 @@ pub fn coerce_unsized_into<'a, 'tcx: 'a>(
{
// fat-ptr to fat-ptr unsize preserves the vtable
// i.e., &'a fmt::Debug+Send => &'a fmt::Debug
src.load_value_pair(fx)
src.load_scalar_pair(fx)
} else {
let base = src.load_scalar(fx);
unsize_thin_ptr(fx, base, src_ty, dst_ty)

View file

@ -50,14 +50,7 @@ pub fn get_ptr_and_method_ref<'a, 'tcx: 'a>(
arg: CValue<'tcx>,
idx: usize,
) -> (Value, Value) {
let arg = if arg.layout().ty.is_box() {
// Cast `Box<T>` to `*mut T` so `load_value_pair` works
arg.unchecked_cast_to(fx.layout_of(fx.tcx.mk_mut_ptr(arg.layout().ty.boxed_ty())))
} else {
arg
};
let (ptr, vtable) = arg.load_value_pair(fx);
let (ptr, vtable) = arg.load_scalar_pair(fx);
let usize_size = fx.layout_of(fx.tcx.types.usize).size.bytes();
let func_ref = fx.bcx.ins().load(
pointer_ty(fx.tcx),

View file

@ -17,6 +17,10 @@ echo "[AOT] mini_core_hello_world"
# Compile the mini_core smoke test ahead-of-time and run it with sample args.
$RUSTC example/mini_core_hello_world.rs --crate-name mini_core_hello_world --crate-type bin
./target/out/mini_core_hello_world abc bcd

# Compile and run the new arbitrary-self-types dynamic-dispatch test.
echo "[AOT] arbitrary_self_types_pointers_and_wrappers"
$RUSTC example/arbitrary_self_types_pointers_and_wrappers.rs --crate-name arbitrary_self_types_pointers_and_wrappers --crate-type bin
./target/out/arbitrary_self_types_pointers_and_wrappers

# Build the full sysroot (timed; this is the slow step).
echo "[BUILD] sysroot"
time ./build_sysroot/build_sysroot.sh