rustc: Root values spilled via do_spill()

Patrick Walton 2011-09-02 15:12:27 -07:00
parent 00470fef12
commit e68f687179
4 changed files with 76 additions and 42 deletions
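do_spill() now takes the spilled value's type, allocates the spill slot with alloc_ty() instead of a raw alloca so the slot is rooted, and returns a result carrying both the slot pointer and the (possibly updated) block context. Callers that only need a raw, unrooted slot (the vector code spilling interior data pointers) switch to the new do_spill_noroot(). A minimal sketch of the caller pattern this introduces, using names from this tree (not compilable on its own):

    // Before: a bare pointer came back.
    //     let addr = do_spill(bcx, val);
    // After: rebind the block context from the returned result.
    let r = do_spill(bcx, val, e_ty); // alloc_ty() returns a possibly new block
    bcx = r.bcx;                      // keep emitting into that block
    let addr = r.val;                 // pointer to the spilled, rooted value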

View file

@@ -1983,36 +1983,39 @@ fn call_cmp_glue(cx: &@block_ctxt, lhs: ValueRef, rhs: ValueRef, t: ty::t,
     // We can't use call_tydesc_glue_full() and friends here because compare
     // glue has a special signature.
-    let lllhs = spill_if_immediate(cx, lhs, t);
-    let llrhs = spill_if_immediate(cx, rhs, t);
-    let llrawlhsptr = BitCast(cx, lllhs, T_ptr(T_i8()));
-    let llrawrhsptr = BitCast(cx, llrhs, T_ptr(T_i8()));
+    let bcx = cx;
+
+    let r = spill_if_immediate(bcx, lhs, t);
+    let lllhs = r.val; bcx = r.bcx;
+    r = spill_if_immediate(bcx, rhs, t);
+    let llrhs = r.val; bcx = r.bcx;
+
+    let llrawlhsptr = BitCast(bcx, lllhs, T_ptr(T_i8()));
+    let llrawrhsptr = BitCast(bcx, llrhs, T_ptr(T_i8()));

     let ti = none::<@tydesc_info>;
-    let r = get_tydesc(cx, t, false, tps_normal, ti).result;
-    lazily_emit_tydesc_glue(cx, abi::tydesc_field_cmp_glue, ti);
-    let lltydesc = r.val;
-    let lltydescs =
-        GEP(r.bcx, lltydesc,
-            [C_int(0), C_int(abi::tydesc_field_first_param)]);
-    lltydescs = Load(r.bcx, lltydescs);
+    r = get_tydesc(bcx, t, false, tps_normal, ti).result;
+    let lltydesc = r.val; bcx = r.bcx;
+    lazily_emit_tydesc_glue(bcx, abi::tydesc_field_cmp_glue, ti);
+    let lltydescs = GEP(bcx, lltydesc,
+                        [C_int(0), C_int(abi::tydesc_field_first_param)]);
+    lltydescs = Load(bcx, lltydescs);

     let llfn;
     alt ti {
       none. {
-        let llfnptr =
-            GEP(r.bcx, lltydesc,
-                [C_int(0), C_int(abi::tydesc_field_cmp_glue)]);
-        llfn = Load(r.bcx, llfnptr);
+        let llfnptr = GEP(bcx, lltydesc,
+                          [C_int(0), C_int(abi::tydesc_field_cmp_glue)]);
+        llfn = Load(bcx, llfnptr);
       }
       some(sti) { llfn = option::get(sti.cmp_glue); }
     }

-    let llcmpresultptr = alloca(r.bcx, T_i1());
+    let llcmpresultptr = alloca(bcx, T_i1());
     let llargs: [ValueRef] =
-        [llcmpresultptr, r.bcx.fcx.lltaskptr, lltydesc, lltydescs,
+        [llcmpresultptr, bcx.fcx.lltaskptr, lltydesc, lltydescs,
         llrawlhsptr, llrawrhsptr, llop];
-    Call(r.bcx, llfn, llargs);
-    ret rslt(r.bcx, Load(r.bcx, llcmpresultptr));
+    Call(bcx, llfn, llargs);
+    ret rslt(bcx, Load(bcx, llcmpresultptr));
 }

 // Compares two values. Performs the simple scalar comparison if the types are
@@ -3625,7 +3628,10 @@ fn trans_arg_expr(cx: &@block_ctxt, arg: &ty::arg, lldestty0: TypeRef,
             add_clean_temp(bcx, val, e_ty);
         } else {
             if ty::type_is_vec(ccx.tcx, e_ty) {
-                let arg_copy = do_spill(bcx, Load(bcx, val));
+                let r = do_spill(bcx, Load(bcx, val), e_ty);
+                bcx = r.bcx;
+                let arg_copy = r.val;
+
                 bcx = take_ty(bcx, arg_copy, e_ty);
                 val = Load(bcx, arg_copy);
             } else {
@@ -3635,7 +3641,8 @@ fn trans_arg_expr(cx: &@block_ctxt, arg: &ty::arg, lldestty0: TypeRef,
                 add_clean_temp(bcx, val, e_ty);
             }
         }
     } else if type_is_immediate(ccx, e_ty) && !lv.is_mem {
-        val = do_spill(bcx, val);
+        let r = do_spill(bcx, val, e_ty);
+        val = r.val; bcx = r.bcx;
     }

     if !is_bot && ty::type_contains_params(ccx.tcx, arg.ty) {
@@ -3812,11 +3819,12 @@ fn trans_call(in_cx: &@block_ctxt, f: &@ast::expr,
     bcx = args_res.bcx;
     let llargs = args_res.args;
     let llretslot = args_res.retslot;
+
     /*
-    log "calling: " + val_str(bcx_ccx(cx).tn, faddr);
+    log_err "calling: " + val_str(bcx_ccx(cx).tn, faddr);
     for arg: ValueRef in llargs {
-        log "arg: " + val_str(bcx_ccx(cx).tn, arg);
+        log_err "arg: " + val_str(bcx_ccx(cx).tn, arg);
     }
     */
@@ -4172,16 +4180,29 @@ fn type_is_immediate(ccx: &@crate_ctxt, t: ty::t) -> bool {
         ty::type_is_native(ccx.tcx, t) || ty::type_is_vec(ccx.tcx, t);
 }

-fn do_spill(cx: &@block_ctxt, v: ValueRef) -> ValueRef {
-    // We have a value but we have to spill it to pass by alias.
+fn do_spill(cx: &@block_ctxt, v: ValueRef, t: ty::t) -> result {
+    // We have a value but we have to spill it, and root it, to pass by alias.
+    let bcx = cx;
+
+    let r = alloc_ty(bcx, t);
+    bcx = r.bcx;
+    let llptr = r.val;
+
+    Store(bcx, v, llptr);
+    ret rslt(bcx, llptr);
+}
+
+// Since this function does *not* root, it is the caller's responsibility to
+// ensure that the referent is pointed to by a root.
+fn do_spill_noroot(cx: &@block_ctxt, v: ValueRef) -> ValueRef {
     let llptr = alloca(cx, val_ty(v));
     Store(cx, v, llptr);
     ret llptr;
 }

-fn spill_if_immediate(cx: &@block_ctxt, v: ValueRef, t: ty::t) -> ValueRef {
-    if type_is_immediate(bcx_ccx(cx), t) { ret do_spill(cx, v); }
-    ret v;
+fn spill_if_immediate(cx: &@block_ctxt, v: ValueRef, t: ty::t) -> result {
+    if type_is_immediate(bcx_ccx(cx), t) { ret do_spill(cx, v, t); }
+    ret rslt(cx, v);
 }

 fn load_if_immediate(cx: &@block_ctxt, v: ValueRef, t: ty::t) -> ValueRef {
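spill_if_immediate() now returns a result as well: immediates go through the rooted do_spill(), and everything else comes back unchanged as rslt(cx, v). The callers updated in the remaining hunks all pick up both fields of the result; a sketch, using the names this diff uses:

    let r = spill_if_immediate(bcx, val, t);
    let spilled = r.val; // the original pointer, or the rooted spill slot
    bcx = r.bcx;         // continue in the block the spill left off in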
@@ -4222,13 +4243,17 @@ fn trans_log(lvl: int, cx: &@block_ctxt, e: &@ast::expr) -> result {
     let ti = none::<@tydesc_info>;
     let r = get_tydesc(log_bcx, e_ty, false, tps_normal, ti).result;
     log_bcx = r.bcx;
+    let lltydesc = r.val;

     // Call the polymorphic log function.
-    let llvalptr = spill_if_immediate(log_bcx, sub.val, e_ty);
+    r = spill_if_immediate(log_bcx, sub.val, e_ty);
+    log_bcx = r.bcx;
+    let llvalptr = r.val;
+
     let llval_i8 = PointerCast(log_bcx, llvalptr, T_ptr(T_i8()));
     Call(log_bcx, bcx_ccx(log_bcx).upcalls.log_type,
-         [log_bcx.fcx.lltaskptr, r.val, llval_i8, C_int(lvl)]);
+         [log_bcx.fcx.lltaskptr, lltydesc, llval_i8, C_int(lvl)]);

     log_bcx = trans_block_cleanups(log_bcx, log_cx);
     Br(log_bcx, after_cx.llbb);
@@ -4940,7 +4965,8 @@ fn create_llargs_for_fn_args(cx: &@fn_ctxt, proto: ast::proto,

 fn copy_args_to_allocas(fcx: @fn_ctxt, scope: @block_ctxt,
                         args: &[ast::arg], arg_tys: &[ty::arg]) {
-    let bcx = new_raw_block_ctxt(fcx, fcx.llcopyargs);
+    let llcopyargs = new_raw_block_ctxt(fcx, fcx.llcopyargs);
+    let bcx = llcopyargs;
     let arg_n: uint = 0u;
     for aarg: ast::arg in args {
         let arg_ty = arg_tys[arg_n].ty;
@@ -4951,7 +4977,11 @@ fn copy_args_to_allocas(fcx: @fn_ctxt, scope: @block_ctxt,
         if !type_is_structural_or_param(fcx_tcx(fcx), arg_ty) {
             // Overwrite the llargs entry for this arg with its alloca.
             let aval = bcx.fcx.llargs.get(aarg.id);
-            let addr = do_spill(bcx, aval);
+
+            let r = do_spill(bcx, aval, arg_ty);
+            bcx = r.bcx;
+            let addr = r.val;
+
             bcx.fcx.llargs.insert(aarg.id, addr);

             // Args that are locally assigned to need to do a local
@@ -4969,7 +4999,7 @@ fn copy_args_to_allocas(fcx: @fn_ctxt, scope: @block_ctxt,
         }
         arg_n += 1u;
     }
-    fcx.llcopyargs = bcx.llbb;
+    fcx.llcopyargs = llcopyargs.llbb;
 }

 fn is_terminated(cx: &@block_ctxt) -> bool {

View file

@@ -534,9 +534,9 @@ fn trans_alt(cx: &@block_ctxt, expr: &@ast::expr, arms: &[ast::arm],
     let exit_map = [];
     let t = trans::node_id_type(cx.fcx.lcx.ccx, expr.id);
-    let v = trans::spill_if_immediate(er.bcx, er.val, t);
-    compile_submatch(er.bcx, match, [v], bind mk_fail(cx, expr.span, fail_cx),
-                     exit_map);
+    let vr = trans::spill_if_immediate(er.bcx, er.val, t);
+    compile_submatch(vr.bcx, match, [vr.val],
+                     bind mk_fail(cx, expr.span, fail_cx), exit_map);
     let i = 0u;
     let arm_results = [];

View file

@@ -302,9 +302,11 @@ fn add_clean(cx: &@block_ctxt, val: ValueRef, ty: ty::t) {
     find_scope_cx(cx).cleanups += [clean(bind drop_ty(_, val, ty))];
 }
 fn add_clean_temp(cx: &@block_ctxt, val: ValueRef, ty: ty::t) {
-    fn spill_and_drop(bcx: &@block_ctxt, val: ValueRef, ty: ty::t)
+    fn spill_and_drop(cx: &@block_ctxt, val: ValueRef, ty: ty::t)
         -> @block_ctxt {
-        let spilled = trans::spill_if_immediate(bcx, val, ty);
+        let bcx = cx;
+        let r = trans::spill_if_immediate(bcx, val, ty);
+        let spilled = r.val; bcx = r.bcx;
         ret drop_ty(bcx, spilled, ty);
     }
     find_scope_cx(cx).cleanups +=

View file

@@ -8,7 +8,7 @@ import trans::{call_memmove, trans_shared_malloc, llsize_of,
                alloca, size_of, llderivedtydescs_block_ctxt,
                lazily_emit_tydesc_glue, get_tydesc, load_inbounds,
                move_val_if_temp, trans_lval, node_id_type,
-               new_sub_block_ctxt, tps_normal, do_spill};
+               new_sub_block_ctxt, tps_normal, do_spill_noroot};
 import trans_build::*;
 import trans_common::*;
@@ -159,7 +159,7 @@ fn trans_append(cx: &@block_ctxt, vec_ty: ty::t, lhsptr: ValueRef,
     let lhs_off = lfill;
     if strings { lhs_off = Sub(bcx, lhs_off, C_int(1)); }
     let write_ptr = pointer_add(bcx, lhs_data, lhs_off);
-    let write_ptr_ptr = do_spill(bcx, write_ptr);
+    let write_ptr_ptr = do_spill_noroot(bcx, write_ptr);
     let bcx = iter_vec_raw(bcx, rhs, vec_ty, rfill, { | &bcx, addr, _ty |
         let write_ptr = Load(bcx, write_ptr_ptr);
         let bcx = copy_val(bcx, INIT, write_ptr,
@@ -186,7 +186,8 @@ fn trans_append_literal(bcx: &@block_ctxt, vptrptr: ValueRef, vec_ty: ty::t,
     for val in vals {
         let {bcx: e_bcx, val: elt} = trans::trans_expr(bcx, val);
         bcx = e_bcx;
-        let spilled = trans::spill_if_immediate(bcx, elt, elt_ty);
+        let r = trans::spill_if_immediate(bcx, elt, elt_ty);
+        let spilled = r.val; bcx = r.bcx;
         Call(bcx, bcx_ccx(bcx).upcalls.vec_push,
              [bcx.fcx.lltaskptr, opaque_v, td,
               PointerCast(bcx, spilled, T_ptr(T_i8()))]);
@@ -212,7 +213,8 @@ fn trans_add(bcx: &@block_ctxt, vec_ty: ty::t, lhs: ValueRef,
     let new_vec = PointerCast(bcx, new_vec, T_ptr(T_vec(llunitty)));
     add_clean_temp(bcx, new_vec, vec_ty);
-    let write_ptr_ptr = do_spill(bcx, get_dataptr(bcx, new_vec, llunitty));
+    let write_ptr_ptr = do_spill_noroot(bcx,
+                                        get_dataptr(bcx, new_vec, llunitty));

     let copy_fn = bind fn(bcx: &@block_ctxt, addr: ValueRef, _ty: ty::t,
                           write_ptr_ptr: ValueRef, unit_ty: ty::t,
                           llunitsz: ValueRef) -> @block_ctxt {
@@ -249,7 +251,7 @@ fn iter_vec_raw(bcx: &@block_ctxt, vptr: ValueRef, vec_ty: ty::t,
     // TODO: Optimize this when the size of the unit type is statically
     // known to not use pointer casts, which tend to confuse LLVM.
     let data_end_ptr = pointer_add(bcx, data_ptr, fill);
-    let data_ptr_ptr = do_spill(bcx, data_ptr);
+    let data_ptr_ptr = do_spill_noroot(bcx, data_ptr);

     // Now perform the iteration.
     let header_cx = new_sub_block_ctxt(bcx, ~"iter_vec_loop_header");