auto merge of #15563 : luqmana/rust/nif, r=pcwalton
This commit is contained in: commit a1bd5d359b
6 changed files with 528 additions and 544 deletions
src/librustc/middle/trans/base.rs
@@ -862,9 +862,12 @@ pub fn trans_external_path(ccx: &CrateContext, did: ast::DefId, t: ty::t) -> Val
         ty::ty_bare_fn(ref fn_ty) => {
             match fn_ty.abi.for_target(ccx.sess().targ_cfg.os,
                                        ccx.sess().targ_cfg.arch) {
-                Some(Rust) | Some(RustIntrinsic) => {
+                Some(Rust) => {
                     get_extern_rust_fn(ccx, t, name.as_slice(), did)
                 }
+                Some(RustIntrinsic) => {
+                    ccx.sess().bug("unexpected intrinsic in trans_external_path")
+                }
                 Some(..) | None => {
                     foreign::register_foreign_item_fn(ccx, fn_ty.abi, t,
                                                       name.as_slice(), None)
@@ -1781,9 +1784,9 @@ fn register_fn(ccx: &CrateContext,
                -> ValueRef {
     match ty::get(node_type).sty {
         ty::ty_bare_fn(ref f) => {
-            assert!(f.abi == Rust || f.abi == RustIntrinsic);
+            assert!(f.abi == Rust);
        }
-        _ => fail!("expected bare rust fn or an intrinsic")
+        _ => fail!("expected bare rust fn")
    };

    let llfn = decl_rust_fn(ccx, node_type, sym.as_slice());
src/librustc/middle/trans/callee.rs
@@ -19,7 +19,6 @@
 use arena::TypedArena;
 use back::abi;
 use back::link;
-use driver::session;
 use lib::llvm::ValueRef;
 use lib::llvm::llvm;
 use metadata::csearch;
@@ -40,6 +39,7 @@ use middle::trans::expr;
 use middle::trans::glue;
 use middle::trans::inline;
 use middle::trans::foreign;
+use middle::trans::intrinsic;
 use middle::trans::meth;
 use middle::trans::monomorphize;
 use middle::trans::type_::Type;
@@ -53,7 +53,6 @@ use util::ppaux::Repr;
 use std::gc::Gc;
 use syntax::ast;
 use synabi = syntax::abi;
-use syntax::ast_map;

 pub struct MethodData {
     pub llfn: ValueRef,
@@ -68,6 +67,8 @@ pub enum CalleeData {
     // value (which is a pair).
     Fn(/* llfn */ ValueRef),

+    Intrinsic(ast::NodeId, subst::Substs),
+
     TraitMethod(MethodData)
 }

@@ -119,7 +120,21 @@ fn trans<'a>(bcx: &'a Block<'a>, expr: &ast::Expr) -> Callee<'a> {

 fn trans_def<'a>(bcx: &'a Block<'a>, def: def::Def, ref_expr: &ast::Expr)
                  -> Callee<'a> {
+    debug!("trans_def(def={}, ref_expr={})", def.repr(bcx.tcx()), ref_expr.repr(bcx.tcx()));
+    let expr_ty = node_id_type(bcx, ref_expr.id);
     match def {
+        def::DefFn(did, _) if match ty::get(expr_ty).sty {
+            ty::ty_bare_fn(ref f) => f.abi == synabi::RustIntrinsic,
+            _ => false
+        } => {
+            let substs = node_id_substs(bcx, ExprId(ref_expr.id));
+            let def_id = if did.krate != ast::LOCAL_CRATE {
+                inline::maybe_instantiate_inline(bcx.ccx(), did)
+            } else {
+                did
+            };
+            Callee { bcx: bcx, data: Intrinsic(def_id.node, substs) }
+        }
         def::DefFn(did, _) |
         def::DefStaticMethod(did, def::FromImpl(_), _) => {
             fn_callee(bcx, trans_fn_ref(bcx, did, ExprId(ref_expr.id)))
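Hedged sketch (not part of the commit): user code whose callee resolves to a
DefFn with the RustIntrinsic ABI, so trans_def now yields an Intrinsic callee
instead of a function reference. The extern block is written out here for
illustration; in practice the declaration comes from std::intrinsics, and the
feature gate name is an assumption for this era.

    #![feature(intrinsics)] // gate name assumed

    extern "rust-intrinsic" {
        fn transmute<T, U>(t: T) -> U;
    }

    fn main() {
        // A direct call: classified as Intrinsic(node, substs) by the new arm.
        let bits: u32 = unsafe { transmute(1.0f32) };
        println!("{}", bits); // 1065353216, i.e. 0x3F800000
    }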
@@ -460,27 +475,8 @@ pub fn trans_fn_ref_with_vtables(
         }
     };

-    // We must monomorphise if the fn has type parameters, is a rust
-    // intrinsic, or is a default method. In particular, if we see an
-    // intrinsic that is inlined from a different crate, we want to reemit the
-    // intrinsic instead of trying to call it in the other crate.
-    let must_monomorphise = if !substs.types.is_empty() || is_default {
-        true
-    } else if def_id.krate == ast::LOCAL_CRATE {
-        let map_node = session::expect(
-            ccx.sess(),
-            tcx.map.find(def_id.node),
-            || "local item should be in ast map".to_string());
-
-        match map_node {
-            ast_map::NodeForeignItem(_) => {
-                tcx.map.get_foreign_abi(def_id.node) == synabi::RustIntrinsic
-            }
-            _ => false
-        }
-    } else {
-        false
-    };
+    // We must monomorphise if the fn has type parameters or is a default method.
+    let must_monomorphise = !substs.types.is_empty() || is_default;

    // Create a monomorphic version of generic functions
    if must_monomorphise {
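After this simplification, intrinsics no longer force monomorphization through
this path; the remaining triggers are type parameters and default methods. A
small illustration (ordinary user code, nothing intrinsic-specific):

    // Each distinct instantiation makes substs.types non-empty at the call
    // site, so must_monomorphise is true and a monomorphic copy is emitted.
    fn id<T>(x: T) -> T { x }

    fn main() {
        let a = id(1u);    // instantiates id::<uint>
        let b = id("hi");  // instantiates id::<&'static str>
        println!("{} {}", a, b);
    }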
@@ -662,6 +658,12 @@ pub fn trans_call_inner<'a>(
     let callee = get_callee(bcx, cleanup::CustomScope(arg_cleanup_scope));
     let mut bcx = callee.bcx;

+    let (abi, ret_ty) = match ty::get(callee_ty).sty {
+        ty::ty_bare_fn(ref f) => (f.abi, f.sig.output),
+        ty::ty_closure(ref f) => (synabi::Rust, f.sig.output),
+        _ => fail!("expected bare rust fn or closure in trans_call_inner")
+    };
+
     let (llfn, llenv, llself) = match callee.data {
         Fn(llfn) => {
             (llfn, None, None)
@@ -679,14 +681,19 @@ pub fn trans_call_inner<'a>(
             let llenv = Load(bcx, llenv);
             (llfn, Some(llenv), None)
         }
+        Intrinsic(node, substs) => {
+            assert!(abi == synabi::RustIntrinsic);
+            assert!(dest.is_some());
+
+            return intrinsic::trans_intrinsic_call(bcx, node, callee_ty,
+                                                   arg_cleanup_scope, args,
+                                                   dest.unwrap(), substs);
+        }
     };

-    let (abi, ret_ty) = match ty::get(callee_ty).sty {
-        ty::ty_bare_fn(ref f) => (f.abi, f.sig.output),
-        ty::ty_closure(ref f) => (synabi::Rust, f.sig.output),
-        _ => fail!("expected bare rust fn or closure in trans_call_inner")
-    };
-    let is_rust_fn = abi == synabi::Rust || abi == synabi::RustIntrinsic;
+    // Intrinsics should not become actual functions.
+    // We trans them in place in `trans_intrinsic_call`
+    assert!(abi != synabi::RustIntrinsic);

    // Generate a location to store the result. If the user does
    // not care about the result, just make a stack slot.
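A toy model (not compiler code) of the control flow this hunk sets up: the
callee kind is matched before any of the generic call machinery runs, and
intrinsics return early, so the RustIntrinsic assertion after the match can
never fire.

    enum CalleeKind { Ordinary, IntrinsicCall, TraitMethodCall }

    fn trans_call(kind: CalleeKind) -> &'static str {
        match kind {
            // mirrors the early `return intrinsic::trans_intrinsic_call(...)`
            IntrinsicCall => return "expanded in place at the call site",
            Ordinary => "direct call through an llfn",
            TraitMethodCall => "call through a vtable entry",
        }
    }

    fn main() {
        println!("{}", trans_call(IntrinsicCall));
    }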
@@ -716,7 +723,7 @@ pub fn trans_call_inner<'a>(
     // and done, either the return value of the function will have been
     // written in opt_llretslot (if it is Some) or `llresult` will be
     // set appropriately (otherwise).
-    if is_rust_fn {
+    if abi == synabi::Rust {
         let mut llargs = Vec::new();

         // Push the out-pointer if we use an out-pointer for this
@@ -816,13 +823,13 @@ pub enum CallArgs<'a> {
     ArgOverloadedOp(Datum<Expr>, Option<(Datum<Expr>, ast::NodeId)>),
 }

-fn trans_args<'a>(cx: &'a Block<'a>,
-                  args: CallArgs,
-                  fn_ty: ty::t,
-                  llargs: &mut Vec<ValueRef> ,
-                  arg_cleanup_scope: cleanup::ScopeId,
-                  ignore_self: bool)
-                  -> &'a Block<'a> {
+pub fn trans_args<'a>(cx: &'a Block<'a>,
+                      args: CallArgs,
+                      fn_ty: ty::t,
+                      llargs: &mut Vec<ValueRef> ,
+                      arg_cleanup_scope: cleanup::ScopeId,
+                      ignore_self: bool)
+                      -> &'a Block<'a> {
     let _icx = push_ctxt("trans_args");
     let arg_tys = ty::ty_fn_args(fn_ty);
     let variadic = ty::fn_is_variadic(fn_ty);
src/librustc/middle/trans/foreign.rs
@@ -76,7 +76,7 @@ pub fn llvm_calling_convention(ccx: &CrateContext,
     abi.for_target(os, arch).map(|abi| {
         match abi {
             RustIntrinsic => {
-                // Intrinsics are emitted by monomorphic fn
+                // Intrinsics are emitted at the call site
                 ccx.sess().bug("asked to register intrinsic fn");
             }

src/librustc/middle/trans/intrinsic.rs
@@ -10,15 +10,18 @@

 #![allow(non_uppercase_pattern_statics)]

-use arena::TypedArena;
-use lib::llvm::{SequentiallyConsistent, Acquire, Release, Xchg};
-use lib::llvm::{ValueRef, Pointer, Array, Struct};
+use lib::llvm::{SequentiallyConsistent, Acquire, Release, Xchg, ValueRef};
 use lib;
+use middle::subst;
 use middle::subst::FnSpace;
 use middle::trans::base::*;
 use middle::trans::build::*;
+use middle::trans::callee;
+use middle::trans::cleanup;
+use middle::trans::cleanup::CleanupMethods;
 use middle::trans::common::*;
 use middle::trans::datum::*;
+use middle::trans::expr;
 use middle::trans::glue;
 use middle::trans::type_of::*;
 use middle::trans::type_of;
@@ -27,7 +30,6 @@ use middle::trans::machine::llsize_of;
 use middle::trans::type_::Type;
 use middle::ty;
 use syntax::ast;
-use syntax::ast_map;
 use syntax::parse::token;
 use util::ppaux::ty_to_string;

@@ -83,492 +85,6 @@ pub fn get_simple_intrinsic(ccx: &CrateContext, item: &ast::ForeignItem) -> Opti
     Some(ccx.get_intrinsic(&name))
 }

-pub fn trans_intrinsic(ccx: &CrateContext,
-                       decl: ValueRef,
-                       item: &ast::ForeignItem,
-                       substs: &param_substs,
-                       ref_id: Option<ast::NodeId>) {
-    debug!("trans_intrinsic(item.ident={})", token::get_ident(item.ident));
-
-    fn with_overflow_instrinsic(bcx: &Block, name: &'static str, t: ty::t) {
-        let first_real_arg = bcx.fcx.arg_pos(0u);
-        let a = get_param(bcx.fcx.llfn, first_real_arg);
-        let b = get_param(bcx.fcx.llfn, first_real_arg + 1);
-        let llfn = bcx.ccx().get_intrinsic(&name);
-
-        // convert `i1` to a `bool`, and write to the out parameter
-        let val = Call(bcx, llfn, [a, b], []);
-        let result = ExtractValue(bcx, val, 0);
-        let overflow = ZExt(bcx, ExtractValue(bcx, val, 1), Type::bool(bcx.ccx()));
-        let ret = C_undef(type_of::type_of(bcx.ccx(), t));
-        let ret = InsertValue(bcx, ret, result, 0);
-        let ret = InsertValue(bcx, ret, overflow, 1);
-
-        if type_is_immediate(bcx.ccx(), t) {
-            Ret(bcx, ret);
-        } else {
-            let retptr = get_param(bcx.fcx.llfn, bcx.fcx.out_arg_pos());
-            Store(bcx, ret, retptr);
-            RetVoid(bcx);
-        }
-    }
-
-    fn volatile_load_intrinsic(bcx: &Block) {
-        let first_real_arg = bcx.fcx.arg_pos(0u);
-        let src = get_param(bcx.fcx.llfn, first_real_arg);
-
-        let val = VolatileLoad(bcx, src);
-        Ret(bcx, val);
-    }
-
-    fn volatile_store_intrinsic(bcx: &Block) {
-        let first_real_arg = bcx.fcx.arg_pos(0u);
-        let dst = get_param(bcx.fcx.llfn, first_real_arg);
-        let val = get_param(bcx.fcx.llfn, first_real_arg + 1);
-
-        VolatileStore(bcx, val, dst);
-        RetVoid(bcx);
-    }
-
-    fn copy_intrinsic(bcx: &Block, allow_overlap: bool, volatile: bool, tp_ty: ty::t) {
-        let ccx = bcx.ccx();
-        let lltp_ty = type_of::type_of(ccx, tp_ty);
-        let align = C_i32(ccx, machine::llalign_of_min(ccx, lltp_ty) as i32);
-        let size = machine::llsize_of(ccx, lltp_ty);
-        let int_size = machine::llbitsize_of_real(ccx, ccx.int_type);
-        let name = if allow_overlap {
-            if int_size == 32 {
-                "llvm.memmove.p0i8.p0i8.i32"
-            } else {
-                "llvm.memmove.p0i8.p0i8.i64"
-            }
-        } else {
-            if int_size == 32 {
-                "llvm.memcpy.p0i8.p0i8.i32"
-            } else {
-                "llvm.memcpy.p0i8.p0i8.i64"
-            }
-        };
-
-        let decl = bcx.fcx.llfn;
-        let first_real_arg = bcx.fcx.arg_pos(0u);
-        let dst_ptr = PointerCast(bcx, get_param(decl, first_real_arg), Type::i8p(ccx));
-        let src_ptr = PointerCast(bcx, get_param(decl, first_real_arg + 1), Type::i8p(ccx));
-        let count = get_param(decl, first_real_arg + 2);
-        let llfn = ccx.get_intrinsic(&name);
-        Call(bcx, llfn,
-             [dst_ptr, src_ptr, Mul(bcx, size, count), align, C_bool(ccx, volatile)], []);
-        RetVoid(bcx);
-    }
-
-    fn memset_intrinsic(bcx: &Block, volatile: bool, tp_ty: ty::t) {
-        let ccx = bcx.ccx();
-        let lltp_ty = type_of::type_of(ccx, tp_ty);
-        let align = C_i32(ccx, machine::llalign_of_min(ccx, lltp_ty) as i32);
-        let size = machine::llsize_of(ccx, lltp_ty);
-        let name = if machine::llbitsize_of_real(ccx, ccx.int_type) == 32 {
-            "llvm.memset.p0i8.i32"
-        } else {
-            "llvm.memset.p0i8.i64"
-        };
-
-        let decl = bcx.fcx.llfn;
-        let first_real_arg = bcx.fcx.arg_pos(0u);
-        let dst_ptr = PointerCast(bcx, get_param(decl, first_real_arg), Type::i8p(ccx));
-        let val = get_param(decl, first_real_arg + 1);
-        let count = get_param(decl, first_real_arg + 2);
-        let llfn = ccx.get_intrinsic(&name);
-        Call(bcx, llfn, [dst_ptr, val, Mul(bcx, size, count), align, C_bool(ccx, volatile)], []);
-        RetVoid(bcx);
-    }
-
-    fn count_zeros_intrinsic(bcx: &Block, name: &'static str) {
-        let x = get_param(bcx.fcx.llfn, bcx.fcx.arg_pos(0u));
-        let y = C_bool(bcx.ccx(), false);
-        let llfn = bcx.ccx().get_intrinsic(&name);
-        let llcall = Call(bcx, llfn, [x, y], []);
-        Ret(bcx, llcall);
-    }
-
-    let output_type = ty::ty_fn_ret(ty::node_id_to_type(ccx.tcx(), item.id));
-
-    let arena = TypedArena::new();
-    let fcx = new_fn_ctxt(ccx, decl, item.id, false, output_type,
-                          substs, Some(item.span), &arena);
-    let mut bcx = init_function(&fcx, true, output_type);
-
-    set_always_inline(fcx.llfn);
-
-    let first_real_arg = fcx.arg_pos(0u);
-
-    let name = token::get_ident(item.ident);
-
-    // This requires that atomic intrinsics follow a specific naming pattern:
-    // "atomic_<operation>[_<ordering>], and no ordering means SeqCst
-    if name.get().starts_with("atomic_") {
-        let split: Vec<&str> = name.get().split('_').collect();
-        assert!(split.len() >= 2, "Atomic intrinsic not correct format");
-        let order = if split.len() == 2 {
-            lib::llvm::SequentiallyConsistent
-        } else {
-            match *split.get(2) {
-                "relaxed" => lib::llvm::Monotonic,
-                "acq" => lib::llvm::Acquire,
-                "rel" => lib::llvm::Release,
-                "acqrel" => lib::llvm::AcquireRelease,
-                _ => ccx.sess().fatal("unknown ordering in atomic intrinsic")
-            }
-        };
-
-        match *split.get(1) {
-            "cxchg" => {
-                // See include/llvm/IR/Instructions.h for their implementation
-                // of this, I assume that it's good enough for us to use for
-                // now.
-                let strongest_failure_ordering = match order {
-                    lib::llvm::NotAtomic | lib::llvm::Unordered =>
-                        ccx.sess().fatal("cmpxchg must be atomic"),
-                    lib::llvm::Monotonic | lib::llvm::Release =>
-                        lib::llvm::Monotonic,
-                    lib::llvm::Acquire | lib::llvm::AcquireRelease =>
-                        lib::llvm::Acquire,
-                    lib::llvm::SequentiallyConsistent =>
-                        lib::llvm::SequentiallyConsistent,
-                };
-                let res = AtomicCmpXchg(bcx, get_param(decl, first_real_arg),
-                                        get_param(decl, first_real_arg + 1u),
-                                        get_param(decl, first_real_arg + 2u),
-                                        order, strongest_failure_ordering);
-                if unsafe { lib::llvm::llvm::LLVMVersionMinor() >= 5 } {
-                    Ret(bcx, ExtractValue(bcx, res, 0));
-                } else {
-                    Ret(bcx, res);
-                }
-            }
-            "load" => {
-                let old = AtomicLoad(bcx, get_param(decl, first_real_arg),
-                                     order);
-                Ret(bcx, old);
-            }
-            "store" => {
-                AtomicStore(bcx, get_param(decl, first_real_arg + 1u),
-                            get_param(decl, first_real_arg),
-                            order);
-                RetVoid(bcx);
-            }
-            "fence" => {
-                AtomicFence(bcx, order);
-                RetVoid(bcx);
-            }
-            op => {
-                // These are all AtomicRMW ops
-                let atom_op = match op {
-                    "xchg" => lib::llvm::Xchg,
-                    "xadd" => lib::llvm::Add,
-                    "xsub" => lib::llvm::Sub,
-                    "and" => lib::llvm::And,
-                    "nand" => lib::llvm::Nand,
-                    "or" => lib::llvm::Or,
-                    "xor" => lib::llvm::Xor,
-                    "max" => lib::llvm::Max,
-                    "min" => lib::llvm::Min,
-                    "umax" => lib::llvm::UMax,
-                    "umin" => lib::llvm::UMin,
-                    _ => ccx.sess().fatal("unknown atomic operation")
-                };
-
-                let old = AtomicRMW(bcx, atom_op, get_param(decl, first_real_arg),
-                                    get_param(decl, first_real_arg + 1u),
-                                    order);
-                Ret(bcx, old);
-            }
-        }
-
-        fcx.cleanup();
-        return;
-    }
-
-    match name.get() {
-        "abort" => {
-            let llfn = bcx.ccx().get_intrinsic(&("llvm.trap"));
-            Call(bcx, llfn, [], []);
-            Unreachable(bcx);
-        }
-        "breakpoint" => {
-            let llfn = bcx.ccx().get_intrinsic(&("llvm.debugtrap"));
-            Call(bcx, llfn, [], []);
-            RetVoid(bcx);
-        }
-        "size_of" => {
-            let tp_ty = *substs.substs.types.get(FnSpace, 0);
-            let lltp_ty = type_of::type_of(ccx, tp_ty);
-            Ret(bcx, C_uint(ccx, machine::llsize_of_real(ccx, lltp_ty) as uint));
-        }
-        "move_val_init" => {
-            // Create a datum reflecting the value being moved.
-            // Use `appropriate_mode` so that the datum is by ref
-            // if the value is non-immediate. Note that, with
-            // intrinsics, there are no argument cleanups to
-            // concern ourselves with, so we can use an rvalue datum.
-            let tp_ty = *substs.substs.types.get(FnSpace, 0);
-            let mode = appropriate_rvalue_mode(ccx, tp_ty);
-            let src = Datum {val: get_param(decl, first_real_arg + 1u),
-                             ty: tp_ty,
-                             kind: Rvalue::new(mode)};
-            bcx = src.store_to(bcx, get_param(decl, first_real_arg));
-            RetVoid(bcx);
-        }
-        "min_align_of" => {
-            let tp_ty = *substs.substs.types.get(FnSpace, 0);
-            let lltp_ty = type_of::type_of(ccx, tp_ty);
-            Ret(bcx, C_uint(ccx, machine::llalign_of_min(ccx, lltp_ty) as uint));
-        }
-        "pref_align_of"=> {
-            let tp_ty = *substs.substs.types.get(FnSpace, 0);
-            let lltp_ty = type_of::type_of(ccx, tp_ty);
-            Ret(bcx, C_uint(ccx, machine::llalign_of_pref(ccx, lltp_ty) as uint));
-        }
-        "get_tydesc" => {
-            let tp_ty = *substs.substs.types.get(FnSpace, 0);
-            let static_ti = get_tydesc(ccx, tp_ty);
-            glue::lazily_emit_visit_glue(ccx, &*static_ti);
-
-            // FIXME (#3730): ideally this shouldn't need a cast,
-            // but there's a circularity between translating rust types to llvm
-            // types and having a tydesc type available. So I can't directly access
-            // the llvm type of intrinsic::TyDesc struct.
-            let userland_tydesc_ty = type_of::type_of(ccx, output_type);
-            let td = PointerCast(bcx, static_ti.tydesc, userland_tydesc_ty);
-            Ret(bcx, td);
-        }
-        "type_id" => {
-            let hash = ty::hash_crate_independent(
-                ccx.tcx(),
-                *substs.substs.types.get(FnSpace, 0),
-                &ccx.link_meta.crate_hash);
-            // NB: This needs to be kept in lockstep with the TypeId struct in
-            // libstd/unstable/intrinsics.rs
-            let val = C_named_struct(type_of::type_of(ccx, output_type),
-                                     [C_u64(ccx, hash)]);
-            match bcx.fcx.llretptr.get() {
-                Some(ptr) => {
-                    Store(bcx, val, ptr);
-                    RetVoid(bcx);
-                },
-                None => Ret(bcx, val)
-            }
-        }
-        "init" => {
-            let tp_ty = *substs.substs.types.get(FnSpace, 0);
-            let lltp_ty = type_of::type_of(ccx, tp_ty);
-            match bcx.fcx.llretptr.get() {
-                Some(ptr) => { Store(bcx, C_null(lltp_ty), ptr); RetVoid(bcx); }
-                None if ty::type_is_nil(tp_ty) => RetVoid(bcx),
-                None => Ret(bcx, C_null(lltp_ty)),
-            }
-        }
-        "uninit" => {
-            // Do nothing, this is effectively a no-op
-            let retty = *substs.substs.types.get(FnSpace, 0);
-            if type_is_immediate(ccx, retty) && !return_type_is_void(ccx, retty) {
-                unsafe {
-                    Ret(bcx, lib::llvm::llvm::LLVMGetUndef(arg_type_of(ccx, retty).to_ref()));
-                }
-            } else {
-                RetVoid(bcx)
-            }
-        }
-        "forget" => {
-            RetVoid(bcx);
-        }
-        "transmute" => {
-            let (in_type, out_type) = (*substs.substs.types.get(FnSpace, 0),
-                                       *substs.substs.types.get(FnSpace, 1));
-            let llintype = type_of::type_of(ccx, in_type);
-            let llouttype = type_of::type_of(ccx, out_type);
-
-            let in_type_size = machine::llbitsize_of_real(ccx, llintype);
-            let out_type_size = machine::llbitsize_of_real(ccx, llouttype);
-            if in_type_size != out_type_size {
-                let sp = match ccx.tcx.map.get(ref_id.unwrap()) {
-                    ast_map::NodeExpr(e) => e.span,
-                    _ => fail!("transmute has non-expr arg"),
-                };
-                ccx.sess().span_bug(sp,
-                    format!("transmute called on types with different sizes: \
-                             {} ({} bit{}) to \
-                             {} ({} bit{})",
-                            ty_to_string(ccx.tcx(), in_type),
-                            in_type_size,
-                            if in_type_size == 1 {""} else {"s"},
-                            ty_to_string(ccx.tcx(), out_type),
-                            out_type_size,
-                            if out_type_size == 1 {""} else {"s"}).as_slice());
-            }
-
-            if !return_type_is_void(ccx, out_type) {
-                let llsrcval = get_param(decl, first_real_arg);
-                if type_is_immediate(ccx, in_type) {
-                    match fcx.llretptr.get() {
-                        Some(llretptr) => {
-                            Store(bcx, llsrcval, PointerCast(bcx, llretptr, llintype.ptr_to()));
-                            RetVoid(bcx);
-                        }
-                        None => match (llintype.kind(), llouttype.kind()) {
-                            (Pointer, other) | (other, Pointer) if other != Pointer => {
-                                let tmp = Alloca(bcx, llouttype, "");
-                                Store(bcx, llsrcval, PointerCast(bcx, tmp, llintype.ptr_to()));
-                                Ret(bcx, Load(bcx, tmp));
-                            }
-                            (Array, _) | (_, Array) | (Struct, _) | (_, Struct) => {
-                                let tmp = Alloca(bcx, llouttype, "");
-                                Store(bcx, llsrcval, PointerCast(bcx, tmp, llintype.ptr_to()));
-                                Ret(bcx, Load(bcx, tmp));
-                            }
-                            _ => {
-                                let llbitcast = BitCast(bcx, llsrcval, llouttype);
-                                Ret(bcx, llbitcast)
-                            }
-                        }
-                    }
-                } else if type_is_immediate(ccx, out_type) {
-                    let llsrcptr = PointerCast(bcx, llsrcval, llouttype.ptr_to());
-                    let ll_load = Load(bcx, llsrcptr);
-                    Ret(bcx, ll_load);
-                } else {
-                    // NB: Do not use a Load and Store here. This causes massive
-                    // code bloat when `transmute` is used on large structural
-                    // types.
-                    let lldestptr = fcx.llretptr.get().unwrap();
-                    let lldestptr = PointerCast(bcx, lldestptr, Type::i8p(ccx));
-                    let llsrcptr = PointerCast(bcx, llsrcval, Type::i8p(ccx));
-
-                    let llsize = llsize_of(ccx, llintype);
-                    call_memcpy(bcx, lldestptr, llsrcptr, llsize, 1);
-                    RetVoid(bcx);
-                };
-            } else {
-                RetVoid(bcx);
-            }
-        }
-        "needs_drop" => {
-            let tp_ty = *substs.substs.types.get(FnSpace, 0);
-            Ret(bcx, C_bool(ccx, ty::type_needs_drop(ccx.tcx(), tp_ty)));
-        }
-        "owns_managed" => {
-            let tp_ty = *substs.substs.types.get(FnSpace, 0);
-            Ret(bcx, C_bool(ccx, ty::type_contents(ccx.tcx(), tp_ty).owns_managed()));
-        }
-        "visit_tydesc" => {
-            let td = get_param(decl, first_real_arg);
-            let visitor = get_param(decl, first_real_arg + 1u);
-            let td = PointerCast(bcx, td, ccx.tydesc_type().ptr_to());
-            glue::call_visit_glue(bcx, visitor, td, None);
-            RetVoid(bcx);
-        }
-        "offset" => {
-            let ptr = get_param(decl, first_real_arg);
-            let offset = get_param(decl, first_real_arg + 1);
-            let lladdr = InBoundsGEP(bcx, ptr, [offset]);
-            Ret(bcx, lladdr);
-        }
-        "copy_nonoverlapping_memory" => {
-            copy_intrinsic(bcx, false, false, *substs.substs.types.get(FnSpace, 0))
-        }
-        "copy_memory" => {
-            copy_intrinsic(bcx, true, false, *substs.substs.types.get(FnSpace, 0))
-        }
-        "set_memory" => {
-            memset_intrinsic(bcx, false, *substs.substs.types.get(FnSpace, 0))
-        }
-
-        "volatile_copy_nonoverlapping_memory" => {
-            copy_intrinsic(bcx, false, true, *substs.substs.types.get(FnSpace, 0))
-        }
-
-        "volatile_copy_memory" => {
-            copy_intrinsic(bcx, true, true, *substs.substs.types.get(FnSpace, 0))
-        }
-
-        "volatile_set_memory" => {
-            memset_intrinsic(bcx, true, *substs.substs.types.get(FnSpace, 0))
-        }
-
-        "ctlz8" => count_zeros_intrinsic(bcx, "llvm.ctlz.i8"),
-        "ctlz16" => count_zeros_intrinsic(bcx, "llvm.ctlz.i16"),
-        "ctlz32" => count_zeros_intrinsic(bcx, "llvm.ctlz.i32"),
-        "ctlz64" => count_zeros_intrinsic(bcx, "llvm.ctlz.i64"),
-        "cttz8" => count_zeros_intrinsic(bcx, "llvm.cttz.i8"),
-        "cttz16" => count_zeros_intrinsic(bcx, "llvm.cttz.i16"),
-        "cttz32" => count_zeros_intrinsic(bcx, "llvm.cttz.i32"),
-        "cttz64" => count_zeros_intrinsic(bcx, "llvm.cttz.i64"),
-
-        "volatile_load" => volatile_load_intrinsic(bcx),
-        "volatile_store" => volatile_store_intrinsic(bcx),
-
-        "i8_add_with_overflow" =>
-            with_overflow_instrinsic(bcx, "llvm.sadd.with.overflow.i8", output_type),
-        "i16_add_with_overflow" =>
-            with_overflow_instrinsic(bcx, "llvm.sadd.with.overflow.i16", output_type),
-        "i32_add_with_overflow" =>
-            with_overflow_instrinsic(bcx, "llvm.sadd.with.overflow.i32", output_type),
-        "i64_add_with_overflow" =>
-            with_overflow_instrinsic(bcx, "llvm.sadd.with.overflow.i64", output_type),
-
-        "u8_add_with_overflow" =>
-            with_overflow_instrinsic(bcx, "llvm.uadd.with.overflow.i8", output_type),
-        "u16_add_with_overflow" =>
-            with_overflow_instrinsic(bcx, "llvm.uadd.with.overflow.i16", output_type),
-        "u32_add_with_overflow" =>
-            with_overflow_instrinsic(bcx, "llvm.uadd.with.overflow.i32", output_type),
-        "u64_add_with_overflow" =>
-            with_overflow_instrinsic(bcx, "llvm.uadd.with.overflow.i64", output_type),
-
-        "i8_sub_with_overflow" =>
-            with_overflow_instrinsic(bcx, "llvm.ssub.with.overflow.i8", output_type),
-        "i16_sub_with_overflow" =>
-            with_overflow_instrinsic(bcx, "llvm.ssub.with.overflow.i16", output_type),
-        "i32_sub_with_overflow" =>
-            with_overflow_instrinsic(bcx, "llvm.ssub.with.overflow.i32", output_type),
-        "i64_sub_with_overflow" =>
-            with_overflow_instrinsic(bcx, "llvm.ssub.with.overflow.i64", output_type),
-
-        "u8_sub_with_overflow" =>
-            with_overflow_instrinsic(bcx, "llvm.usub.with.overflow.i8", output_type),
-        "u16_sub_with_overflow" =>
-            with_overflow_instrinsic(bcx, "llvm.usub.with.overflow.i16", output_type),
-        "u32_sub_with_overflow" =>
-            with_overflow_instrinsic(bcx, "llvm.usub.with.overflow.i32", output_type),
-        "u64_sub_with_overflow" =>
-            with_overflow_instrinsic(bcx, "llvm.usub.with.overflow.i64", output_type),
-
-        "i8_mul_with_overflow" =>
-            with_overflow_instrinsic(bcx, "llvm.smul.with.overflow.i8", output_type),
-        "i16_mul_with_overflow" =>
-            with_overflow_instrinsic(bcx, "llvm.smul.with.overflow.i16", output_type),
-        "i32_mul_with_overflow" =>
-            with_overflow_instrinsic(bcx, "llvm.smul.with.overflow.i32", output_type),
-        "i64_mul_with_overflow" =>
-            with_overflow_instrinsic(bcx, "llvm.smul.with.overflow.i64", output_type),
-
-        "u8_mul_with_overflow" =>
-            with_overflow_instrinsic(bcx, "llvm.umul.with.overflow.i8", output_type),
-        "u16_mul_with_overflow" =>
-            with_overflow_instrinsic(bcx, "llvm.umul.with.overflow.i16", output_type),
-        "u32_mul_with_overflow" =>
-            with_overflow_instrinsic(bcx, "llvm.umul.with.overflow.i32", output_type),
-        "u64_mul_with_overflow" =>
-            with_overflow_instrinsic(bcx, "llvm.umul.with.overflow.i64", output_type),
-
-        _ => {
-            // Could we make this an enum rather than a string? does it get
-            // checked earlier?
-            ccx.sess().span_bug(item.span, "unknown intrinsic");
-        }
-    }
-    fcx.cleanup();
-}
-
 /// Performs late verification that intrinsics are used correctly. At present,
 /// the only intrinsic that needs such verification is `transmute`.
 pub fn check_intrinsics(ccx: &CrateContext) {
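The deleted with_overflow_instrinsic helper packed the LLVM {value, i1} result
into a two-field (value, overflowed) return, and its replacement below does the
same. The user-visible contract is unchanged by this commit; a hedged sketch of
that contract (the extern declaration and feature gate name are assumptions for
this era, written out instead of importing from std::intrinsics):

    #![feature(intrinsics)] // gate name assumed

    extern "rust-intrinsic" {
        fn i8_add_with_overflow(x: i8, y: i8) -> (i8, bool);
    }

    fn main() {
        let (v, o) = unsafe { i8_add_with_overflow(100i8, 100i8) };
        println!("{} {}", v, o); // -56 true: 200 wraps modulo 2^8
    }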
@@ -606,3 +122,470 @@ pub fn check_intrinsics(ccx: &CrateContext) {
     ccx.sess().abort_if_errors();
 }

+pub fn trans_intrinsic_call<'a>(mut bcx: &'a Block<'a>, node: ast::NodeId,
+                                callee_ty: ty::t, cleanup_scope: cleanup::CustomScopeIndex,
+                                args: callee::CallArgs, dest: expr::Dest,
+                                substs: subst::Substs) -> Result<'a> {
+
+    let fcx = bcx.fcx;
+    let ccx = fcx.ccx;
+    let tcx = bcx.tcx();
+
+    let ret_ty = match ty::get(callee_ty).sty {
+        ty::ty_bare_fn(ref f) => f.sig.output,
+        _ => fail!("expected bare_fn in trans_intrinsic_call")
+    };
+    let llret_ty = type_of::type_of(ccx, ret_ty);
+    let foreign_item = tcx.map.expect_foreign_item(node);
+    let name = token::get_ident(foreign_item.ident);
+
+    // For `transmute` we can just trans the input expr directly into dest
+    if name.get() == "transmute" {
+        match args {
+            callee::ArgExprs(arg_exprs) => {
+                assert_eq!(arg_exprs.len(), 1);
+
+                let (in_type, out_type) = (*substs.types.get(FnSpace, 0),
+                                           *substs.types.get(FnSpace, 1));
+                let llintype = type_of::type_of(ccx, in_type);
+                let llouttype = type_of::type_of(ccx, out_type);
+
+                let in_type_size = machine::llbitsize_of_real(ccx, llintype);
+                let out_type_size = machine::llbitsize_of_real(ccx, llouttype);
+
+                // This should be caught by the intrinsicck pass
+                assert_eq!(in_type_size, out_type_size);
+
+                // We need to cast the dest so the types work out
+                let dest = match dest {
+                    expr::SaveIn(d) => expr::SaveIn(PointerCast(bcx, d, llintype.ptr_to())),
+                    expr::Ignore => expr::Ignore
+                };
+                bcx = expr::trans_into(bcx, &*arg_exprs[0], dest);
+
+                fcx.pop_custom_cleanup_scope(cleanup_scope);
+
+                return match dest {
+                    expr::SaveIn(d) => Result::new(bcx, d),
+                    expr::Ignore => Result::new(bcx, C_undef(llret_ty.ptr_to()))
+                };
+
+            }
+
+            _ => {
+                ccx.sess().bug("expected expr as argument for transmute");
+            }
+        }
+    }
+
+    // Get location to store the result. If the user does
+    // not care about the result, just make a stack slot
+    let llresult = match dest {
+        expr::SaveIn(d) => d,
+        expr::Ignore => {
+            if !type_is_zero_size(ccx, ret_ty) {
+                alloc_ty(bcx, ret_ty, "intrinsic_result")
+            } else {
+                C_undef(llret_ty.ptr_to())
+            }
+        }
+    };
+
+    // Push the arguments.
+    let mut llargs = Vec::new();
+    bcx = callee::trans_args(bcx, args, callee_ty, &mut llargs,
+                             cleanup::CustomScope(cleanup_scope), false);
+
+    fcx.pop_custom_cleanup_scope(cleanup_scope);
+
+    let simple = get_simple_intrinsic(ccx, &*foreign_item);
+
+    let llval = match (simple, name.get()) {
+        (Some(llfn), _) => {
+            Call(bcx, llfn, llargs.as_slice(), [])
+        }
+        (_, "abort") => {
+            let llfn = ccx.get_intrinsic(&("llvm.trap"));
+            let v = Call(bcx, llfn, [], []);
+            Unreachable(bcx);
+            v
+        }
+        (_, "breakpoint") => {
+            let llfn = ccx.get_intrinsic(&("llvm.debugtrap"));
+            Call(bcx, llfn, [], [])
+        }
+        (_, "size_of") => {
+            let tp_ty = *substs.types.get(FnSpace, 0);
+            let lltp_ty = type_of::type_of(ccx, tp_ty);
+            C_uint(ccx, machine::llsize_of_real(ccx, lltp_ty) as uint)
+        }
+        (_, "min_align_of") => {
+            let tp_ty = *substs.types.get(FnSpace, 0);
+            let lltp_ty = type_of::type_of(ccx, tp_ty);
+            C_uint(ccx, machine::llalign_of_min(ccx, lltp_ty) as uint)
+        }
+        (_, "pref_align_of") => {
+            let tp_ty = *substs.types.get(FnSpace, 0);
+            let lltp_ty = type_of::type_of(ccx, tp_ty);
+            C_uint(ccx, machine::llalign_of_pref(ccx, lltp_ty) as uint)
+        }
+        (_, "move_val_init") => {
+            // Create a datum reflecting the value being moved.
+            // Use `appropriate_mode` so that the datum is by ref
+            // if the value is non-immediate. Note that, with
+            // intrinsics, there are no argument cleanups to
+            // concern ourselves with, so we can use an rvalue datum.
+            let tp_ty = *substs.types.get(FnSpace, 0);
+            let mode = appropriate_rvalue_mode(ccx, tp_ty);
+            let src = Datum {
+                val: *llargs.get(1),
+                ty: tp_ty,
+                kind: Rvalue::new(mode)
+            };
+            bcx = src.store_to(bcx, *llargs.get(0));
+            C_nil(ccx)
+        }
+        (_, "get_tydesc") => {
+            let tp_ty = *substs.types.get(FnSpace, 0);
+            let static_ti = get_tydesc(ccx, tp_ty);
+            glue::lazily_emit_visit_glue(ccx, &*static_ti);
+
+            // FIXME (#3730): ideally this shouldn't need a cast,
+            // but there's a circularity between translating rust types to llvm
+            // types and having a tydesc type available. So I can't directly access
+            // the llvm type of intrinsic::TyDesc struct.
+            PointerCast(bcx, static_ti.tydesc, llret_ty)
+        }
+        (_, "type_id") => {
+            let hash = ty::hash_crate_independent(
+                ccx.tcx(),
+                *substs.types.get(FnSpace, 0),
+                &ccx.link_meta.crate_hash);
+            // NB: This needs to be kept in lockstep with the TypeId struct in
+            // the intrinsic module
+            C_named_struct(llret_ty, [C_u64(ccx, hash)])
+        }
+        (_, "init") => {
+            let tp_ty = *substs.types.get(FnSpace, 0);
+            let lltp_ty = type_of::type_of(ccx, tp_ty);
+            if return_type_is_void(ccx, tp_ty) {
+                C_nil(ccx)
+            } else {
+                C_null(lltp_ty)
+            }
+        }
+        // Effectively no-ops
+        (_, "uninit") | (_, "forget") => {
+            C_nil(ccx)
+        }
+        (_, "needs_drop") => {
+            let tp_ty = *substs.types.get(FnSpace, 0);
+            C_bool(ccx, ty::type_needs_drop(ccx.tcx(), tp_ty))
+        }
+        (_, "owns_managed") => {
+            let tp_ty = *substs.types.get(FnSpace, 0);
+            C_bool(ccx, ty::type_contents(ccx.tcx(), tp_ty).owns_managed())
+        }
+        (_, "visit_tydesc") => {
+            let td = *llargs.get(0);
+            let visitor = *llargs.get(1);
+            let td = PointerCast(bcx, td, ccx.tydesc_type().ptr_to());
+            glue::call_visit_glue(bcx, visitor, td, None);
+            C_nil(ccx)
+        }
+        (_, "offset") => {
+            let ptr = *llargs.get(0);
+            let offset = *llargs.get(1);
+            InBoundsGEP(bcx, ptr, [offset])
+        }
+
+        (_, "copy_nonoverlapping_memory") => {
+            copy_intrinsic(bcx, false, false, *substs.types.get(FnSpace, 0),
+                           *llargs.get(0), *llargs.get(1), *llargs.get(2))
+        }
+        (_, "copy_memory") => {
+            copy_intrinsic(bcx, true, false, *substs.types.get(FnSpace, 0),
+                           *llargs.get(0), *llargs.get(1), *llargs.get(2))
+        }
+        (_, "set_memory") => {
+            memset_intrinsic(bcx, false, *substs.types.get(FnSpace, 0),
+                             *llargs.get(0), *llargs.get(1), *llargs.get(2))
+        }
+
+        (_, "volatile_copy_nonoverlapping_memory") => {
+            copy_intrinsic(bcx, false, true, *substs.types.get(FnSpace, 0),
+                           *llargs.get(0), *llargs.get(1), *llargs.get(2))
+        }
+        (_, "volatile_copy_memory") => {
+            copy_intrinsic(bcx, true, true, *substs.types.get(FnSpace, 0),
+                           *llargs.get(0), *llargs.get(1), *llargs.get(2))
+        }
+        (_, "volatile_set_memory") => {
+            memset_intrinsic(bcx, true, *substs.types.get(FnSpace, 0),
+                             *llargs.get(0), *llargs.get(1), *llargs.get(2))
+        }
+        (_, "volatile_load") => {
+            VolatileLoad(bcx, *llargs.get(0))
+        },
+        (_, "volatile_store") => {
+            VolatileStore(bcx, *llargs.get(1), *llargs.get(0));
+            C_nil(ccx)
+        },
+
+        (_, "ctlz8") => count_zeros_intrinsic(bcx, "llvm.ctlz.i8", *llargs.get(0)),
+        (_, "ctlz16") => count_zeros_intrinsic(bcx, "llvm.ctlz.i16", *llargs.get(0)),
+        (_, "ctlz32") => count_zeros_intrinsic(bcx, "llvm.ctlz.i32", *llargs.get(0)),
+        (_, "ctlz64") => count_zeros_intrinsic(bcx, "llvm.ctlz.i64", *llargs.get(0)),
+        (_, "cttz8") => count_zeros_intrinsic(bcx, "llvm.cttz.i8", *llargs.get(0)),
+        (_, "cttz16") => count_zeros_intrinsic(bcx, "llvm.cttz.i16", *llargs.get(0)),
+        (_, "cttz32") => count_zeros_intrinsic(bcx, "llvm.cttz.i32", *llargs.get(0)),
+        (_, "cttz64") => count_zeros_intrinsic(bcx, "llvm.cttz.i64", *llargs.get(0)),
+
+        (_, "i8_add_with_overflow") =>
+            with_overflow_intrinsic(bcx, "llvm.sadd.with.overflow.i8", ret_ty,
+                                    *llargs.get(0), *llargs.get(1)),
+        (_, "i16_add_with_overflow") =>
+            with_overflow_intrinsic(bcx, "llvm.sadd.with.overflow.i16", ret_ty,
+                                    *llargs.get(0), *llargs.get(1)),
+        (_, "i32_add_with_overflow") =>
+            with_overflow_intrinsic(bcx, "llvm.sadd.with.overflow.i32", ret_ty,
+                                    *llargs.get(0), *llargs.get(1)),
+        (_, "i64_add_with_overflow") =>
+            with_overflow_intrinsic(bcx, "llvm.sadd.with.overflow.i64", ret_ty,
+                                    *llargs.get(0), *llargs.get(1)),
+
+        (_, "u8_add_with_overflow") =>
+            with_overflow_intrinsic(bcx, "llvm.uadd.with.overflow.i8", ret_ty,
+                                    *llargs.get(0), *llargs.get(1)),
+        (_, "u16_add_with_overflow") =>
+            with_overflow_intrinsic(bcx, "llvm.uadd.with.overflow.i16", ret_ty,
+                                    *llargs.get(0), *llargs.get(1)),
+        (_, "u32_add_with_overflow") =>
+            with_overflow_intrinsic(bcx, "llvm.uadd.with.overflow.i32", ret_ty,
+                                    *llargs.get(0), *llargs.get(1)),
+        (_, "u64_add_with_overflow") =>
+            with_overflow_intrinsic(bcx, "llvm.uadd.with.overflow.i64", ret_ty,
+                                    *llargs.get(0), *llargs.get(1)),
+
+        (_, "i8_sub_with_overflow") =>
+            with_overflow_intrinsic(bcx, "llvm.ssub.with.overflow.i8", ret_ty,
+                                    *llargs.get(0), *llargs.get(1)),
+        (_, "i16_sub_with_overflow") =>
+            with_overflow_intrinsic(bcx, "llvm.ssub.with.overflow.i16", ret_ty,
+                                    *llargs.get(0), *llargs.get(1)),
+        (_, "i32_sub_with_overflow") =>
+            with_overflow_intrinsic(bcx, "llvm.ssub.with.overflow.i32", ret_ty,
+                                    *llargs.get(0), *llargs.get(1)),
+        (_, "i64_sub_with_overflow") =>
+            with_overflow_intrinsic(bcx, "llvm.ssub.with.overflow.i64", ret_ty,
+                                    *llargs.get(0), *llargs.get(1)),
+
+        (_, "u8_sub_with_overflow") =>
+            with_overflow_intrinsic(bcx, "llvm.usub.with.overflow.i8", ret_ty,
+                                    *llargs.get(0), *llargs.get(1)),
+        (_, "u16_sub_with_overflow") =>
+            with_overflow_intrinsic(bcx, "llvm.usub.with.overflow.i16", ret_ty,
+                                    *llargs.get(0), *llargs.get(1)),
+        (_, "u32_sub_with_overflow") =>
+            with_overflow_intrinsic(bcx, "llvm.usub.with.overflow.i32", ret_ty,
+                                    *llargs.get(0), *llargs.get(1)),
+        (_, "u64_sub_with_overflow") =>
+            with_overflow_intrinsic(bcx, "llvm.usub.with.overflow.i64", ret_ty,
+                                    *llargs.get(0), *llargs.get(1)),
+
+        (_, "i8_mul_with_overflow") =>
+            with_overflow_intrinsic(bcx, "llvm.smul.with.overflow.i8", ret_ty,
+                                    *llargs.get(0), *llargs.get(1)),
+        (_, "i16_mul_with_overflow") =>
+            with_overflow_intrinsic(bcx, "llvm.smul.with.overflow.i16", ret_ty,
+                                    *llargs.get(0), *llargs.get(1)),
+        (_, "i32_mul_with_overflow") =>
+            with_overflow_intrinsic(bcx, "llvm.smul.with.overflow.i32", ret_ty,
+                                    *llargs.get(0), *llargs.get(1)),
+        (_, "i64_mul_with_overflow") =>
+            with_overflow_intrinsic(bcx, "llvm.smul.with.overflow.i64", ret_ty,
+                                    *llargs.get(0), *llargs.get(1)),
+
+        (_, "u8_mul_with_overflow") =>
+            with_overflow_intrinsic(bcx, "llvm.umul.with.overflow.i8", ret_ty,
+                                    *llargs.get(0), *llargs.get(1)),
+        (_, "u16_mul_with_overflow") =>
+            with_overflow_intrinsic(bcx, "llvm.umul.with.overflow.i16", ret_ty,
+                                    *llargs.get(0), *llargs.get(1)),
+        (_, "u32_mul_with_overflow") =>
+            with_overflow_intrinsic(bcx, "llvm.umul.with.overflow.i32", ret_ty,
+                                    *llargs.get(0), *llargs.get(1)),
+        (_, "u64_mul_with_overflow") =>
+            with_overflow_intrinsic(bcx, "llvm.umul.with.overflow.i64", ret_ty,
+                                    *llargs.get(0), *llargs.get(1)),
+
+        // This requires that atomic intrinsics follow a specific naming pattern:
+        // "atomic_<operation>[_<ordering>]", and no ordering means SeqCst
+        (_, name) if name.starts_with("atomic_") => {
+            let split: Vec<&str> = name.split('_').collect();
+            assert!(split.len() >= 2, "Atomic intrinsic not correct format");
+
+            let order = if split.len() == 2 {
+                lib::llvm::SequentiallyConsistent
+            } else {
+                match *split.get(2) {
+                    "relaxed" => lib::llvm::Monotonic,
+                    "acq" => lib::llvm::Acquire,
+                    "rel" => lib::llvm::Release,
+                    "acqrel" => lib::llvm::AcquireRelease,
+                    _ => ccx.sess().fatal("unknown ordering in atomic intrinsic")
+                }
+            };
+
+            match *split.get(1) {
+                "cxchg" => {
+                    // See include/llvm/IR/Instructions.h for their implementation
+                    // of this, I assume that it's good enough for us to use for
+                    // now.
+                    let strongest_failure_ordering = match order {
+                        lib::llvm::NotAtomic | lib::llvm::Unordered =>
+                            ccx.sess().fatal("cmpxchg must be atomic"),
+
+                        lib::llvm::Monotonic | lib::llvm::Release =>
+                            lib::llvm::Monotonic,
+
+                        lib::llvm::Acquire | lib::llvm::AcquireRelease =>
+                            lib::llvm::Acquire,
+
+                        lib::llvm::SequentiallyConsistent =>
+                            lib::llvm::SequentiallyConsistent
+                    };
+
+                    let res = AtomicCmpXchg(bcx, *llargs.get(0), *llargs.get(1),
+                                            *llargs.get(2), order,
+                                            strongest_failure_ordering);
+                    if unsafe { lib::llvm::llvm::LLVMVersionMinor() >= 5 } {
+                        ExtractValue(bcx, res, 0)
+                    } else {
+                        res
+                    }
+                }
+
+                "load" => {
+                    AtomicLoad(bcx, *llargs.get(0), order)
+                }
+                "store" => {
+                    AtomicStore(bcx, *llargs.get(1), *llargs.get(0), order);
+                    C_nil(ccx)
+                }
+
+                "fence" => {
+                    AtomicFence(bcx, order);
+                    C_nil(ccx)
+                }
+
+                // These are all AtomicRMW ops
+                op => {
+                    let atom_op = match op {
+                        "xchg" => lib::llvm::Xchg,
+                        "xadd" => lib::llvm::Add,
+                        "xsub" => lib::llvm::Sub,
+                        "and" => lib::llvm::And,
+                        "nand" => lib::llvm::Nand,
+                        "or" => lib::llvm::Or,
+                        "xor" => lib::llvm::Xor,
+                        "max" => lib::llvm::Max,
+                        "min" => lib::llvm::Min,
+                        "umax" => lib::llvm::UMax,
+                        "umin" => lib::llvm::UMin,
+                        _ => ccx.sess().fatal("unknown atomic operation")
+                    };
+
+                    AtomicRMW(bcx, atom_op, *llargs.get(0), *llargs.get(1), order)
+                }
+            }
+
+        }
+
+        (_, _) => ccx.sess().span_bug(foreign_item.span, "unknown intrinsic")
+    };
+
+    if val_ty(llval) != Type::void(ccx) &&
+       machine::llsize_of_alloc(ccx, val_ty(llval)) != 0 {
+        store_ty(bcx, llval, llresult, ret_ty);
+    }
+
+    // If we made a temporary stack slot, let's clean it up
+    match dest {
+        expr::Ignore => {
+            bcx = glue::drop_ty(bcx, llresult, ret_ty);
+        }
+        expr::SaveIn(_) => {}
+    }
+
+    Result::new(bcx, llresult)
+}
+
+fn copy_intrinsic(bcx: &Block, allow_overlap: bool, volatile: bool,
+                  tp_ty: ty::t, dst: ValueRef, src: ValueRef, count: ValueRef) -> ValueRef {
+    let ccx = bcx.ccx();
+    let lltp_ty = type_of::type_of(ccx, tp_ty);
+    let align = C_i32(ccx, machine::llalign_of_min(ccx, lltp_ty) as i32);
+    let size = machine::llsize_of(ccx, lltp_ty);
+    let int_size = machine::llbitsize_of_real(ccx, ccx.int_type);
+    let name = if allow_overlap {
+        if int_size == 32 {
+            "llvm.memmove.p0i8.p0i8.i32"
+        } else {
+            "llvm.memmove.p0i8.p0i8.i64"
+        }
+    } else {
+        if int_size == 32 {
+            "llvm.memcpy.p0i8.p0i8.i32"
+        } else {
+            "llvm.memcpy.p0i8.p0i8.i64"
+        }
+    };
+
+    let dst_ptr = PointerCast(bcx, dst, Type::i8p(ccx));
+    let src_ptr = PointerCast(bcx, src, Type::i8p(ccx));
+    let llfn = ccx.get_intrinsic(&name);
+
+    Call(bcx, llfn, [dst_ptr, src_ptr, Mul(bcx, size, count), align,
+                     C_bool(ccx, volatile)], [])
+}
+
+fn memset_intrinsic(bcx: &Block, volatile: bool, tp_ty: ty::t,
+                    dst: ValueRef, val: ValueRef, count: ValueRef) -> ValueRef {
+    let ccx = bcx.ccx();
+    let lltp_ty = type_of::type_of(ccx, tp_ty);
+    let align = C_i32(ccx, machine::llalign_of_min(ccx, lltp_ty) as i32);
+    let size = machine::llsize_of(ccx, lltp_ty);
+    let name = if machine::llbitsize_of_real(ccx, ccx.int_type) == 32 {
+        "llvm.memset.p0i8.i32"
+    } else {
+        "llvm.memset.p0i8.i64"
+    };
+
+    let dst_ptr = PointerCast(bcx, dst, Type::i8p(ccx));
+    let llfn = ccx.get_intrinsic(&name);
+
+    Call(bcx, llfn, [dst_ptr, val, Mul(bcx, size, count), align,
+                     C_bool(ccx, volatile)], [])
+}
+
+fn count_zeros_intrinsic(bcx: &Block, name: &'static str, val: ValueRef) -> ValueRef {
+    let y = C_bool(bcx.ccx(), false);
+    let llfn = bcx.ccx().get_intrinsic(&name);
+    Call(bcx, llfn, [val, y], [])
+}
+
+fn with_overflow_intrinsic(bcx: &Block, name: &'static str, t: ty::t,
+                           a: ValueRef, b: ValueRef) -> ValueRef {
+    let llfn = bcx.ccx().get_intrinsic(&name);
+
+    // Convert `i1` to a `bool`, and write it to the out parameter
+    let val = Call(bcx, llfn, [a, b], []);
+    let result = ExtractValue(bcx, val, 0);
+    let overflow = ZExt(bcx, ExtractValue(bcx, val, 1), Type::bool(bcx.ccx()));
+    let ret = C_undef(type_of::type_of(bcx.ccx(), t));
+    let ret = InsertValue(bcx, ret, result, 0);
+    let ret = InsertValue(bcx, ret, overflow, 1);
+
+    ret
+}
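The atomic arm above parses "atomic_<operation>[_<ordering>]": no suffix means
SequentiallyConsistent, and relaxed/acq/rel/acqrel map to Monotonic, Acquire,
Release and AcquireRelease. A hedged sketch of the caller's side (declarations
written out for illustration; the real ones live in std::intrinsics, and the
exact signatures and gate name here are assumptions):

    #![feature(intrinsics)] // gate name assumed

    extern "rust-intrinsic" {
        fn atomic_xadd(dst: *mut int, src: int) -> int;         // no suffix: SeqCst
        fn atomic_xadd_relaxed(dst: *mut int, src: int) -> int; // "relaxed": Monotonic
    }

    fn main() {
        let mut x = 40;
        let old = unsafe { atomic_xadd(&mut x as *mut int, 2) };
        println!("{} -> {}", old, x); // 40 -> 42
    }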
src/librustc/middle/trans/monomorphize.rs
@@ -18,7 +18,6 @@ use middle::trans::base::{trans_enum_variant, push_ctxt, get_item_val};
 use middle::trans::base::{trans_fn, decl_internal_rust_fn};
 use middle::trans::base;
 use middle::trans::common::*;
-use middle::trans::intrinsic;
 use middle::ty;
 use middle::typeck;
 use util::ppaux::Repr;
@@ -158,17 +157,6 @@ pub fn monomorphic_fn(ccx: &CrateContext,
             }
         }
     }
-    ast_map::NodeForeignItem(i) => {
-        let simple = intrinsic::get_simple_intrinsic(ccx, &*i);
-        match simple {
-            Some(decl) => decl,
-            None => {
-                let d = mk_lldecl();
-                intrinsic::trans_intrinsic(ccx, d, &*i, &psubsts, ref_id);
-                d
-            }
-        }
-    }
     ast_map::NodeVariant(v) => {
         let parent = ccx.tcx.map.get_parent(fn_id.node);
         let tvs = ty::enum_variants(ccx.tcx(), local_def(parent));
@@ -223,6 +211,7 @@ pub fn monomorphic_fn(ccx: &CrateContext,
     }

     // Ugh -- but this ensures any new variants won't be forgotten
+    ast_map::NodeForeignItem(..) |
     ast_map::NodeLifetime(..) |
     ast_map::NodeExpr(..) |
     ast_map::NodeStmt(..) |
src/librustc/middle/trans/type_of.rs
@@ -75,11 +75,13 @@ pub fn type_of_fn_from_ty(cx: &CrateContext, fty: ty::t) -> Type {
         type_of_rust_fn(cx, true, f.sig.inputs.as_slice(), f.sig.output)
     }
     ty::ty_bare_fn(ref f) => {
-        if f.abi == abi::Rust || f.abi == abi::RustIntrinsic {
+        if f.abi == abi::Rust {
             type_of_rust_fn(cx,
                             false,
                             f.sig.inputs.as_slice(),
                             f.sig.output)
+        } else if f.abi == abi::RustIntrinsic {
+            cx.sess().bug("type_of_fn_from_ty given intrinsic")
         } else {
             foreign::lltype_for_foreign_fn(cx, fty)
         }
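With this hunk, asking for the LLVM type of a bare fn with the RustIntrinsic
ABI is a compiler bug rather than a valid request: since intrinsics are now
expanded at the call site, they have no function value to materialize. A
user-level illustration (std::mem::transmute is the safe re-export of the
intrinsic; the comment about reification is an inference from these bug paths,
not a diagnostic added by this hunk):

    use std::mem::transmute;

    fn main() {
        // Fine: a direct call, expanded in place by trans_intrinsic_call.
        let bits: u32 = unsafe { transmute(1.0f32) };
        // let f = transmute::<f32, u32>; // reifying the intrinsic would need
        //                                // type_of_fn_from_ty and hit the bug path
        println!("{}", bits);
    }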