diff --git a/src/librustc_codegen_llvm/context.rs b/src/librustc_codegen_llvm/context.rs
index 9b1de3d44ea..b774d7c5def 100644
--- a/src/librustc_codegen_llvm/context.rs
+++ b/src/librustc_codegen_llvm/context.rs
@@ -522,6 +522,15 @@ fn declare_intrinsic(cx: &CodegenCx, key: &str) -> Option<ValueRef> {
     let t_f32 = Type::f32(cx);
     let t_f64 = Type::f64(cx);
 
+    let t_v2f32 = Type::vector(&t_f32, 2);
+    let t_v4f32 = Type::vector(&t_f32, 4);
+    let t_v8f32 = Type::vector(&t_f32, 8);
+    let t_v16f32 = Type::vector(&t_f32, 16);
+
+    let t_v2f64 = Type::vector(&t_f64, 2);
+    let t_v4f64 = Type::vector(&t_f64, 4);
+    let t_v8f64 = Type::vector(&t_f64, 8);
+
     ifn!("llvm.memcpy.p0i8.p0i8.i16", fn(i8p, i8p, t_i16, t_i32, i1) -> void);
     ifn!("llvm.memcpy.p0i8.p0i8.i32", fn(i8p, i8p, t_i32, t_i32, i1) -> void);
     ifn!("llvm.memcpy.p0i8.p0i8.i64", fn(i8p, i8p, t_i64, t_i32, i1) -> void);
@@ -537,37 +546,145 @@ fn declare_intrinsic(cx: &CodegenCx, key: &str) -> Option<ValueRef> {
     ifn!("llvm.frameaddress", fn(t_i32) -> i8p);
 
     ifn!("llvm.powi.f32", fn(t_f32, t_i32) -> t_f32);
+    ifn!("llvm.powi.v2f32", fn(t_v2f32, t_i32) -> t_v2f32);
+    ifn!("llvm.powi.v4f32", fn(t_v4f32, t_i32) -> t_v4f32);
+    ifn!("llvm.powi.v8f32", fn(t_v8f32, t_i32) -> t_v8f32);
+    ifn!("llvm.powi.v16f32", fn(t_v16f32, t_i32) -> t_v16f32);
     ifn!("llvm.powi.f64", fn(t_f64, t_i32) -> t_f64);
+    ifn!("llvm.powi.v2f64", fn(t_v2f64, t_i32) -> t_v2f64);
+    ifn!("llvm.powi.v4f64", fn(t_v4f64, t_i32) -> t_v4f64);
+    ifn!("llvm.powi.v8f64", fn(t_v8f64, t_i32) -> t_v8f64);
+
     ifn!("llvm.pow.f32", fn(t_f32, t_f32) -> t_f32);
+    ifn!("llvm.pow.v2f32", fn(t_v2f32, t_v2f32) -> t_v2f32);
+    ifn!("llvm.pow.v4f32", fn(t_v4f32, t_v4f32) -> t_v4f32);
+    ifn!("llvm.pow.v8f32", fn(t_v8f32, t_v8f32) -> t_v8f32);
+    ifn!("llvm.pow.v16f32", fn(t_v16f32, t_v16f32) -> t_v16f32);
     ifn!("llvm.pow.f64", fn(t_f64, t_f64) -> t_f64);
+    ifn!("llvm.pow.v2f64", fn(t_v2f64, t_v2f64) -> t_v2f64);
+    ifn!("llvm.pow.v4f64", fn(t_v4f64, t_v4f64) -> t_v4f64);
+    ifn!("llvm.pow.v8f64", fn(t_v8f64, t_v8f64) -> t_v8f64);
 
     ifn!("llvm.sqrt.f32", fn(t_f32) -> t_f32);
+    ifn!("llvm.sqrt.v2f32", fn(t_v2f32) -> t_v2f32);
+    ifn!("llvm.sqrt.v4f32", fn(t_v4f32) -> t_v4f32);
+    ifn!("llvm.sqrt.v8f32", fn(t_v8f32) -> t_v8f32);
+    ifn!("llvm.sqrt.v16f32", fn(t_v16f32) -> t_v16f32);
     ifn!("llvm.sqrt.f64", fn(t_f64) -> t_f64);
+    ifn!("llvm.sqrt.v2f64", fn(t_v2f64) -> t_v2f64);
+    ifn!("llvm.sqrt.v4f64", fn(t_v4f64) -> t_v4f64);
+    ifn!("llvm.sqrt.v8f64", fn(t_v8f64) -> t_v8f64);
+
     ifn!("llvm.sin.f32", fn(t_f32) -> t_f32);
+    ifn!("llvm.sin.v2f32", fn(t_v2f32) -> t_v2f32);
+    ifn!("llvm.sin.v4f32", fn(t_v4f32) -> t_v4f32);
+    ifn!("llvm.sin.v8f32", fn(t_v8f32) -> t_v8f32);
+    ifn!("llvm.sin.v16f32", fn(t_v16f32) -> t_v16f32);
     ifn!("llvm.sin.f64", fn(t_f64) -> t_f64);
+    ifn!("llvm.sin.v2f64", fn(t_v2f64) -> t_v2f64);
+    ifn!("llvm.sin.v4f64", fn(t_v4f64) -> t_v4f64);
+    ifn!("llvm.sin.v8f64", fn(t_v8f64) -> t_v8f64);
+
     ifn!("llvm.cos.f32", fn(t_f32) -> t_f32);
+    ifn!("llvm.cos.v2f32", fn(t_v2f32) -> t_v2f32);
+    ifn!("llvm.cos.v4f32", fn(t_v4f32) -> t_v4f32);
+    ifn!("llvm.cos.v8f32", fn(t_v8f32) -> t_v8f32);
+    ifn!("llvm.cos.v16f32", fn(t_v16f32) -> t_v16f32);
     ifn!("llvm.cos.f64", fn(t_f64) -> t_f64);
+    ifn!("llvm.cos.v2f64", fn(t_v2f64) -> t_v2f64);
+    ifn!("llvm.cos.v4f64", fn(t_v4f64) -> t_v4f64);
+    ifn!("llvm.cos.v8f64", fn(t_v8f64) -> t_v8f64);
+
     ifn!("llvm.exp.f32", fn(t_f32) -> t_f32);
+    ifn!("llvm.exp.v2f32", fn(t_v2f32) -> t_v2f32);
+    ifn!("llvm.exp.v4f32", fn(t_v4f32) -> t_v4f32);
+    ifn!("llvm.exp.v8f32", fn(t_v8f32) -> t_v8f32);
ifn!("llvm.exp.v16f32", fn(t_v16f32) -> t_v16f32); ifn!("llvm.exp.f64", fn(t_f64) -> t_f64); + ifn!("llvm.exp.v2f64", fn(t_v2f64) -> t_v2f64); + ifn!("llvm.exp.v4f64", fn(t_v4f64) -> t_v4f64); + ifn!("llvm.exp.v8f64", fn(t_v8f64) -> t_v8f64); + ifn!("llvm.exp2.f32", fn(t_f32) -> t_f32); + ifn!("llvm.exp2.v2f32", fn(t_v2f32) -> t_v2f32); + ifn!("llvm.exp2.v4f32", fn(t_v4f32) -> t_v4f32); + ifn!("llvm.exp2.v8f32", fn(t_v8f32) -> t_v8f32); + ifn!("llvm.exp2.v16f32", fn(t_v16f32) -> t_v16f32); ifn!("llvm.exp2.f64", fn(t_f64) -> t_f64); + ifn!("llvm.exp2.v2f64", fn(t_v2f64) -> t_v2f64); + ifn!("llvm.exp2.v4f64", fn(t_v4f64) -> t_v4f64); + ifn!("llvm.exp2.v8f64", fn(t_v8f64) -> t_v8f64); + ifn!("llvm.log.f32", fn(t_f32) -> t_f32); + ifn!("llvm.log.v2f32", fn(t_v2f32) -> t_v2f32); + ifn!("llvm.log.v4f32", fn(t_v4f32) -> t_v4f32); + ifn!("llvm.log.v8f32", fn(t_v8f32) -> t_v8f32); + ifn!("llvm.log.v16f32", fn(t_v16f32) -> t_v16f32); ifn!("llvm.log.f64", fn(t_f64) -> t_f64); + ifn!("llvm.log.v2f64", fn(t_v2f64) -> t_v2f64); + ifn!("llvm.log.v4f64", fn(t_v4f64) -> t_v4f64); + ifn!("llvm.log.v8f64", fn(t_v8f64) -> t_v8f64); + ifn!("llvm.log10.f32", fn(t_f32) -> t_f32); + ifn!("llvm.log10.v2f32", fn(t_v2f32) -> t_v2f32); + ifn!("llvm.log10.v4f32", fn(t_v4f32) -> t_v4f32); + ifn!("llvm.log10.v8f32", fn(t_v8f32) -> t_v8f32); + ifn!("llvm.log10.v16f32", fn(t_v16f32) -> t_v16f32); ifn!("llvm.log10.f64", fn(t_f64) -> t_f64); + ifn!("llvm.log10.v2f64", fn(t_v2f64) -> t_v2f64); + ifn!("llvm.log10.v4f64", fn(t_v4f64) -> t_v4f64); + ifn!("llvm.log10.v8f64", fn(t_v8f64) -> t_v8f64); + ifn!("llvm.log2.f32", fn(t_f32) -> t_f32); + ifn!("llvm.log2.v2f32", fn(t_v2f32) -> t_v2f32); + ifn!("llvm.log2.v4f32", fn(t_v4f32) -> t_v4f32); + ifn!("llvm.log2.v8f32", fn(t_v8f32) -> t_v8f32); + ifn!("llvm.log2.v16f32", fn(t_v16f32) -> t_v16f32); ifn!("llvm.log2.f64", fn(t_f64) -> t_f64); + ifn!("llvm.log2.v2f64", fn(t_v2f64) -> t_v2f64); + ifn!("llvm.log2.v4f64", fn(t_v4f64) -> t_v4f64); + ifn!("llvm.log2.v8f64", fn(t_v8f64) -> t_v8f64); ifn!("llvm.fma.f32", fn(t_f32, t_f32, t_f32) -> t_f32); + ifn!("llvm.fma.v2f32", fn(t_v2f32, t_v2f32, t_v2f32) -> t_v2f32); + ifn!("llvm.fma.v4f32", fn(t_v4f32, t_v4f32, t_v4f32) -> t_v4f32); + ifn!("llvm.fma.v8f32", fn(t_v8f32, t_v8f32, t_v8f32) -> t_v8f32); + ifn!("llvm.fma.v16f32", fn(t_v16f32, t_v16f32, t_v16f32) -> t_v16f32); ifn!("llvm.fma.f64", fn(t_f64, t_f64, t_f64) -> t_f64); + ifn!("llvm.fma.v2f64", fn(t_v2f64, t_v2f64, t_v2f64) -> t_v2f64); + ifn!("llvm.fma.v4f64", fn(t_v4f64, t_v4f64, t_v4f64) -> t_v4f64); + ifn!("llvm.fma.v8f64", fn(t_v8f64, t_v8f64, t_v8f64) -> t_v8f64); ifn!("llvm.fabs.f32", fn(t_f32) -> t_f32); + ifn!("llvm.fabs.v2f32", fn(t_v2f32) -> t_v2f32); + ifn!("llvm.fabs.v4f32", fn(t_v4f32) -> t_v4f32); + ifn!("llvm.fabs.v8f32", fn(t_v8f32) -> t_v8f32); + ifn!("llvm.fabs.v16f32", fn(t_v16f32) -> t_v16f32); ifn!("llvm.fabs.f64", fn(t_f64) -> t_f64); + ifn!("llvm.fabs.v2f64", fn(t_v2f64) -> t_v2f64); + ifn!("llvm.fabs.v4f64", fn(t_v4f64) -> t_v4f64); + ifn!("llvm.fabs.v8f64", fn(t_v8f64) -> t_v8f64); ifn!("llvm.floor.f32", fn(t_f32) -> t_f32); + ifn!("llvm.floor.v2f32", fn(t_v2f32) -> t_v2f32); + ifn!("llvm.floor.v4f32", fn(t_v4f32) -> t_v4f32); + ifn!("llvm.floor.v8f32", fn(t_v8f32) -> t_v8f32); + ifn!("llvm.floor.v16f32", fn(t_v16f32) -> t_v16f32); ifn!("llvm.floor.f64", fn(t_f64) -> t_f64); + ifn!("llvm.floor.v2f64", fn(t_v2f64) -> t_v2f64); + ifn!("llvm.floor.v4f64", fn(t_v4f64) -> t_v4f64); + ifn!("llvm.floor.v8f64", fn(t_v8f64) -> t_v8f64); + ifn!("llvm.ceil.f32", 
     ifn!("llvm.ceil.f32", fn(t_f32) -> t_f32);
+    ifn!("llvm.ceil.v2f32", fn(t_v2f32) -> t_v2f32);
+    ifn!("llvm.ceil.v4f32", fn(t_v4f32) -> t_v4f32);
+    ifn!("llvm.ceil.v8f32", fn(t_v8f32) -> t_v8f32);
+    ifn!("llvm.ceil.v16f32", fn(t_v16f32) -> t_v16f32);
     ifn!("llvm.ceil.f64", fn(t_f64) -> t_f64);
+    ifn!("llvm.ceil.v2f64", fn(t_v2f64) -> t_v2f64);
+    ifn!("llvm.ceil.v4f64", fn(t_v4f64) -> t_v4f64);
+    ifn!("llvm.ceil.v8f64", fn(t_v8f64) -> t_v8f64);
+
     ifn!("llvm.trunc.f32", fn(t_f32) -> t_f32);
     ifn!("llvm.trunc.f64", fn(t_f64) -> t_f64);
 
diff --git a/src/librustc_codegen_llvm/intrinsic.rs b/src/librustc_codegen_llvm/intrinsic.rs
index cffe7f79e97..71641a7f248 100644
--- a/src/librustc_codegen_llvm/intrinsic.rs
+++ b/src/librustc_codegen_llvm/intrinsic.rs
@@ -1140,6 +1140,377 @@ fn generic_simd_intrinsic<'a, 'tcx>(
         return Ok(bx.select(m_i1s, args[1].immediate(), args[2].immediate()));
     }
 
+    fn simd_simple_float_intrinsic<'a, 'tcx>(name: &str, in_elem: &::rustc::ty::TyS, in_ty: &::rustc::ty::TyS,
+                                             in_len: usize, bx: &Builder<'a, 'tcx>, span: Span,
+                                             args: &[OperandRef<'tcx>])
+                                             -> Result<ValueRef, ()> {
+        macro_rules! emit_error {
+            ($msg: tt) => {
+                emit_error!($msg, )
+            };
+            ($msg: tt, $($fmt: tt)*) => {
+                span_invalid_monomorphization_error(
+                    bx.sess(), span,
+                    &format!(concat!("invalid monomorphization of `{}` intrinsic: ",
+                                     $msg),
+                             name, $($fmt)*));
+            }
+        }
+        macro_rules! return_error {
+            ($($fmt: tt)*) => {
+                {
+                    emit_error!($($fmt)*);
+                    return Err(());
+                }
+            }
+        }
+        let ety = match in_elem.sty {
+            ty::TyFloat(f) if f.bit_width() == 32 => {
+                if in_len < 2 || in_len > 16 {
+                    return_error!("unsupported floating-point vector `{}` with length `{}` out-of-range [2, 16]",
+                                  in_ty, in_len);
+                }
+                "f32"
+            },
+            ty::TyFloat(f) if f.bit_width() == 64 => {
+                if in_len < 2 || in_len > 8 {
+                    return_error!("unsupported floating-point vector `{}` with length `{}` out-of-range [2, 8]",
+                                  in_ty, in_len);
+                }
+                "f64"
+            },
+            ty::TyFloat(f) => {
+                return_error!("unsupported element type `{}` of floating-point vector `{}`",
+                              f, in_ty);
+            },
+            _ => {
+                return_error!("`{}` is not a floating-point type", in_ty);
+            }
+        };
+
+        let llvm_name = &format!("llvm.{0}.v{1}{2}", name, in_len, ety);
+        let intrinsic = bx.cx.get_intrinsic(&llvm_name);
+        return Ok(bx.call(intrinsic,
+                          &args.iter().map(|arg| arg.immediate()).collect::<Vec<_>>(),
+                          None));
+    }
+
+    if name == "simd_fsqrt" {
+        return simd_simple_float_intrinsic("sqrt", in_elem, in_ty, in_len, bx, span, args);
+    }
+
+    if name == "simd_fsin" {
+        return simd_simple_float_intrinsic("sin", in_elem, in_ty, in_len, bx, span, args);
+    }
+
+    if name == "simd_fcos" {
+        return simd_simple_float_intrinsic("cos", in_elem, in_ty, in_len, bx, span, args);
+    }
+
+    if name == "simd_fabs" {
+        return simd_simple_float_intrinsic("fabs", in_elem, in_ty, in_len, bx, span, args);
+    }
+
+    if name == "simd_floor" {
+        return simd_simple_float_intrinsic("floor", in_elem, in_ty, in_len, bx, span, args);
+    }
+
+    if name == "simd_ceil" {
+        return simd_simple_float_intrinsic("ceil", in_elem, in_ty, in_len, bx, span, args);
+    }
+
+    if name == "simd_fexp" {
+        return simd_simple_float_intrinsic("exp", in_elem, in_ty, in_len, bx, span, args);
+    }
+
+    if name == "simd_fexp2" {
+        return simd_simple_float_intrinsic("exp2", in_elem, in_ty, in_len, bx, span, args);
+    }
+
+    if name == "simd_flog10" {
+        return simd_simple_float_intrinsic("log10", in_elem, in_ty, in_len, bx, span, args);
+    }
+
+    if name == "simd_flog2" {
+        return simd_simple_float_intrinsic("log2", in_elem, in_ty, in_len, bx, span, args);
+    }
+
+    if name == "simd_flog" {
simd_simple_float_intrinsic("log", in_elem, in_ty, in_len, bx, span, args); + } + + if name == "simd_fpowi" { + return simd_simple_float_intrinsic("powi", in_elem, in_ty, in_len, bx, span, args); + } + + if name == "simd_fpow" { + return simd_simple_float_intrinsic("pow", in_elem, in_ty, in_len, bx, span, args); + } + + if name == "simd_fma" { + return simd_simple_float_intrinsic("fma", in_elem, in_ty, in_len, bx, span, args); + } + + if name == "simd_gather" { + // simd_gather(values: , pointers: , + // mask: ) -> + // * N: number of elements in the input vectors + // * T: type of the element to load + // * M: any integer width is supported, will be truncated to i1 + + // All types must be simd vector types + require_simd!(in_ty, "first"); + require_simd!(arg_tys[1], "second"); + require_simd!(arg_tys[2], "third"); + require_simd!(ret_ty, "return"); + + // Of the same length: + require!(in_len == arg_tys[1].simd_size(tcx), + "expected {} argument with length {} (same as input type `{}`), \ + found `{}` with length {}", "second", in_len, in_ty, arg_tys[1], + arg_tys[1].simd_size(tcx)); + require!(in_len == arg_tys[2].simd_size(tcx), + "expected {} argument with length {} (same as input type `{}`), \ + found `{}` with length {}", "third", in_len, in_ty, arg_tys[2], + arg_tys[2].simd_size(tcx)); + + // The return type must match the first argument type + require!(ret_ty == in_ty, + "expected return type `{}`, found `{}`", + in_ty, ret_ty); + + // This counts how many pointers + fn ptr_count(t: ty::Ty) -> usize { + match t.sty { + ty::TyRawPtr(p) => 1 + ptr_count(p.ty), + _ => 0, + } + } + + // Non-ptr type + fn non_ptr(t: ty::Ty) -> ty::Ty { + match t.sty { + ty::TyRawPtr(p) => non_ptr(p.ty), + _ => t, + } + } + + // The second argument must be a simd vector with an element type that's a pointer + // to the element type of the first argument + let (pointer_count, underlying_ty) = match arg_tys[1].simd_type(tcx).sty { + ty::TyRawPtr(p) if p.ty == in_elem => (ptr_count(arg_tys[1].simd_type(tcx)), + non_ptr(arg_tys[1].simd_type(tcx))), + _ => { + require!(false, "expected element type `{}` of second argument `{}` \ + to be a pointer to the element type `{}` of the first \ + argument `{}`, found `{}` != `*_ {}`", + arg_tys[1].simd_type(tcx).sty, arg_tys[1], in_elem, in_ty, + arg_tys[1].simd_type(tcx).sty, in_elem); + unreachable!(); + } + }; + assert!(pointer_count > 0); + assert!(pointer_count - 1 == ptr_count(arg_tys[0].simd_type(tcx))); + assert_eq!(underlying_ty, non_ptr(arg_tys[0].simd_type(tcx))); + + // The element type of the third argument must be a signed integer type of any width: + match arg_tys[2].simd_type(tcx).sty { + ty::TyInt(_) => (), + _ => { + require!(false, "expected element type `{}` of third argument `{}` \ + to be a signed integer type", + arg_tys[2].simd_type(tcx).sty, arg_tys[2]); + } + } + + // Alignment of T, must be a constant integer value: + let alignment_ty = Type::i32(bx.cx); + let alignment = C_i32(bx.cx, bx.cx.align_of(in_elem).abi() as i32); + + // Truncate the mask vector to a vector of i1s: + let (mask, mask_ty) = { + let i1 = Type::i1(bx.cx); + let i1xn = Type::vector(&i1, in_len as u64); + (bx.trunc(args[2].immediate(), i1xn), i1xn) + }; + + // FIXME: use: + // https://github.com/llvm-mirror/llvm/blob/master/include/llvm/IR/Function.h#L182 + // https://github.com/llvm-mirror/llvm/blob/master/include/llvm/IR/Intrinsics.h#L81 + fn llvm_vector_str(elem_ty: ty::Ty, vec_len: usize, no_pointers: usize) -> String { + let p0s: String = "p0".repeat(no_pointers); + 
+            match elem_ty.sty {
+                ty::TyInt(v) => format!("v{}{}i{}", vec_len, p0s, v.bit_width().unwrap()),
+                ty::TyUint(v) => format!("v{}{}i{}", vec_len, p0s, v.bit_width().unwrap()),
+                ty::TyFloat(v) => format!("v{}{}f{}", vec_len, p0s, v.bit_width()),
+                _ => unreachable!(),
+            }
+        }
+
+        fn llvm_vector_ty(cx: &CodegenCx, elem_ty: ty::Ty, vec_len: usize,
+                          mut no_pointers: usize) -> Type {
+            // FIXME: use cx.layout_of(ty).llvm_type() ?
+            let mut elem_ty = match elem_ty.sty {
+                ty::TyInt(v) => Type::int_from_ty(cx, v),
+                ty::TyUint(v) => Type::uint_from_ty(cx, v),
+                ty::TyFloat(v) => Type::float_from_ty(cx, v),
+                _ => unreachable!(),
+            };
+            while no_pointers > 0 {
+                elem_ty = elem_ty.ptr_to();
+                no_pointers -= 1;
+            }
+            Type::vector(&elem_ty, vec_len as u64)
+        }
+
+
+        // Type of the vector of pointers:
+        let llvm_pointer_vec_ty = llvm_vector_ty(bx.cx, underlying_ty, in_len, pointer_count);
+        let llvm_pointer_vec_str = llvm_vector_str(underlying_ty, in_len, pointer_count);
+
+        // Type of the vector of elements:
+        let llvm_elem_vec_ty = llvm_vector_ty(bx.cx, underlying_ty, in_len, pointer_count - 1);
+        let llvm_elem_vec_str = llvm_vector_str(underlying_ty, in_len, pointer_count - 1);
+
+        let llvm_intrinsic = format!("llvm.masked.gather.{}.{}",
+                                     llvm_elem_vec_str, llvm_pointer_vec_str);
+        let f = declare::declare_cfn(bx.cx, &llvm_intrinsic,
+                                     Type::func(&[llvm_pointer_vec_ty, alignment_ty, mask_ty,
+                                                  llvm_elem_vec_ty], &llvm_elem_vec_ty));
+        llvm::SetUnnamedAddr(f, false);
+        let v = bx.call(f, &[args[1].immediate(), alignment, mask, args[0].immediate()],
+                        None);
+        return Ok(v);
+    }
+
+    if name == "simd_scatter" {
+        // simd_scatter(values: <N x T>, pointers: <N x *mut T>,
+        //              mask: <N x i{M}>) -> ()
+        // * N: number of elements in the input vectors
+        // * T: type of the element to load
+        // * M: any integer width is supported, will be truncated to i1
+
+        // All types must be simd vector types
+        require_simd!(in_ty, "first");
+        require_simd!(arg_tys[1], "second");
+        require_simd!(arg_tys[2], "third");
+
+        // Of the same length:
+        require!(in_len == arg_tys[1].simd_size(tcx),
+                 "expected {} argument with length {} (same as input type `{}`), \
+                  found `{}` with length {}", "second", in_len, in_ty, arg_tys[1],
+                 arg_tys[1].simd_size(tcx));
+        require!(in_len == arg_tys[2].simd_size(tcx),
+                 "expected {} argument with length {} (same as input type `{}`), \
+                  found `{}` with length {}", "third", in_len, in_ty, arg_tys[2],
+                 arg_tys[2].simd_size(tcx));
+
+        // This counts how many pointers
+        fn ptr_count(t: ty::Ty) -> usize {
+            match t.sty {
+                ty::TyRawPtr(p) => 1 + ptr_count(p.ty),
+                _ => 0,
+            }
+        }
+
+        // Non-ptr type
+        fn non_ptr(t: ty::Ty) -> ty::Ty {
+            match t.sty {
+                ty::TyRawPtr(p) => non_ptr(p.ty),
+                _ => t,
+            }
+        }
+
+        // The second argument must be a simd vector with an element type that's a pointer
+        // to the element type of the first argument
+        let (pointer_count, underlying_ty) = match arg_tys[1].simd_type(tcx).sty {
+            ty::TyRawPtr(p) if p.ty == in_elem && p.mutbl == hir::MutMutable
+                => (ptr_count(arg_tys[1].simd_type(tcx)),
+                    non_ptr(arg_tys[1].simd_type(tcx))),
+            _ => {
+                require!(false, "expected element type `{}` of second argument `{}` \
+                                 to be a pointer to the element type `{}` of the first \
+                                 argument `{}`, found `{}` != `*mut {}`",
+                         arg_tys[1].simd_type(tcx).sty, arg_tys[1], in_elem, in_ty,
+                         arg_tys[1].simd_type(tcx).sty, in_elem);
+                unreachable!();
+            }
+        };
+        assert!(pointer_count > 0);
+        assert!(pointer_count - 1 == ptr_count(arg_tys[0].simd_type(tcx)));
+        assert_eq!(underlying_ty, non_ptr(arg_tys[0].simd_type(tcx)));
+
+        // The element type of the third argument must be a signed integer type of any width:
+        match arg_tys[2].simd_type(tcx).sty {
+            ty::TyInt(_) => (),
+            _ => {
+                require!(false, "expected element type `{}` of third argument `{}` \
+                                 to be a signed integer type",
+                         arg_tys[2].simd_type(tcx).sty, arg_tys[2]);
+            }
+        }
+
+        // Alignment of T, must be a constant integer value:
+        let alignment_ty = Type::i32(bx.cx);
+        let alignment = C_i32(bx.cx, bx.cx.align_of(in_elem).abi() as i32);
+
+        // Truncate the mask vector to a vector of i1s:
+        let (mask, mask_ty) = {
+            let i1 = Type::i1(bx.cx);
+            let i1xn = Type::vector(&i1, in_len as u64);
+            (bx.trunc(args[2].immediate(), i1xn), i1xn)
+        };
+
+        let ret_t = Type::void(bx.cx);
+
+        // FIXME: use:
+        //  https://github.com/llvm-mirror/llvm/blob/master/include/llvm/IR/Function.h#L182
+        //  https://github.com/llvm-mirror/llvm/blob/master/include/llvm/IR/Intrinsics.h#L81
+        fn llvm_vector_str(elem_ty: ty::Ty, vec_len: usize, no_pointers: usize) -> String {
+            let p0s: String = "p0".repeat(no_pointers);
+            match elem_ty.sty {
+                ty::TyInt(v) => format!("v{}{}i{}", vec_len, p0s, v.bit_width().unwrap()),
+                ty::TyUint(v) => format!("v{}{}i{}", vec_len, p0s, v.bit_width().unwrap()),
+                ty::TyFloat(v) => format!("v{}{}f{}", vec_len, p0s, v.bit_width()),
+                _ => unreachable!(),
+            }
+        }
+
+        fn llvm_vector_ty(cx: &CodegenCx, elem_ty: ty::Ty, vec_len: usize,
+                          mut no_pointers: usize) -> Type {
+            // FIXME: use cx.layout_of(ty).llvm_type() ?
+            let mut elem_ty = match elem_ty.sty {
+                ty::TyInt(v) => Type::int_from_ty(cx, v),
+                ty::TyUint(v) => Type::uint_from_ty(cx, v),
+                ty::TyFloat(v) => Type::float_from_ty(cx, v),
+                _ => unreachable!(),
+            };
+            while no_pointers > 0 {
+                elem_ty = elem_ty.ptr_to();
+                no_pointers -= 1;
+            }
+            Type::vector(&elem_ty, vec_len as u64)
+        }
+
+
+        // Type of the vector of pointers:
+        let llvm_pointer_vec_ty = llvm_vector_ty(bx.cx, underlying_ty, in_len, pointer_count);
+        let llvm_pointer_vec_str = llvm_vector_str(underlying_ty, in_len, pointer_count);
+
+        // Type of the vector of elements:
+        let llvm_elem_vec_ty = llvm_vector_ty(bx.cx, underlying_ty, in_len, pointer_count - 1);
+        let llvm_elem_vec_str = llvm_vector_str(underlying_ty, in_len, pointer_count - 1);
+
+        let llvm_intrinsic = format!("llvm.masked.scatter.{}.{}",
+                                     llvm_elem_vec_str, llvm_pointer_vec_str);
+        let f = declare::declare_cfn(bx.cx, &llvm_intrinsic,
+                                     Type::func(&[llvm_elem_vec_ty, llvm_pointer_vec_ty, alignment_ty, mask_ty], &ret_t));
+        llvm::SetUnnamedAddr(f, false);
+        let v = bx.call(f, &[args[0].immediate(), args[1].immediate(), alignment, mask],
+                        None);
+        return Ok(v);
+    }
+
arith_red { ($name:tt : $integer_reduce:ident, $float_reduce:ident, $ordered:expr) => { if name == $name { diff --git a/src/librustc_typeck/check/intrinsic.rs b/src/librustc_typeck/check/intrinsic.rs index af1f1044edf..5546aa58d4c 100644 --- a/src/librustc_typeck/check/intrinsic.rs +++ b/src/librustc_typeck/check/intrinsic.rs @@ -351,9 +351,26 @@ pub fn check_platform_intrinsic_type<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, "simd_add" | "simd_sub" | "simd_mul" | "simd_rem" | "simd_div" | "simd_shl" | "simd_shr" | "simd_and" | "simd_or" | "simd_xor" | - "simd_fmin" | "simd_fmax" => { + "simd_fmin" | "simd_fmax" | "simd_fpow" => { (1, vec![param(0), param(0)], param(0)) } + "simd_fsqrt" | "simd_fsin" | "simd_fcos" | "simd_fexp" | "simd_fexp2" | + "simd_flog2" | "simd_flog10" | "simd_flog" | + "simd_fabs" | "simd_floor" | "simd_ceil" => { + (1, vec![param(0)], param(0)) + } + "simd_fpowi" => { + (1, vec![param(0), tcx.types.i32], param(0)) + } + "simd_fma" => { + (1, vec![param(0), param(0), param(0)], param(0)) + } + "simd_gather" => { + (3, vec![param(0), param(1), param(2)], param(0)) + } + "simd_scatter" => { + (3, vec![param(0), param(1), param(2)], tcx.mk_nil()) + } "simd_insert" => (2, vec![param(0), tcx.types.u32, param(1)], param(0)), "simd_extract" => (2, vec![param(0), tcx.types.u32], param(1)), "simd_cast" => (2, vec![param(0)], param(1)), diff --git a/src/test/codegen/simd-intrinsic-float-abs.rs b/src/test/codegen/simd-intrinsic-float-abs.rs new file mode 100644 index 00000000000..8fc6b7a4826 --- /dev/null +++ b/src/test/codegen/simd-intrinsic-float-abs.rs @@ -0,0 +1,104 @@ +// Copyright 2016 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. 
+ +// ignore-emscripten + +// compile-flags: -C no-prepopulate-passes + +#![crate_type = "lib"] + +#![feature(repr_simd, platform_intrinsics)] +#![allow(non_camel_case_types)] + +#[repr(simd)] +#[derive(Copy, Clone, PartialEq, Debug)] +pub struct f32x2(pub f32, pub f32); + +#[repr(simd)] +#[derive(Copy, Clone, PartialEq, Debug)] +pub struct f32x4(pub f32, pub f32, pub f32, pub f32); + +#[repr(simd)] +#[derive(Copy, Clone, PartialEq, Debug)] +pub struct f32x8(pub f32, pub f32, pub f32, pub f32, + pub f32, pub f32, pub f32, pub f32); + +#[repr(simd)] +#[derive(Copy, Clone, PartialEq, Debug)] +pub struct f32x16(pub f32, pub f32, pub f32, pub f32, + pub f32, pub f32, pub f32, pub f32, + pub f32, pub f32, pub f32, pub f32, + pub f32, pub f32, pub f32, pub f32); + +extern "platform-intrinsic" { + fn simd_fabs(x: T) -> T; +} + +// CHECK-LABEL: @fabs_32x2 +#[no_mangle] +pub unsafe fn fabs_32x2(a: f32x2) -> f32x2 { + // CHECK: call <2 x float> @llvm.fabs.v2f32 + simd_fabs(a) +} + +// CHECK-LABEL: @fabs_32x4 +#[no_mangle] +pub unsafe fn fabs_32x4(a: f32x4) -> f32x4 { + // CHECK: call <4 x float> @llvm.fabs.v4f32 + simd_fabs(a) +} + +// CHECK-LABEL: @fabs_32x8 +#[no_mangle] +pub unsafe fn fabs_32x8(a: f32x8) -> f32x8 { + // CHECK: call <8 x float> @llvm.fabs.v8f32 + simd_fabs(a) +} + +// CHECK-LABEL: @fabs_32x16 +#[no_mangle] +pub unsafe fn fabs_32x16(a: f32x16) -> f32x16 { + // CHECK: call <16 x float> @llvm.fabs.v16f32 + simd_fabs(a) +} + +#[repr(simd)] +#[derive(Copy, Clone, PartialEq, Debug)] +pub struct f64x2(pub f64, pub f64); + +#[repr(simd)] +#[derive(Copy, Clone, PartialEq, Debug)] +pub struct f64x4(pub f64, pub f64, pub f64, pub f64); + +#[repr(simd)] +#[derive(Copy, Clone, PartialEq, Debug)] +pub struct f64x8(pub f64, pub f64, pub f64, pub f64, + pub f64, pub f64, pub f64, pub f64); + +// CHECK-LABEL: @fabs_64x4 +#[no_mangle] +pub unsafe fn fabs_64x4(a: f64x4) -> f64x4 { + // CHECK: call <4 x double> @llvm.fabs.v4f64 + simd_fabs(a) +} + +// CHECK-LABEL: @fabs_64x2 +#[no_mangle] +pub unsafe fn fabs_64x2(a: f64x2) -> f64x2 { + // CHECK: call <2 x double> @llvm.fabs.v2f64 + simd_fabs(a) +} + +// CHECK-LABEL: @fabs_64x8 +#[no_mangle] +pub unsafe fn fabs_64x8(a: f64x8) -> f64x8 { + // CHECK: call <8 x double> @llvm.fabs.v8f64 + simd_fabs(a) +} diff --git a/src/test/codegen/simd-intrinsic-float-ceil.rs b/src/test/codegen/simd-intrinsic-float-ceil.rs new file mode 100644 index 00000000000..aca591b97ab --- /dev/null +++ b/src/test/codegen/simd-intrinsic-float-ceil.rs @@ -0,0 +1,104 @@ +// Copyright 2016 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. 
+ +// ignore-emscripten + +// compile-flags: -C no-prepopulate-passes + +#![crate_type = "lib"] + +#![feature(repr_simd, platform_intrinsics)] +#![allow(non_camel_case_types)] + +#[repr(simd)] +#[derive(Copy, Clone, PartialEq, Debug)] +pub struct f32x2(pub f32, pub f32); + +#[repr(simd)] +#[derive(Copy, Clone, PartialEq, Debug)] +pub struct f32x4(pub f32, pub f32, pub f32, pub f32); + +#[repr(simd)] +#[derive(Copy, Clone, PartialEq, Debug)] +pub struct f32x8(pub f32, pub f32, pub f32, pub f32, + pub f32, pub f32, pub f32, pub f32); + +#[repr(simd)] +#[derive(Copy, Clone, PartialEq, Debug)] +pub struct f32x16(pub f32, pub f32, pub f32, pub f32, + pub f32, pub f32, pub f32, pub f32, + pub f32, pub f32, pub f32, pub f32, + pub f32, pub f32, pub f32, pub f32); + +extern "platform-intrinsic" { + fn simd_ceil(x: T) -> T; +} + +// CHECK-LABEL: @ceil_32x2 +#[no_mangle] +pub unsafe fn ceil_32x2(a: f32x2) -> f32x2 { + // CHECK: call <2 x float> @llvm.ceil.v2f32 + simd_ceil(a) +} + +// CHECK-LABEL: @ceil_32x4 +#[no_mangle] +pub unsafe fn ceil_32x4(a: f32x4) -> f32x4 { + // CHECK: call <4 x float> @llvm.ceil.v4f32 + simd_ceil(a) +} + +// CHECK-LABEL: @ceil_32x8 +#[no_mangle] +pub unsafe fn ceil_32x8(a: f32x8) -> f32x8 { + // CHECK: call <8 x float> @llvm.ceil.v8f32 + simd_ceil(a) +} + +// CHECK-LABEL: @ceil_32x16 +#[no_mangle] +pub unsafe fn ceil_32x16(a: f32x16) -> f32x16 { + // CHECK: call <16 x float> @llvm.ceil.v16f32 + simd_ceil(a) +} + +#[repr(simd)] +#[derive(Copy, Clone, PartialEq, Debug)] +pub struct f64x2(pub f64, pub f64); + +#[repr(simd)] +#[derive(Copy, Clone, PartialEq, Debug)] +pub struct f64x4(pub f64, pub f64, pub f64, pub f64); + +#[repr(simd)] +#[derive(Copy, Clone, PartialEq, Debug)] +pub struct f64x8(pub f64, pub f64, pub f64, pub f64, + pub f64, pub f64, pub f64, pub f64); + +// CHECK-LABEL: @ceil_64x4 +#[no_mangle] +pub unsafe fn ceil_64x4(a: f64x4) -> f64x4 { + // CHECK: call <4 x double> @llvm.ceil.v4f64 + simd_ceil(a) +} + +// CHECK-LABEL: @ceil_64x2 +#[no_mangle] +pub unsafe fn ceil_64x2(a: f64x2) -> f64x2 { + // CHECK: call <2 x double> @llvm.ceil.v2f64 + simd_ceil(a) +} + +// CHECK-LABEL: @ceil_64x8 +#[no_mangle] +pub unsafe fn ceil_64x8(a: f64x8) -> f64x8 { + // CHECK: call <8 x double> @llvm.ceil.v8f64 + simd_ceil(a) +} diff --git a/src/test/codegen/simd-intrinsic-float-cos.rs b/src/test/codegen/simd-intrinsic-float-cos.rs new file mode 100644 index 00000000000..77715f23d63 --- /dev/null +++ b/src/test/codegen/simd-intrinsic-float-cos.rs @@ -0,0 +1,104 @@ +// Copyright 2016 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. 
+ +// ignore-emscripten + +// compile-flags: -C no-prepopulate-passes + +#![crate_type = "lib"] + +#![feature(repr_simd, platform_intrinsics)] +#![allow(non_camel_case_types)] + +#[repr(simd)] +#[derive(Copy, Clone, PartialEq, Debug)] +pub struct f32x2(pub f32, pub f32); + +#[repr(simd)] +#[derive(Copy, Clone, PartialEq, Debug)] +pub struct f32x4(pub f32, pub f32, pub f32, pub f32); + +#[repr(simd)] +#[derive(Copy, Clone, PartialEq, Debug)] +pub struct f32x8(pub f32, pub f32, pub f32, pub f32, + pub f32, pub f32, pub f32, pub f32); + +#[repr(simd)] +#[derive(Copy, Clone, PartialEq, Debug)] +pub struct f32x16(pub f32, pub f32, pub f32, pub f32, + pub f32, pub f32, pub f32, pub f32, + pub f32, pub f32, pub f32, pub f32, + pub f32, pub f32, pub f32, pub f32); + +extern "platform-intrinsic" { + fn simd_fcos(x: T) -> T; +} + +// CHECK-LABEL: @fcos_32x2 +#[no_mangle] +pub unsafe fn fcos_32x2(a: f32x2) -> f32x2 { + // CHECK: call <2 x float> @llvm.cos.v2f32 + simd_fcos(a) +} + +// CHECK-LABEL: @fcos_32x4 +#[no_mangle] +pub unsafe fn fcos_32x4(a: f32x4) -> f32x4 { + // CHECK: call <4 x float> @llvm.cos.v4f32 + simd_fcos(a) +} + +// CHECK-LABEL: @fcos_32x8 +#[no_mangle] +pub unsafe fn fcos_32x8(a: f32x8) -> f32x8 { + // CHECK: call <8 x float> @llvm.cos.v8f32 + simd_fcos(a) +} + +// CHECK-LABEL: @fcos_32x16 +#[no_mangle] +pub unsafe fn fcos_32x16(a: f32x16) -> f32x16 { + // CHECK: call <16 x float> @llvm.cos.v16f32 + simd_fcos(a) +} + +#[repr(simd)] +#[derive(Copy, Clone, PartialEq, Debug)] +pub struct f64x2(pub f64, pub f64); + +#[repr(simd)] +#[derive(Copy, Clone, PartialEq, Debug)] +pub struct f64x4(pub f64, pub f64, pub f64, pub f64); + +#[repr(simd)] +#[derive(Copy, Clone, PartialEq, Debug)] +pub struct f64x8(pub f64, pub f64, pub f64, pub f64, + pub f64, pub f64, pub f64, pub f64); + +// CHECK-LABEL: @fcos_64x4 +#[no_mangle] +pub unsafe fn fcos_64x4(a: f64x4) -> f64x4 { + // CHECK: call <4 x double> @llvm.cos.v4f64 + simd_fcos(a) +} + +// CHECK-LABEL: @fcos_64x2 +#[no_mangle] +pub unsafe fn fcos_64x2(a: f64x2) -> f64x2 { + // CHECK: call <2 x double> @llvm.cos.v2f64 + simd_fcos(a) +} + +// CHECK-LABEL: @fcos_64x8 +#[no_mangle] +pub unsafe fn fcos_64x8(a: f64x8) -> f64x8 { + // CHECK: call <8 x double> @llvm.cos.v8f64 + simd_fcos(a) +} diff --git a/src/test/codegen/simd-intrinsic-float-exp.rs b/src/test/codegen/simd-intrinsic-float-exp.rs new file mode 100644 index 00000000000..0046ce66860 --- /dev/null +++ b/src/test/codegen/simd-intrinsic-float-exp.rs @@ -0,0 +1,104 @@ +// Copyright 2016 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. 
+ +// ignore-emscripten + +// compile-flags: -C no-prepopulate-passes + +#![crate_type = "lib"] + +#![feature(repr_simd, platform_intrinsics)] +#![allow(non_camel_case_types)] + +#[repr(simd)] +#[derive(Copy, Clone, PartialEq, Debug)] +pub struct f32x2(pub f32, pub f32); + +#[repr(simd)] +#[derive(Copy, Clone, PartialEq, Debug)] +pub struct f32x4(pub f32, pub f32, pub f32, pub f32); + +#[repr(simd)] +#[derive(Copy, Clone, PartialEq, Debug)] +pub struct f32x8(pub f32, pub f32, pub f32, pub f32, + pub f32, pub f32, pub f32, pub f32); + +#[repr(simd)] +#[derive(Copy, Clone, PartialEq, Debug)] +pub struct f32x16(pub f32, pub f32, pub f32, pub f32, + pub f32, pub f32, pub f32, pub f32, + pub f32, pub f32, pub f32, pub f32, + pub f32, pub f32, pub f32, pub f32); + +extern "platform-intrinsic" { + fn simd_fexp(x: T) -> T; +} + +// CHECK-LABEL: @exp_32x2 +#[no_mangle] +pub unsafe fn exp_32x2(a: f32x2) -> f32x2 { + // CHECK: call <2 x float> @llvm.exp.v2f32 + simd_fexp(a) +} + +// CHECK-LABEL: @exp_32x4 +#[no_mangle] +pub unsafe fn exp_32x4(a: f32x4) -> f32x4 { + // CHECK: call <4 x float> @llvm.exp.v4f32 + simd_fexp(a) +} + +// CHECK-LABEL: @exp_32x8 +#[no_mangle] +pub unsafe fn exp_32x8(a: f32x8) -> f32x8 { + // CHECK: call <8 x float> @llvm.exp.v8f32 + simd_fexp(a) +} + +// CHECK-LABEL: @exp_32x16 +#[no_mangle] +pub unsafe fn exp_32x16(a: f32x16) -> f32x16 { + // CHECK: call <16 x float> @llvm.exp.v16f32 + simd_fexp(a) +} + +#[repr(simd)] +#[derive(Copy, Clone, PartialEq, Debug)] +pub struct f64x2(pub f64, pub f64); + +#[repr(simd)] +#[derive(Copy, Clone, PartialEq, Debug)] +pub struct f64x4(pub f64, pub f64, pub f64, pub f64); + +#[repr(simd)] +#[derive(Copy, Clone, PartialEq, Debug)] +pub struct f64x8(pub f64, pub f64, pub f64, pub f64, + pub f64, pub f64, pub f64, pub f64); + +// CHECK-LABEL: @exp_64x4 +#[no_mangle] +pub unsafe fn exp_64x4(a: f64x4) -> f64x4 { + // CHECK: call <4 x double> @llvm.exp.v4f64 + simd_fexp(a) +} + +// CHECK-LABEL: @exp_64x2 +#[no_mangle] +pub unsafe fn exp_64x2(a: f64x2) -> f64x2 { + // CHECK: call <2 x double> @llvm.exp.v2f64 + simd_fexp(a) +} + +// CHECK-LABEL: @exp_64x8 +#[no_mangle] +pub unsafe fn exp_64x8(a: f64x8) -> f64x8 { + // CHECK: call <8 x double> @llvm.exp.v8f64 + simd_fexp(a) +} diff --git a/src/test/codegen/simd-intrinsic-float-exp2.rs b/src/test/codegen/simd-intrinsic-float-exp2.rs new file mode 100644 index 00000000000..a93fd7ea65a --- /dev/null +++ b/src/test/codegen/simd-intrinsic-float-exp2.rs @@ -0,0 +1,104 @@ +// Copyright 2016 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. 
+ +// ignore-emscripten + +// compile-flags: -C no-prepopulate-passes + +#![crate_type = "lib"] + +#![feature(repr_simd, platform_intrinsics)] +#![allow(non_camel_case_types)] + +#[repr(simd)] +#[derive(Copy, Clone, PartialEq, Debug)] +pub struct f32x2(pub f32, pub f32); + +#[repr(simd)] +#[derive(Copy, Clone, PartialEq, Debug)] +pub struct f32x4(pub f32, pub f32, pub f32, pub f32); + +#[repr(simd)] +#[derive(Copy, Clone, PartialEq, Debug)] +pub struct f32x8(pub f32, pub f32, pub f32, pub f32, + pub f32, pub f32, pub f32, pub f32); + +#[repr(simd)] +#[derive(Copy, Clone, PartialEq, Debug)] +pub struct f32x16(pub f32, pub f32, pub f32, pub f32, + pub f32, pub f32, pub f32, pub f32, + pub f32, pub f32, pub f32, pub f32, + pub f32, pub f32, pub f32, pub f32); + +extern "platform-intrinsic" { + fn simd_fexp2(x: T) -> T; +} + +// CHECK-LABEL: @exp2_32x2 +#[no_mangle] +pub unsafe fn exp2_32x2(a: f32x2) -> f32x2 { + // CHECK: call <2 x float> @llvm.exp2.v2f32 + simd_fexp2(a) +} + +// CHECK-LABEL: @exp2_32x4 +#[no_mangle] +pub unsafe fn exp2_32x4(a: f32x4) -> f32x4 { + // CHECK: call <4 x float> @llvm.exp2.v4f32 + simd_fexp2(a) +} + +// CHECK-LABEL: @exp2_32x8 +#[no_mangle] +pub unsafe fn exp2_32x8(a: f32x8) -> f32x8 { + // CHECK: call <8 x float> @llvm.exp2.v8f32 + simd_fexp2(a) +} + +// CHECK-LABEL: @exp2_32x16 +#[no_mangle] +pub unsafe fn exp2_32x16(a: f32x16) -> f32x16 { + // CHECK: call <16 x float> @llvm.exp2.v16f32 + simd_fexp2(a) +} + +#[repr(simd)] +#[derive(Copy, Clone, PartialEq, Debug)] +pub struct f64x2(pub f64, pub f64); + +#[repr(simd)] +#[derive(Copy, Clone, PartialEq, Debug)] +pub struct f64x4(pub f64, pub f64, pub f64, pub f64); + +#[repr(simd)] +#[derive(Copy, Clone, PartialEq, Debug)] +pub struct f64x8(pub f64, pub f64, pub f64, pub f64, + pub f64, pub f64, pub f64, pub f64); + +// CHECK-LABEL: @exp2_64x4 +#[no_mangle] +pub unsafe fn exp2_64x4(a: f64x4) -> f64x4 { + // CHECK: call <4 x double> @llvm.exp2.v4f64 + simd_fexp2(a) +} + +// CHECK-LABEL: @exp2_64x2 +#[no_mangle] +pub unsafe fn exp2_64x2(a: f64x2) -> f64x2 { + // CHECK: call <2 x double> @llvm.exp2.v2f64 + simd_fexp2(a) +} + +// CHECK-LABEL: @exp2_64x8 +#[no_mangle] +pub unsafe fn exp2_64x8(a: f64x8) -> f64x8 { + // CHECK: call <8 x double> @llvm.exp2.v8f64 + simd_fexp2(a) +} diff --git a/src/test/codegen/simd-intrinsic-float-floor.rs b/src/test/codegen/simd-intrinsic-float-floor.rs new file mode 100644 index 00000000000..dfea41869de --- /dev/null +++ b/src/test/codegen/simd-intrinsic-float-floor.rs @@ -0,0 +1,104 @@ +// Copyright 2016 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. 
+ +// ignore-emscripten + +// compile-flags: -C no-prepopulate-passes + +#![crate_type = "lib"] + +#![feature(repr_simd, platform_intrinsics)] +#![allow(non_camel_case_types)] + +#[repr(simd)] +#[derive(Copy, Clone, PartialEq, Debug)] +pub struct f32x2(pub f32, pub f32); + +#[repr(simd)] +#[derive(Copy, Clone, PartialEq, Debug)] +pub struct f32x4(pub f32, pub f32, pub f32, pub f32); + +#[repr(simd)] +#[derive(Copy, Clone, PartialEq, Debug)] +pub struct f32x8(pub f32, pub f32, pub f32, pub f32, + pub f32, pub f32, pub f32, pub f32); + +#[repr(simd)] +#[derive(Copy, Clone, PartialEq, Debug)] +pub struct f32x16(pub f32, pub f32, pub f32, pub f32, + pub f32, pub f32, pub f32, pub f32, + pub f32, pub f32, pub f32, pub f32, + pub f32, pub f32, pub f32, pub f32); + +extern "platform-intrinsic" { + fn simd_floor(x: T) -> T; +} + +// CHECK-LABEL: @floor_32x2 +#[no_mangle] +pub unsafe fn floor_32x2(a: f32x2) -> f32x2 { + // CHECK: call <2 x float> @llvm.floor.v2f32 + simd_floor(a) +} + +// CHECK-LABEL: @floor_32x4 +#[no_mangle] +pub unsafe fn floor_32x4(a: f32x4) -> f32x4 { + // CHECK: call <4 x float> @llvm.floor.v4f32 + simd_floor(a) +} + +// CHECK-LABEL: @floor_32x8 +#[no_mangle] +pub unsafe fn floor_32x8(a: f32x8) -> f32x8 { + // CHECK: call <8 x float> @llvm.floor.v8f32 + simd_floor(a) +} + +// CHECK-LABEL: @floor_32x16 +#[no_mangle] +pub unsafe fn floor_32x16(a: f32x16) -> f32x16 { + // CHECK: call <16 x float> @llvm.floor.v16f32 + simd_floor(a) +} + +#[repr(simd)] +#[derive(Copy, Clone, PartialEq, Debug)] +pub struct f64x2(pub f64, pub f64); + +#[repr(simd)] +#[derive(Copy, Clone, PartialEq, Debug)] +pub struct f64x4(pub f64, pub f64, pub f64, pub f64); + +#[repr(simd)] +#[derive(Copy, Clone, PartialEq, Debug)] +pub struct f64x8(pub f64, pub f64, pub f64, pub f64, + pub f64, pub f64, pub f64, pub f64); + +// CHECK-LABEL: @floor_64x4 +#[no_mangle] +pub unsafe fn floor_64x4(a: f64x4) -> f64x4 { + // CHECK: call <4 x double> @llvm.floor.v4f64 + simd_floor(a) +} + +// CHECK-LABEL: @floor_64x2 +#[no_mangle] +pub unsafe fn floor_64x2(a: f64x2) -> f64x2 { + // CHECK: call <2 x double> @llvm.floor.v2f64 + simd_floor(a) +} + +// CHECK-LABEL: @floor_64x8 +#[no_mangle] +pub unsafe fn floor_64x8(a: f64x8) -> f64x8 { + // CHECK: call <8 x double> @llvm.floor.v8f64 + simd_floor(a) +} diff --git a/src/test/codegen/simd-intrinsic-float-fma.rs b/src/test/codegen/simd-intrinsic-float-fma.rs new file mode 100644 index 00000000000..02f6d0ff75e --- /dev/null +++ b/src/test/codegen/simd-intrinsic-float-fma.rs @@ -0,0 +1,104 @@ +// Copyright 2016 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. 
+ +// ignore-emscripten + +// compile-flags: -C no-prepopulate-passes + +#![crate_type = "lib"] + +#![feature(repr_simd, platform_intrinsics)] +#![allow(non_camel_case_types)] + +#[repr(simd)] +#[derive(Copy, Clone, PartialEq, Debug)] +pub struct f32x2(pub f32, pub f32); + +#[repr(simd)] +#[derive(Copy, Clone, PartialEq, Debug)] +pub struct f32x4(pub f32, pub f32, pub f32, pub f32); + +#[repr(simd)] +#[derive(Copy, Clone, PartialEq, Debug)] +pub struct f32x8(pub f32, pub f32, pub f32, pub f32, + pub f32, pub f32, pub f32, pub f32); + +#[repr(simd)] +#[derive(Copy, Clone, PartialEq, Debug)] +pub struct f32x16(pub f32, pub f32, pub f32, pub f32, + pub f32, pub f32, pub f32, pub f32, + pub f32, pub f32, pub f32, pub f32, + pub f32, pub f32, pub f32, pub f32); + +extern "platform-intrinsic" { + fn simd_fma(x: T, b: T, c: T) -> T; +} + +// CHECK-LABEL: @fma_32x2 +#[no_mangle] +pub unsafe fn fma_32x2(a: f32x2, b: f32x2, c: f32x2) -> f32x2 { + // CHECK: call <2 x float> @llvm.fma.v2f32 + simd_fma(a, b, c) +} + +// CHECK-LABEL: @fma_32x4 +#[no_mangle] +pub unsafe fn fma_32x4(a: f32x4, b: f32x4, c: f32x4) -> f32x4 { + // CHECK: call <4 x float> @llvm.fma.v4f32 + simd_fma(a, b, c) +} + +// CHECK-LABEL: @fma_32x8 +#[no_mangle] +pub unsafe fn fma_32x8(a: f32x8, b: f32x8, c: f32x8) -> f32x8 { + // CHECK: call <8 x float> @llvm.fma.v8f32 + simd_fma(a, b, c) +} + +// CHECK-LABEL: @fma_32x16 +#[no_mangle] +pub unsafe fn fma_32x16(a: f32x16, b: f32x16, c: f32x16) -> f32x16 { + // CHECK: call <16 x float> @llvm.fma.v16f32 + simd_fma(a, b, c) +} + +#[repr(simd)] +#[derive(Copy, Clone, PartialEq, Debug)] +pub struct f64x2(pub f64, pub f64); + +#[repr(simd)] +#[derive(Copy, Clone, PartialEq, Debug)] +pub struct f64x4(pub f64, pub f64, pub f64, pub f64); + +#[repr(simd)] +#[derive(Copy, Clone, PartialEq, Debug)] +pub struct f64x8(pub f64, pub f64, pub f64, pub f64, + pub f64, pub f64, pub f64, pub f64); + +// CHECK-LABEL: @fma_64x4 +#[no_mangle] +pub unsafe fn fma_64x4(a: f64x4, b: f64x4, c: f64x4) -> f64x4 { + // CHECK: call <4 x double> @llvm.fma.v4f64 + simd_fma(a, b, c) +} + +// CHECK-LABEL: @fma_64x2 +#[no_mangle] +pub unsafe fn fma_64x2(a: f64x2, b: f64x2, c: f64x2) -> f64x2 { + // CHECK: call <2 x double> @llvm.fma.v2f64 + simd_fma(a, b, c) +} + +// CHECK-LABEL: @fma_64x8 +#[no_mangle] +pub unsafe fn fma_64x8(a: f64x8, b: f64x8, c: f64x8) -> f64x8 { + // CHECK: call <8 x double> @llvm.fma.v8f64 + simd_fma(a, b, c) +} diff --git a/src/test/codegen/simd-intrinsic-float-fsqrt.rs b/src/test/codegen/simd-intrinsic-float-fsqrt.rs new file mode 100644 index 00000000000..d8fc3e3675b --- /dev/null +++ b/src/test/codegen/simd-intrinsic-float-fsqrt.rs @@ -0,0 +1,104 @@ +// Copyright 2016 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. 
+ +// ignore-emscripten + +// compile-flags: -C no-prepopulate-passes + +#![crate_type = "lib"] + +#![feature(repr_simd, platform_intrinsics)] +#![allow(non_camel_case_types)] + +#[repr(simd)] +#[derive(Copy, Clone, PartialEq, Debug)] +pub struct f32x2(pub f32, pub f32); + +#[repr(simd)] +#[derive(Copy, Clone, PartialEq, Debug)] +pub struct f32x4(pub f32, pub f32, pub f32, pub f32); + +#[repr(simd)] +#[derive(Copy, Clone, PartialEq, Debug)] +pub struct f32x8(pub f32, pub f32, pub f32, pub f32, + pub f32, pub f32, pub f32, pub f32); + +#[repr(simd)] +#[derive(Copy, Clone, PartialEq, Debug)] +pub struct f32x16(pub f32, pub f32, pub f32, pub f32, + pub f32, pub f32, pub f32, pub f32, + pub f32, pub f32, pub f32, pub f32, + pub f32, pub f32, pub f32, pub f32); + +extern "platform-intrinsic" { + fn simd_fsqrt(x: T) -> T; +} + +// CHECK-LABEL: @fsqrt_32x2 +#[no_mangle] +pub unsafe fn fsqrt_32x2(a: f32x2) -> f32x2 { + // CHECK: call <2 x float> @llvm.sqrt.v2f32 + simd_fsqrt(a) +} + +// CHECK-LABEL: @fsqrt_32x4 +#[no_mangle] +pub unsafe fn fsqrt_32x4(a: f32x4) -> f32x4 { + // CHECK: call <4 x float> @llvm.sqrt.v4f32 + simd_fsqrt(a) +} + +// CHECK-LABEL: @fsqrt_32x8 +#[no_mangle] +pub unsafe fn fsqrt_32x8(a: f32x8) -> f32x8 { + // CHECK: call <8 x float> @llvm.sqrt.v8f32 + simd_fsqrt(a) +} + +// CHECK-LABEL: @fsqrt_32x16 +#[no_mangle] +pub unsafe fn fsqrt_32x16(a: f32x16) -> f32x16 { + // CHECK: call <16 x float> @llvm.sqrt.v16f32 + simd_fsqrt(a) +} + +#[repr(simd)] +#[derive(Copy, Clone, PartialEq, Debug)] +pub struct f64x2(pub f64, pub f64); + +#[repr(simd)] +#[derive(Copy, Clone, PartialEq, Debug)] +pub struct f64x4(pub f64, pub f64, pub f64, pub f64); + +#[repr(simd)] +#[derive(Copy, Clone, PartialEq, Debug)] +pub struct f64x8(pub f64, pub f64, pub f64, pub f64, + pub f64, pub f64, pub f64, pub f64); + +// CHECK-LABEL: @fsqrt_64x4 +#[no_mangle] +pub unsafe fn fsqrt_64x4(a: f64x4) -> f64x4 { + // CHECK: call <4 x double> @llvm.sqrt.v4f64 + simd_fsqrt(a) +} + +// CHECK-LABEL: @fsqrt_64x2 +#[no_mangle] +pub unsafe fn fsqrt_64x2(a: f64x2) -> f64x2 { + // CHECK: call <2 x double> @llvm.sqrt.v2f64 + simd_fsqrt(a) +} + +// CHECK-LABEL: @fsqrt_64x8 +#[no_mangle] +pub unsafe fn fsqrt_64x8(a: f64x8) -> f64x8 { + // CHECK: call <8 x double> @llvm.sqrt.v8f64 + simd_fsqrt(a) +} diff --git a/src/test/codegen/simd-intrinsic-float-log10.rs b/src/test/codegen/simd-intrinsic-float-log10.rs new file mode 100644 index 00000000000..0e094ac5b6e --- /dev/null +++ b/src/test/codegen/simd-intrinsic-float-log10.rs @@ -0,0 +1,104 @@ +// Copyright 2016 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. 
+ +// ignore-emscripten + +// compile-flags: -C no-prepopulate-passes + +#![crate_type = "lib"] + +#![feature(repr_simd, platform_intrinsics)] +#![allow(non_camel_case_types)] + +#[repr(simd)] +#[derive(Copy, Clone, PartialEq, Debug)] +pub struct f32x2(pub f32, pub f32); + +#[repr(simd)] +#[derive(Copy, Clone, PartialEq, Debug)] +pub struct f32x4(pub f32, pub f32, pub f32, pub f32); + +#[repr(simd)] +#[derive(Copy, Clone, PartialEq, Debug)] +pub struct f32x8(pub f32, pub f32, pub f32, pub f32, + pub f32, pub f32, pub f32, pub f32); + +#[repr(simd)] +#[derive(Copy, Clone, PartialEq, Debug)] +pub struct f32x16(pub f32, pub f32, pub f32, pub f32, + pub f32, pub f32, pub f32, pub f32, + pub f32, pub f32, pub f32, pub f32, + pub f32, pub f32, pub f32, pub f32); + +extern "platform-intrinsic" { + fn simd_flog10(x: T) -> T; +} + +// CHECK-LABEL: @log10_32x2 +#[no_mangle] +pub unsafe fn log10_32x2(a: f32x2) -> f32x2 { + // CHECK: call <2 x float> @llvm.log10.v2f32 + simd_flog10(a) +} + +// CHECK-LABEL: @log10_32x4 +#[no_mangle] +pub unsafe fn log10_32x4(a: f32x4) -> f32x4 { + // CHECK: call <4 x float> @llvm.log10.v4f32 + simd_flog10(a) +} + +// CHECK-LABEL: @log10_32x8 +#[no_mangle] +pub unsafe fn log10_32x8(a: f32x8) -> f32x8 { + // CHECK: call <8 x float> @llvm.log10.v8f32 + simd_flog10(a) +} + +// CHECK-LABEL: @log10_32x16 +#[no_mangle] +pub unsafe fn log10_32x16(a: f32x16) -> f32x16 { + // CHECK: call <16 x float> @llvm.log10.v16f32 + simd_flog10(a) +} + +#[repr(simd)] +#[derive(Copy, Clone, PartialEq, Debug)] +pub struct f64x2(pub f64, pub f64); + +#[repr(simd)] +#[derive(Copy, Clone, PartialEq, Debug)] +pub struct f64x4(pub f64, pub f64, pub f64, pub f64); + +#[repr(simd)] +#[derive(Copy, Clone, PartialEq, Debug)] +pub struct f64x8(pub f64, pub f64, pub f64, pub f64, + pub f64, pub f64, pub f64, pub f64); + +// CHECK-LABEL: @log10_64x4 +#[no_mangle] +pub unsafe fn log10_64x4(a: f64x4) -> f64x4 { + // CHECK: call <4 x double> @llvm.log10.v4f64 + simd_flog10(a) +} + +// CHECK-LABEL: @log10_64x2 +#[no_mangle] +pub unsafe fn log10_64x2(a: f64x2) -> f64x2 { + // CHECK: call <2 x double> @llvm.log10.v2f64 + simd_flog10(a) +} + +// CHECK-LABEL: @log10_64x8 +#[no_mangle] +pub unsafe fn log10_64x8(a: f64x8) -> f64x8 { + // CHECK: call <8 x double> @llvm.log10.v8f64 + simd_flog10(a) +} diff --git a/src/test/codegen/simd-intrinsic-float-log2.rs b/src/test/codegen/simd-intrinsic-float-log2.rs new file mode 100644 index 00000000000..da1207fc7ee --- /dev/null +++ b/src/test/codegen/simd-intrinsic-float-log2.rs @@ -0,0 +1,104 @@ +// Copyright 2016 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. 
+ +// ignore-emscripten + +// compile-flags: -C no-prepopulate-passes + +#![crate_type = "lib"] + +#![feature(repr_simd, platform_intrinsics)] +#![allow(non_camel_case_types)] + +#[repr(simd)] +#[derive(Copy, Clone, PartialEq, Debug)] +pub struct f32x2(pub f32, pub f32); + +#[repr(simd)] +#[derive(Copy, Clone, PartialEq, Debug)] +pub struct f32x4(pub f32, pub f32, pub f32, pub f32); + +#[repr(simd)] +#[derive(Copy, Clone, PartialEq, Debug)] +pub struct f32x8(pub f32, pub f32, pub f32, pub f32, + pub f32, pub f32, pub f32, pub f32); + +#[repr(simd)] +#[derive(Copy, Clone, PartialEq, Debug)] +pub struct f32x16(pub f32, pub f32, pub f32, pub f32, + pub f32, pub f32, pub f32, pub f32, + pub f32, pub f32, pub f32, pub f32, + pub f32, pub f32, pub f32, pub f32); + +extern "platform-intrinsic" { + fn simd_flog2(x: T) -> T; +} + +// CHECK-LABEL: @log2_32x2 +#[no_mangle] +pub unsafe fn log2_32x2(a: f32x2) -> f32x2 { + // CHECK: call <2 x float> @llvm.log2.v2f32 + simd_flog2(a) +} + +// CHECK-LABEL: @log2_32x4 +#[no_mangle] +pub unsafe fn log2_32x4(a: f32x4) -> f32x4 { + // CHECK: call <4 x float> @llvm.log2.v4f32 + simd_flog2(a) +} + +// CHECK-LABEL: @log2_32x8 +#[no_mangle] +pub unsafe fn log2_32x8(a: f32x8) -> f32x8 { + // CHECK: call <8 x float> @llvm.log2.v8f32 + simd_flog2(a) +} + +// CHECK-LABEL: @log2_32x16 +#[no_mangle] +pub unsafe fn log2_32x16(a: f32x16) -> f32x16 { + // CHECK: call <16 x float> @llvm.log2.v16f32 + simd_flog2(a) +} + +#[repr(simd)] +#[derive(Copy, Clone, PartialEq, Debug)] +pub struct f64x2(pub f64, pub f64); + +#[repr(simd)] +#[derive(Copy, Clone, PartialEq, Debug)] +pub struct f64x4(pub f64, pub f64, pub f64, pub f64); + +#[repr(simd)] +#[derive(Copy, Clone, PartialEq, Debug)] +pub struct f64x8(pub f64, pub f64, pub f64, pub f64, + pub f64, pub f64, pub f64, pub f64); + +// CHECK-LABEL: @log2_64x4 +#[no_mangle] +pub unsafe fn log2_64x4(a: f64x4) -> f64x4 { + // CHECK: call <4 x double> @llvm.log2.v4f64 + simd_flog2(a) +} + +// CHECK-LABEL: @log2_64x2 +#[no_mangle] +pub unsafe fn log2_64x2(a: f64x2) -> f64x2 { + // CHECK: call <2 x double> @llvm.log2.v2f64 + simd_flog2(a) +} + +// CHECK-LABEL: @log2_64x8 +#[no_mangle] +pub unsafe fn log2_64x8(a: f64x8) -> f64x8 { + // CHECK: call <8 x double> @llvm.log2.v8f64 + simd_flog2(a) +} diff --git a/src/test/codegen/simd-intrinsic-float-pow.rs b/src/test/codegen/simd-intrinsic-float-pow.rs new file mode 100644 index 00000000000..5817dd49f4d --- /dev/null +++ b/src/test/codegen/simd-intrinsic-float-pow.rs @@ -0,0 +1,104 @@ +// Copyright 2016 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. 
+ +// ignore-emscripten + +// compile-flags: -C no-prepopulate-passes + +#![crate_type = "lib"] + +#![feature(repr_simd, platform_intrinsics)] +#![allow(non_camel_case_types)] + +#[repr(simd)] +#[derive(Copy, Clone, PartialEq, Debug)] +pub struct f32x2(pub f32, pub f32); + +#[repr(simd)] +#[derive(Copy, Clone, PartialEq, Debug)] +pub struct f32x4(pub f32, pub f32, pub f32, pub f32); + +#[repr(simd)] +#[derive(Copy, Clone, PartialEq, Debug)] +pub struct f32x8(pub f32, pub f32, pub f32, pub f32, + pub f32, pub f32, pub f32, pub f32); + +#[repr(simd)] +#[derive(Copy, Clone, PartialEq, Debug)] +pub struct f32x16(pub f32, pub f32, pub f32, pub f32, + pub f32, pub f32, pub f32, pub f32, + pub f32, pub f32, pub f32, pub f32, + pub f32, pub f32, pub f32, pub f32); + +extern "platform-intrinsic" { + fn simd_fpow(x: T, b: T) -> T; +} + +// CHECK-LABEL: @fpow_32x2 +#[no_mangle] +pub unsafe fn fpow_32x2(a: f32x2, b: f32x2) -> f32x2 { + // CHECK: call <2 x float> @llvm.pow.v2f32 + simd_fpow(a, b) +} + +// CHECK-LABEL: @fpow_32x4 +#[no_mangle] +pub unsafe fn fpow_32x4(a: f32x4, b: f32x4) -> f32x4 { + // CHECK: call <4 x float> @llvm.pow.v4f32 + simd_fpow(a, b) +} + +// CHECK-LABEL: @fpow_32x8 +#[no_mangle] +pub unsafe fn fpow_32x8(a: f32x8, b: f32x8) -> f32x8 { + // CHECK: call <8 x float> @llvm.pow.v8f32 + simd_fpow(a, b) +} + +// CHECK-LABEL: @fpow_32x16 +#[no_mangle] +pub unsafe fn fpow_32x16(a: f32x16, b: f32x16) -> f32x16 { + // CHECK: call <16 x float> @llvm.pow.v16f32 + simd_fpow(a, b) +} + +#[repr(simd)] +#[derive(Copy, Clone, PartialEq, Debug)] +pub struct f64x2(pub f64, pub f64); + +#[repr(simd)] +#[derive(Copy, Clone, PartialEq, Debug)] +pub struct f64x4(pub f64, pub f64, pub f64, pub f64); + +#[repr(simd)] +#[derive(Copy, Clone, PartialEq, Debug)] +pub struct f64x8(pub f64, pub f64, pub f64, pub f64, + pub f64, pub f64, pub f64, pub f64); + +// CHECK-LABEL: @fpow_64x4 +#[no_mangle] +pub unsafe fn fpow_64x4(a: f64x4, b: f64x4) -> f64x4 { + // CHECK: call <4 x double> @llvm.pow.v4f64 + simd_fpow(a, b) +} + +// CHECK-LABEL: @fpow_64x2 +#[no_mangle] +pub unsafe fn fpow_64x2(a: f64x2, b: f64x2) -> f64x2 { + // CHECK: call <2 x double> @llvm.pow.v2f64 + simd_fpow(a, b) +} + +// CHECK-LABEL: @fpow_64x8 +#[no_mangle] +pub unsafe fn fpow_64x8(a: f64x8, b: f64x8) -> f64x8 { + // CHECK: call <8 x double> @llvm.pow.v8f64 + simd_fpow(a, b) +} diff --git a/src/test/codegen/simd-intrinsic-float-powi.rs b/src/test/codegen/simd-intrinsic-float-powi.rs new file mode 100644 index 00000000000..2db5689f623 --- /dev/null +++ b/src/test/codegen/simd-intrinsic-float-powi.rs @@ -0,0 +1,104 @@ +// Copyright 2016 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. 
+ +// ignore-emscripten + +// compile-flags: -C no-prepopulate-passes + +#![crate_type = "lib"] + +#![feature(repr_simd, platform_intrinsics)] +#![allow(non_camel_case_types)] + +#[repr(simd)] +#[derive(Copy, Clone, PartialEq, Debug)] +pub struct f32x2(pub f32, pub f32); + +#[repr(simd)] +#[derive(Copy, Clone, PartialEq, Debug)] +pub struct f32x4(pub f32, pub f32, pub f32, pub f32); + +#[repr(simd)] +#[derive(Copy, Clone, PartialEq, Debug)] +pub struct f32x8(pub f32, pub f32, pub f32, pub f32, + pub f32, pub f32, pub f32, pub f32); + +#[repr(simd)] +#[derive(Copy, Clone, PartialEq, Debug)] +pub struct f32x16(pub f32, pub f32, pub f32, pub f32, + pub f32, pub f32, pub f32, pub f32, + pub f32, pub f32, pub f32, pub f32, + pub f32, pub f32, pub f32, pub f32); + +extern "platform-intrinsic" { + fn simd_fpowi(x: T, b: i32) -> T; +} + +// CHECK-LABEL: @fpowi_32x2 +#[no_mangle] +pub unsafe fn fpowi_32x2(a: f32x2, b: i32) -> f32x2 { + // CHECK: call <2 x float> @llvm.powi.v2f32 + simd_fpowi(a, b) +} + +// CHECK-LABEL: @fpowi_32x4 +#[no_mangle] +pub unsafe fn fpowi_32x4(a: f32x4, b: i32) -> f32x4 { + // CHECK: call <4 x float> @llvm.powi.v4f32 + simd_fpowi(a, b) +} + +// CHECK-LABEL: @fpowi_32x8 +#[no_mangle] +pub unsafe fn fpowi_32x8(a: f32x8, b: i32) -> f32x8 { + // CHECK: call <8 x float> @llvm.powi.v8f32 + simd_fpowi(a, b) +} + +// CHECK-LABEL: @fpowi_32x16 +#[no_mangle] +pub unsafe fn fpowi_32x16(a: f32x16, b: i32) -> f32x16 { + // CHECK: call <16 x float> @llvm.powi.v16f32 + simd_fpowi(a, b) +} + +#[repr(simd)] +#[derive(Copy, Clone, PartialEq, Debug)] +pub struct f64x2(pub f64, pub f64); + +#[repr(simd)] +#[derive(Copy, Clone, PartialEq, Debug)] +pub struct f64x4(pub f64, pub f64, pub f64, pub f64); + +#[repr(simd)] +#[derive(Copy, Clone, PartialEq, Debug)] +pub struct f64x8(pub f64, pub f64, pub f64, pub f64, + pub f64, pub f64, pub f64, pub f64); + +// CHECK-LABEL: @fpowi_64x4 +#[no_mangle] +pub unsafe fn fpowi_64x4(a: f64x4, b: i32) -> f64x4 { + // CHECK: call <4 x double> @llvm.powi.v4f64 + simd_fpowi(a, b) +} + +// CHECK-LABEL: @fpowi_64x2 +#[no_mangle] +pub unsafe fn fpowi_64x2(a: f64x2, b: i32) -> f64x2 { + // CHECK: call <2 x double> @llvm.powi.v2f64 + simd_fpowi(a, b) +} + +// CHECK-LABEL: @fpowi_64x8 +#[no_mangle] +pub unsafe fn fpowi_64x8(a: f64x8, b: i32) -> f64x8 { + // CHECK: call <8 x double> @llvm.powi.v8f64 + simd_fpowi(a, b) +} diff --git a/src/test/codegen/simd-intrinsic-float-sin.rs b/src/test/codegen/simd-intrinsic-float-sin.rs new file mode 100644 index 00000000000..15720462db7 --- /dev/null +++ b/src/test/codegen/simd-intrinsic-float-sin.rs @@ -0,0 +1,104 @@ +// Copyright 2016 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. 
+ +// ignore-emscripten + +// compile-flags: -C no-prepopulate-passes + +#![crate_type = "lib"] + +#![feature(repr_simd, platform_intrinsics)] +#![allow(non_camel_case_types)] + +#[repr(simd)] +#[derive(Copy, Clone, PartialEq, Debug)] +pub struct f32x2(pub f32, pub f32); + +#[repr(simd)] +#[derive(Copy, Clone, PartialEq, Debug)] +pub struct f32x4(pub f32, pub f32, pub f32, pub f32); + +#[repr(simd)] +#[derive(Copy, Clone, PartialEq, Debug)] +pub struct f32x8(pub f32, pub f32, pub f32, pub f32, + pub f32, pub f32, pub f32, pub f32); + +#[repr(simd)] +#[derive(Copy, Clone, PartialEq, Debug)] +pub struct f32x16(pub f32, pub f32, pub f32, pub f32, + pub f32, pub f32, pub f32, pub f32, + pub f32, pub f32, pub f32, pub f32, + pub f32, pub f32, pub f32, pub f32); + +extern "platform-intrinsic" { + fn simd_fsin(x: T) -> T; +} + +// CHECK-LABEL: @fsin_32x2 +#[no_mangle] +pub unsafe fn fsin_32x2(a: f32x2) -> f32x2 { + // CHECK: call <2 x float> @llvm.sin.v2f32 + simd_fsin(a) +} + +// CHECK-LABEL: @fsin_32x4 +#[no_mangle] +pub unsafe fn fsin_32x4(a: f32x4) -> f32x4 { + // CHECK: call <4 x float> @llvm.sin.v4f32 + simd_fsin(a) +} + +// CHECK-LABEL: @fsin_32x8 +#[no_mangle] +pub unsafe fn fsin_32x8(a: f32x8) -> f32x8 { + // CHECK: call <8 x float> @llvm.sin.v8f32 + simd_fsin(a) +} + +// CHECK-LABEL: @fsin_32x16 +#[no_mangle] +pub unsafe fn fsin_32x16(a: f32x16) -> f32x16 { + // CHECK: call <16 x float> @llvm.sin.v16f32 + simd_fsin(a) +} + +#[repr(simd)] +#[derive(Copy, Clone, PartialEq, Debug)] +pub struct f64x2(pub f64, pub f64); + +#[repr(simd)] +#[derive(Copy, Clone, PartialEq, Debug)] +pub struct f64x4(pub f64, pub f64, pub f64, pub f64); + +#[repr(simd)] +#[derive(Copy, Clone, PartialEq, Debug)] +pub struct f64x8(pub f64, pub f64, pub f64, pub f64, + pub f64, pub f64, pub f64, pub f64); + +// CHECK-LABEL: @fsin_64x4 +#[no_mangle] +pub unsafe fn fsin_64x4(a: f64x4) -> f64x4 { + // CHECK: call <4 x double> @llvm.sin.v4f64 + simd_fsin(a) +} + +// CHECK-LABEL: @fsin_64x2 +#[no_mangle] +pub unsafe fn fsin_64x2(a: f64x2) -> f64x2 { + // CHECK: call <2 x double> @llvm.sin.v2f64 + simd_fsin(a) +} + +// CHECK-LABEL: @fsin_64x8 +#[no_mangle] +pub unsafe fn fsin_64x8(a: f64x8) -> f64x8 { + // CHECK: call <8 x double> @llvm.sin.v8f64 + simd_fsin(a) +} diff --git a/src/test/codegen/simd-intrinsic-generic-gather.rs b/src/test/codegen/simd-intrinsic-generic-gather.rs new file mode 100644 index 00000000000..58876d21978 --- /dev/null +++ b/src/test/codegen/simd-intrinsic-generic-gather.rs @@ -0,0 +1,46 @@ +// Copyright 2016 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. 
+
+// ignore-emscripten
+
+// compile-flags: -C no-prepopulate-passes
+
+#![crate_type = "lib"]
+
+#![feature(repr_simd, platform_intrinsics)]
+#![allow(non_camel_case_types)]
+
+#[repr(simd)]
+#[derive(Copy, Clone, PartialEq, Debug)]
+pub struct Vec2<T>(pub T, pub T);
+
+#[repr(simd)]
+#[derive(Copy, Clone, PartialEq, Debug)]
+pub struct Vec4<T>(pub T, pub T, pub T, pub T);
+
+extern "platform-intrinsic" {
+    fn simd_gather<T, P, M>(value: T, pointers: P, mask: M) -> T;
+}
+
+// CHECK-LABEL: @gather_f32x2
+#[no_mangle]
+pub unsafe fn gather_f32x2(pointers: Vec2<*const f32>, mask: Vec2<i32>,
+                           values: Vec2<f32>) -> Vec2<f32> {
+    // CHECK: call <2 x float> @llvm.masked.gather.v2f32.v2p0f32(<2 x float*> {{.*}}, i32 {{.*}}, <2 x i1> {{.*}}, <2 x float> {{.*}})
+    simd_gather(values, pointers, mask)
+}
+
+// CHECK-LABEL: @gather_pf32x2
+#[no_mangle]
+pub unsafe fn gather_pf32x2(pointers: Vec2<*const *const f32>, mask: Vec2<i32>,
+                            values: Vec2<*const f32>) -> Vec2<*const f32> {
+    // CHECK: call <2 x float*> @llvm.masked.gather.v2p0f32.v2p0p0f32(<2 x float**> {{.*}}, i32 {{.*}}, <2 x i1> {{.*}}, <2 x float*> {{.*}})
+    simd_gather(values, pointers, mask)
+}
diff --git a/src/test/codegen/simd-intrinsic-generic-scatter.rs b/src/test/codegen/simd-intrinsic-generic-scatter.rs
new file mode 100644
index 00000000000..44bb4b71259
--- /dev/null
+++ b/src/test/codegen/simd-intrinsic-generic-scatter.rs
@@ -0,0 +1,46 @@
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// ignore-emscripten
+
+// compile-flags: -C no-prepopulate-passes
+
+#![crate_type = "lib"]
+
+#![feature(repr_simd, platform_intrinsics)]
+#![allow(non_camel_case_types)]
+
+#[repr(simd)]
+#[derive(Copy, Clone, PartialEq, Debug)]
+pub struct Vec2<T>(pub T, pub T);
+
+#[repr(simd)]
+#[derive(Copy, Clone, PartialEq, Debug)]
+pub struct Vec4<T>(pub T, pub T, pub T, pub T);
+
+extern "platform-intrinsic" {
+    fn simd_scatter<T, P, M>(value: T, pointers: P, mask: M);
+}
+
+// CHECK-LABEL: @scatter_f32x2
+#[no_mangle]
+pub unsafe fn scatter_f32x2(pointers: Vec2<*mut f32>, mask: Vec2<i32>,
+                            values: Vec2<f32>) {
+    // CHECK: call void @llvm.masked.scatter.v2f32.v2p0f32(<2 x float> {{.*}}, <2 x float*> {{.*}}, i32 {{.*}}, <2 x i1> {{.*}})
+    simd_scatter(values, pointers, mask)
+}
+
+// CHECK-LABEL: @scatter_pf32x2
+#[no_mangle]
+pub unsafe fn scatter_pf32x2(pointers: Vec2<*mut *const f32>, mask: Vec2<i32>,
+                             values: Vec2<*const f32>) {
+    // CHECK: call void @llvm.masked.scatter.v2p0f32.v2p0p0f32(<2 x float*> {{.*}}, <2 x float**> {{.*}}, i32 {{.*}}, <2 x i1> {{.*}})
+    simd_scatter(values, pointers, mask)
+}
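
Usage sketch (not part of the patch): the codegen tests above only check the emitted LLVM IR. For a quick end-to-end sanity check on a nightly compiler, something along these lines should work; the vector type `f32x4`, the input values, and the expected lane-wise results are illustrative assumptions of mine, while the intrinsic names and signatures (`simd_fsqrt`, `simd_fma`) come from this patch.

#![feature(repr_simd, platform_intrinsics)]
#![allow(non_camel_case_types)]

#[repr(simd)]
#[derive(Copy, Clone, PartialEq, Debug)]
struct f32x4(f32, f32, f32, f32);

extern "platform-intrinsic" {
    fn simd_fsqrt<T>(x: T) -> T;
    fn simd_fma<T>(x: T, y: T, z: T) -> T;
}

fn main() {
    let x = f32x4(1.0, 4.0, 9.0, 16.0);
    let y = f32x4(2.0, 2.0, 2.0, 2.0);
    let z = f32x4(1.0, 1.0, 1.0, 1.0);
    unsafe {
        // Lane-wise square root; lowers to llvm.sqrt.v4f32.
        assert_eq!(simd_fsqrt(x), f32x4(1.0, 2.0, 3.0, 4.0));
        // Lane-wise fused multiply-add (x * y + z); lowers to llvm.fma.v4f32.
        assert_eq!(simd_fma(x, y, z), f32x4(3.0, 9.0, 19.0, 33.0));
    }
}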
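A second sketch for the gather/scatter pair, again illustrative rather than part of the patch. It assumes the mask convention implemented above (signed-integer lanes, truncated to i1, so -1 enables a lane and 0 disables it) and the LLVM llvm.masked.gather / llvm.masked.scatter semantics (a disabled gather lane returns the corresponding lane of the `values` argument, which the codegen passes as the passthrough operand; a disabled scatter lane leaves memory untouched). The struct names `cptrx2`, `mptrx2`, `f32x2`, `i32x2` and the buffer contents are hypothetical.

#![feature(repr_simd, platform_intrinsics)]
#![allow(non_camel_case_types)]

#[repr(simd)]
#[derive(Copy, Clone, PartialEq, Debug)]
struct f32x2(f32, f32);

#[repr(simd)]
#[derive(Copy, Clone, PartialEq, Debug)]
struct i32x2(i32, i32);

#[repr(simd)]
#[derive(Copy, Clone)]
struct cptrx2(*const f32, *const f32);

#[repr(simd)]
#[derive(Copy, Clone)]
struct mptrx2(*mut f32, *mut f32);

extern "platform-intrinsic" {
    fn simd_gather<T, P, M>(values: T, pointers: P, mask: M) -> T;
    fn simd_scatter<T, P, M>(values: T, pointers: P, mask: M);
}

fn main() {
    let mut buf = [10.0_f32, 20.0, 30.0, 40.0];

    unsafe {
        // Gather buf[0] and buf[3]; lane 1 is disabled (mask lane == 0),
        // so that lane of the result comes from `defaults` instead of memory.
        let ptrs = cptrx2(&buf[0] as *const f32, &buf[3] as *const f32);
        let defaults = f32x2(-1.0, -1.0);
        let loaded = simd_gather(defaults, ptrs, i32x2(-1, 0));
        assert_eq!(loaded, f32x2(10.0, -1.0));

        // Scatter 99.0 through lane 0 only; lane 1 is disabled, so buf[3]
        // is left untouched.
        let p0 = &mut buf[0] as *mut f32;
        let p3 = &mut buf[3] as *mut f32;
        simd_scatter(f32x2(99.0, 99.0), mptrx2(p0, p3), i32x2(-1, 0));
        assert_eq!(buf, [99.0, 20.0, 30.0, 40.0]);
    }
}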