// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.

#![allow(non_upper_case_globals)]
use intrinsics::{self, Intrinsic};
use llvm;
use llvm::ValueRef;
use abi::{Abi, FnType, PassMode};
use mir::place::{PlaceRef, Alignment};
use mir::operand::{OperandRef, OperandValue};
use base::*;
use common::*;
use declare;
use glue;
use type_::Type;
use type_of::LayoutLlvmExt;
use rustc::ty::{self, Ty};
use rustc::ty::layout::{HasDataLayout, LayoutOf};
use rustc::hir;
use syntax::ast;
use syntax::symbol::Symbol;
use builder::Builder;

use rustc::session::Session;
use syntax_pos::Span;

use std::cmp::Ordering;
use std::iter;
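
// Maps a Rust intrinsic name to the LLVM intrinsic it lowers to directly;
// e.g. the `sqrtf32` intrinsic becomes a plain call to `llvm.sqrt.f32`.
// Returns `None` for intrinsics that need custom handling in
// `trans_intrinsic_call` below.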
fn get_simple_intrinsic(ccx: &CrateContext, name: &str) -> Option<ValueRef> {
    let llvm_name = match name {
        "sqrtf32" => "llvm.sqrt.f32",
        "sqrtf64" => "llvm.sqrt.f64",
        "powif32" => "llvm.powi.f32",
        "powif64" => "llvm.powi.f64",
        "sinf32" => "llvm.sin.f32",
        "sinf64" => "llvm.sin.f64",
        "cosf32" => "llvm.cos.f32",
        "cosf64" => "llvm.cos.f64",
        "powf32" => "llvm.pow.f32",
        "powf64" => "llvm.pow.f64",
        "expf32" => "llvm.exp.f32",
        "expf64" => "llvm.exp.f64",
        "exp2f32" => "llvm.exp2.f32",
        "exp2f64" => "llvm.exp2.f64",
        "logf32" => "llvm.log.f32",
        "logf64" => "llvm.log.f64",
        "log10f32" => "llvm.log10.f32",
        "log10f64" => "llvm.log10.f64",
        "log2f32" => "llvm.log2.f32",
        "log2f64" => "llvm.log2.f64",
        "fmaf32" => "llvm.fma.f32",
        "fmaf64" => "llvm.fma.f64",
        "fabsf32" => "llvm.fabs.f32",
        "fabsf64" => "llvm.fabs.f64",
        "copysignf32" => "llvm.copysign.f32",
        "copysignf64" => "llvm.copysign.f64",
        "floorf32" => "llvm.floor.f32",
        "floorf64" => "llvm.floor.f64",
        "ceilf32" => "llvm.ceil.f32",
        "ceilf64" => "llvm.ceil.f64",
        "truncf32" => "llvm.trunc.f32",
        "truncf64" => "llvm.trunc.f64",
        "rintf32" => "llvm.rint.f32",
        "rintf64" => "llvm.rint.f64",
        "nearbyintf32" => "llvm.nearbyint.f32",
        "nearbyintf64" => "llvm.nearbyint.f64",
        "roundf32" => "llvm.round.f32",
        "roundf64" => "llvm.round.f64",
        "assume" => "llvm.assume",
        "abort" => "llvm.trap",
        _ => return None
    };
    Some(ccx.get_intrinsic(&llvm_name))
}

/// Remember to add all intrinsics here, in librustc_typeck/check/mod.rs,
/// and in libcore/intrinsics.rs; if you need access to any llvm intrinsics,
/// add them to librustc_trans/trans/context.rs
pub fn trans_intrinsic_call<'a, 'tcx>(bcx: &Builder<'a, 'tcx>,
                                      callee_ty: Ty<'tcx>,
                                      fn_ty: &FnType<'tcx>,
                                      args: &[OperandRef<'tcx>],
                                      llresult: ValueRef,
                                      span: Span) {
    let ccx = bcx.ccx;
    let tcx = ccx.tcx();

    let (def_id, substs) = match callee_ty.sty {
        ty::TyFnDef(def_id, substs) => (def_id, substs),
        _ => bug!("expected fn item type, found {}", callee_ty)
    };

    let sig = callee_ty.fn_sig(tcx);
    let sig = tcx.erase_late_bound_regions_and_normalize(&sig);
    let arg_tys = sig.inputs();
    let ret_ty = sig.output();
    let name = &*tcx.item_name(def_id);

    let llret_ty = ccx.layout_of(ret_ty).llvm_type(ccx);
    let result = PlaceRef::new_sized(llresult, fn_ty.ret.layout, Alignment::AbiAligned);

    let simple = get_simple_intrinsic(ccx, name);
    let llval = match name {
        _ if simple.is_some() => {
            bcx.call(simple.unwrap(),
                     &args.iter().map(|arg| arg.immediate()).collect::<Vec<_>>(),
                     None)
        }
        "unreachable" => {
            return;
        },
        "likely" => {
            let expect = ccx.get_intrinsic(&("llvm.expect.i1"));
            bcx.call(expect, &[args[0].immediate(), C_bool(ccx, true)], None)
        }
        "unlikely" => {
            let expect = ccx.get_intrinsic(&("llvm.expect.i1"));
            bcx.call(expect, &[args[0].immediate(), C_bool(ccx, false)], None)
        }
        "try" => {
            try_intrinsic(bcx, ccx,
                          args[0].immediate(),
                          args[1].immediate(),
                          args[2].immediate(),
                          llresult);
            return;
        }
        "breakpoint" => {
            let llfn = ccx.get_intrinsic(&("llvm.debugtrap"));
            bcx.call(llfn, &[], None)
        }
        "size_of" => {
            let tp_ty = substs.type_at(0);
            C_usize(ccx, ccx.size_of(tp_ty).bytes())
        }
        "size_of_val" => {
            let tp_ty = substs.type_at(0);
            if let OperandValue::Pair(_, meta) = args[0].val {
                let (llsize, _) =
                    glue::size_and_align_of_dst(bcx, tp_ty, meta);
                llsize
            } else {
                C_usize(ccx, ccx.size_of(tp_ty).bytes())
            }
        }
        "min_align_of" => {
            let tp_ty = substs.type_at(0);
            C_usize(ccx, ccx.align_of(tp_ty).abi())
        }
        "min_align_of_val" => {
            let tp_ty = substs.type_at(0);
            if let OperandValue::Pair(_, meta) = args[0].val {
                let (_, llalign) =
                    glue::size_and_align_of_dst(bcx, tp_ty, meta);
                llalign
            } else {
                C_usize(ccx, ccx.align_of(tp_ty).abi())
            }
        }
        "pref_align_of" => {
            let tp_ty = substs.type_at(0);
            C_usize(ccx, ccx.align_of(tp_ty).pref())
        }
        "type_name" => {
            let tp_ty = substs.type_at(0);
            let ty_name = Symbol::intern(&tp_ty.to_string()).as_str();
            C_str_slice(ccx, ty_name)
        }
        "type_id" => {
            C_u64(ccx, ccx.tcx().type_id_hash(substs.type_at(0)))
        }
        "init" => {
            let ty = substs.type_at(0);
            if !ccx.layout_of(ty).is_zst() {
                // Just zero out the stack slot.
                // If we store a zero constant, LLVM will drown in vreg allocation for large data
                // structures, and the generated code will be awful. (A telltale sign of this is
                // large quantities of `mov [byte ptr foo],0` in the generated code.)
                memset_intrinsic(bcx, false, ty, llresult, C_u8(ccx, 0), C_usize(ccx, 1));
            }
            return;
        }
        // Effectively no-ops
        "uninit" => {
            return;
        }
        "needs_drop" => {
            let tp_ty = substs.type_at(0);

            C_bool(ccx, bcx.ccx.shared().type_needs_drop(tp_ty))
        }
        "offset" => {
            let ptr = args[0].immediate();
            let offset = args[1].immediate();
            bcx.inbounds_gep(ptr, &[offset])
        }
        "arith_offset" => {
            let ptr = args[0].immediate();
            let offset = args[1].immediate();
            bcx.gep(ptr, &[offset])
        }

        "copy_nonoverlapping" => {
            copy_intrinsic(bcx, false, false, substs.type_at(0),
                           args[1].immediate(), args[0].immediate(), args[2].immediate())
        }
        "copy" => {
            copy_intrinsic(bcx, true, false, substs.type_at(0),
                           args[1].immediate(), args[0].immediate(), args[2].immediate())
        }
        "write_bytes" => {
            memset_intrinsic(bcx, false, substs.type_at(0),
                             args[0].immediate(), args[1].immediate(), args[2].immediate())
        }

        "volatile_copy_nonoverlapping_memory" => {
            copy_intrinsic(bcx, false, true, substs.type_at(0),
                           args[0].immediate(), args[1].immediate(), args[2].immediate())
        }
        "volatile_copy_memory" => {
            copy_intrinsic(bcx, true, true, substs.type_at(0),
                           args[0].immediate(), args[1].immediate(), args[2].immediate())
        }
        "volatile_set_memory" => {
            memset_intrinsic(bcx, true, substs.type_at(0),
                             args[0].immediate(), args[1].immediate(), args[2].immediate())
        }
        "volatile_load" => {
            let tp_ty = substs.type_at(0);
            let mut ptr = args[0].immediate();
            if let PassMode::Cast(ty) = fn_ty.ret.mode {
                ptr = bcx.pointercast(ptr, ty.llvm_type(ccx).ptr_to());
            }
            let load = bcx.volatile_load(ptr);
            unsafe {
                llvm::LLVMSetAlignment(load, ccx.align_of(tp_ty).abi() as u32);
            }
            to_immediate(bcx, load, ccx.layout_of(tp_ty))
        }
        "volatile_store" => {
            let tp_ty = substs.type_at(0);
            let dst = args[0].deref(bcx.ccx);
            if let OperandValue::Pair(a, b) = args[1].val {
                bcx.volatile_store(a, dst.project_field(bcx, 0).llval);
                bcx.volatile_store(b, dst.project_field(bcx, 1).llval);
            } else {
                let val = if let OperandValue::Ref(ptr, align) = args[1].val {
                    bcx.load(ptr, align.non_abi())
                } else {
                    if dst.layout.is_zst() {
                        return;
                    }
                    from_immediate(bcx, args[1].immediate())
                };
                let ptr = bcx.pointercast(dst.llval, val_ty(val).ptr_to());
                let store = bcx.volatile_store(val, ptr);
                unsafe {
                    llvm::LLVMSetAlignment(store, ccx.align_of(tp_ty).abi() as u32);
                }
            }
            return;
        },
        "prefetch_read_data" | "prefetch_write_data" |
        "prefetch_read_instruction" | "prefetch_write_instruction" => {
            let expect = ccx.get_intrinsic(&("llvm.prefetch"));
            let (rw, cache_type) = match name {
                "prefetch_read_data" => (0, 1),
                "prefetch_write_data" => (1, 1),
                "prefetch_read_instruction" => (0, 0),
                "prefetch_write_instruction" => (1, 0),
                _ => bug!()
            };
            bcx.call(expect, &[
                args[0].immediate(),
                C_i32(ccx, rw),
                args[1].immediate(),
                C_i32(ccx, cache_type)
            ], None)
        }
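
        // For reference, a call like `ctlz::<u16>(x)` lowers to
        // `call i16 @llvm.ctlz.i16(i16 %x, i1 false)` through the
        // `llvm.{name}.i{width}` naming scheme used by the arm below.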
289 "ctlz" | "ctlz_nonzero" | "cttz" | "cttz_nonzero" | "ctpop" | "bswap" |
290 "add_with_overflow" | "sub_with_overflow" | "mul_with_overflow" |
291 "overflowing_add" | "overflowing_sub" | "overflowing_mul" |
292 "unchecked_div" | "unchecked_rem" | "unchecked_shl" | "unchecked_shr" => {
294 match int_type_width_signed(ty, ccx) {
295 Some((width, signed)) =>
298 let y = C_bool(bcx.ccx, false);
299 let llfn = ccx.get_intrinsic(&format!("llvm.{}.i{}", name, width));
300 bcx.call(llfn, &[args[0].immediate(), y], None)
302 "ctlz_nonzero" | "cttz_nonzero" => {
303 let y = C_bool(bcx.ccx, true);
304 let llvm_name = &format!("llvm.{}.i{}", &name[..4], width);
305 let llfn = ccx.get_intrinsic(llvm_name);
306 bcx.call(llfn, &[args[0].immediate(), y], None)
308 "ctpop" => bcx.call(ccx.get_intrinsic(&format!("llvm.ctpop.i{}", width)),
309 &[args[0].immediate()], None),
312 args[0].immediate() // byte swap a u8/i8 is just a no-op
314 bcx.call(ccx.get_intrinsic(&format!("llvm.bswap.i{}", width)),
315 &[args[0].immediate()], None)
318 "add_with_overflow" | "sub_with_overflow" | "mul_with_overflow" => {
319 let intrinsic = format!("llvm.{}{}.with.overflow.i{}",
320 if signed { 's' } else { 'u' },
322 let llfn = bcx.ccx.get_intrinsic(&intrinsic);
324 // Convert `i1` to a `bool`, and write it to the out parameter
325 let pair = bcx.call(llfn, &[
329 let val = bcx.extract_value(pair, 0);
330 let overflow = bcx.zext(bcx.extract_value(pair, 1), Type::bool(ccx));
332 let dest = result.project_field(bcx, 0);
333 bcx.store(val, dest.llval, dest.alignment.non_abi());
334 let dest = result.project_field(bcx, 1);
335 bcx.store(overflow, dest.llval, dest.alignment.non_abi());
339 "overflowing_add" => bcx.add(args[0].immediate(), args[1].immediate()),
340 "overflowing_sub" => bcx.sub(args[0].immediate(), args[1].immediate()),
341 "overflowing_mul" => bcx.mul(args[0].immediate(), args[1].immediate()),
344 bcx.sdiv(args[0].immediate(), args[1].immediate())
346 bcx.udiv(args[0].immediate(), args[1].immediate())
350 bcx.srem(args[0].immediate(), args[1].immediate())
352 bcx.urem(args[0].immediate(), args[1].immediate())
354 "unchecked_shl" => bcx.shl(args[0].immediate(), args[1].immediate()),
357 bcx.ashr(args[0].immediate(), args[1].immediate())
359 bcx.lshr(args[0].immediate(), args[1].immediate())
364 span_invalid_monomorphization_error(
366 &format!("invalid monomorphization of `{}` intrinsic: \
367 expected basic integer type, found `{}`", name, ty));
373 "fadd_fast" | "fsub_fast" | "fmul_fast" | "fdiv_fast" | "frem_fast" => {
374 let sty = &arg_tys[0].sty;
375 match float_type_width(sty) {
378 "fadd_fast" => bcx.fadd_fast(args[0].immediate(), args[1].immediate()),
379 "fsub_fast" => bcx.fsub_fast(args[0].immediate(), args[1].immediate()),
380 "fmul_fast" => bcx.fmul_fast(args[0].immediate(), args[1].immediate()),
381 "fdiv_fast" => bcx.fdiv_fast(args[0].immediate(), args[1].immediate()),
382 "frem_fast" => bcx.frem_fast(args[0].immediate(), args[1].immediate()),
386 span_invalid_monomorphization_error(
388 &format!("invalid monomorphization of `{}` intrinsic: \
389 expected basic float type, found `{}`", name, sty));
396 "discriminant_value" => {
397 args[0].deref(bcx.ccx).trans_get_discr(bcx, ret_ty)
402 let ptr_val = bcx.ptrtoint(args[0].immediate(), bcx.ccx.isize_ty());
404 let align = args[1].immediate();
405 let offset = bcx.urem(ptr_val, align);
406 let zero = C_null(bcx.ccx.isize_ty());
408 let is_zero = bcx.icmp(llvm::IntPredicate::IntEQ, offset, zero);
409 // `if offset == 0 { 0 } else { offset - align }`
410 bcx.select(is_zero, zero, bcx.sub(offset, align))
        name if name.starts_with("simd_") => {
            match generic_simd_intrinsic(bcx, name,
                                         callee_ty,
                                         args,
                                         ret_ty, llret_ty,
                                         span) {
                Ok(llval) => llval,
                Err(()) => return
            }
        }
        // This requires that atomic intrinsics follow a specific naming pattern:
        // "atomic_<operation>[_<ordering>]", and no ordering means SeqCst
        name if name.starts_with("atomic_") => {
            use llvm::AtomicOrdering::*;

            let split: Vec<&str> = name.split('_').collect();

            let is_cxchg = split[1] == "cxchg" || split[1] == "cxchgweak";
            let (order, failorder) = match split.len() {
                2 => (SequentiallyConsistent, SequentiallyConsistent),
                3 => match split[2] {
                    "unordered" => (Unordered, Unordered),
                    "relaxed" => (Monotonic, Monotonic),
                    "acq" => (Acquire, Acquire),
                    "rel" => (Release, Monotonic),
                    "acqrel" => (AcquireRelease, Acquire),
                    "failrelaxed" if is_cxchg =>
                        (SequentiallyConsistent, Monotonic),
                    "failacq" if is_cxchg =>
                        (SequentiallyConsistent, Acquire),
                    _ => ccx.sess().fatal("unknown ordering in atomic intrinsic")
                },
                4 => match (split[2], split[3]) {
                    ("acq", "failrelaxed") if is_cxchg =>
                        (Acquire, Monotonic),
                    ("acqrel", "failrelaxed") if is_cxchg =>
                        (AcquireRelease, Monotonic),
                    _ => ccx.sess().fatal("unknown ordering in atomic intrinsic")
                },
                _ => ccx.sess().fatal("Atomic intrinsic not in correct format"),
            };

            let invalid_monomorphization = |ty| {
                span_invalid_monomorphization_error(tcx.sess, span,
                    &format!("invalid monomorphization of `{}` intrinsic: \
                              expected basic integer type, found `{}`", name, ty));
            };

            match split[1] {
                "cxchg" | "cxchgweak" => {
                    let ty = substs.type_at(0);
                    if int_type_width_signed(ty, ccx).is_some() {
                        let weak = if split[1] == "cxchgweak" { llvm::True } else { llvm::False };
                        let pair = bcx.atomic_cmpxchg(
                            args[0].immediate(),
                            args[1].immediate(),
                            args[2].immediate(),
                            order,
                            failorder,
                            weak);
                        let val = bcx.extract_value(pair, 0);
                        let success = bcx.zext(bcx.extract_value(pair, 1), Type::bool(bcx.ccx));

                        let dest = result.project_field(bcx, 0);
                        bcx.store(val, dest.llval, dest.alignment.non_abi());
                        let dest = result.project_field(bcx, 1);
                        bcx.store(success, dest.llval, dest.alignment.non_abi());
                        return;
                    } else {
                        return invalid_monomorphization(ty);
                    }
                }

                "load" => {
                    let ty = substs.type_at(0);
                    if int_type_width_signed(ty, ccx).is_some() {
                        let align = ccx.align_of(ty);
                        bcx.atomic_load(args[0].immediate(), order, align)
                    } else {
                        return invalid_monomorphization(ty);
                    }
                }

                "store" => {
                    let ty = substs.type_at(0);
                    if int_type_width_signed(ty, ccx).is_some() {
                        let align = ccx.align_of(ty);
                        bcx.atomic_store(args[1].immediate(), args[0].immediate(), order, align);
                        return;
                    } else {
                        return invalid_monomorphization(ty);
                    }
                }

                "fence" => {
                    bcx.atomic_fence(order, llvm::SynchronizationScope::CrossThread);
                    return;
                }

                "singlethreadfence" => {
                    bcx.atomic_fence(order, llvm::SynchronizationScope::SingleThread);
                    return;
                }

                // These are all AtomicRMW ops
                op => {
                    let atom_op = match op {
                        "xchg" => llvm::AtomicXchg,
                        "xadd" => llvm::AtomicAdd,
                        "xsub" => llvm::AtomicSub,
                        "and" => llvm::AtomicAnd,
                        "nand" => llvm::AtomicNand,
                        "or" => llvm::AtomicOr,
                        "xor" => llvm::AtomicXor,
                        "max" => llvm::AtomicMax,
                        "min" => llvm::AtomicMin,
                        "umax" => llvm::AtomicUMax,
                        "umin" => llvm::AtomicUMin,
                        _ => ccx.sess().fatal("unknown atomic operation")
                    };

                    let ty = substs.type_at(0);
                    if int_type_width_signed(ty, ccx).is_some() {
                        bcx.atomic_rmw(atom_op, args[0].immediate(), args[1].immediate(), order)
                    } else {
                        return invalid_monomorphization(ty);
                    }
                }
            }
        }
543 "nontemporal_store" => {
544 let tp_ty = substs.type_at(0);
545 let dst = args[0].deref(bcx.ccx);
546 let val = if let OperandValue::Ref(ptr, align) = args[1].val {
547 bcx.load(ptr, align.non_abi())
549 from_immediate(bcx, args[1].immediate())
551 let ptr = bcx.pointercast(dst.llval, val_ty(val).ptr_to());
552 let store = bcx.nontemporal_store(val, ptr);
554 llvm::LLVMSetAlignment(store, ccx.align_of(tp_ty).abi() as u32);

        _ => {
            let intr = match Intrinsic::find(&name) {
                Some(intr) => intr,
                None => bug!("unknown intrinsic '{}'", name),
            };
            fn one<T>(x: Vec<T>) -> T {
                assert_eq!(x.len(), 1);
                x.into_iter().next().unwrap()
            }
            fn ty_to_type(ccx: &CrateContext, t: &intrinsics::Type) -> Vec<Type> {
                use intrinsics::Type::*;
                match *t {
                    Void => vec![Type::void(ccx)],
                    Integer(_signed, _width, llvm_width) => {
                        vec![Type::ix(ccx, llvm_width as u64)]
                    }
                    Float(x) => {
                        match x {
                            32 => vec![Type::f32(ccx)],
                            64 => vec![Type::f64(ccx)],
                            _ => bug!()
                        }
                    }
                    Pointer(ref t, ref llvm_elem, _const) => {
                        let t = llvm_elem.as_ref().unwrap_or(t);
                        let elem = one(ty_to_type(ccx, t));
                        vec![elem.ptr_to()]
                    }
                    Vector(ref t, ref llvm_elem, length) => {
                        let t = llvm_elem.as_ref().unwrap_or(t);
                        let elem = one(ty_to_type(ccx, t));
                        vec![Type::vector(&elem, length as u64)]
                    }
                    Aggregate(false, ref contents) => {
                        let elems = contents.iter()
                                            .map(|t| one(ty_to_type(ccx, t)))
                                            .collect::<Vec<_>>();
                        vec![Type::struct_(ccx, &elems, false)]
                    }
                    Aggregate(true, ref contents) => {
                        contents.iter()
                                .flat_map(|t| ty_to_type(ccx, t))
                                .collect()
                    }
                }
            }

            // This allows an argument list like `foo, (bar, baz),
            // qux` to be converted into `foo, bar, baz, qux`, integer
            // arguments to be truncated as needed, and pointers to be
            // cast.
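            // For example, an intrinsic taking `(i32x4, i32x4), i32` is
            // handed to LLVM as three arguments: the two vector fields
            // loaded out of the tuple, followed by the plain `i32`.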
            fn modify_as_needed<'a, 'tcx>(bcx: &Builder<'a, 'tcx>,
                                          t: &intrinsics::Type,
                                          arg: &OperandRef<'tcx>)
                                          -> Vec<ValueRef>
            {
                match *t {
                    intrinsics::Type::Aggregate(true, ref contents) => {
                        // We found a tuple that needs squishing! So
                        // run over the tuple and load each field.
                        //
                        // This assumes the type is "simple", i.e. no
                        // destructors, and the contents are SIMD
                        // etc.
                        assert!(!bcx.ccx.shared().type_needs_drop(arg.layout.ty));
                        let (ptr, align) = match arg.val {
                            OperandValue::Ref(ptr, align) => (ptr, align),
                            _ => bug!()
                        };
                        let arg = PlaceRef::new_sized(ptr, arg.layout, align);
                        (0..contents.len()).map(|i| {
                            arg.project_field(bcx, i).load(bcx).immediate()
                        }).collect()
                    }
                    intrinsics::Type::Pointer(_, Some(ref llvm_elem), _) => {
                        let llvm_elem = one(ty_to_type(bcx.ccx, llvm_elem));
                        vec![bcx.pointercast(arg.immediate(), llvm_elem.ptr_to())]
                    }
                    intrinsics::Type::Vector(_, Some(ref llvm_elem), length) => {
                        let llvm_elem = one(ty_to_type(bcx.ccx, llvm_elem));
                        vec![bcx.bitcast(arg.immediate(), Type::vector(&llvm_elem, length as u64))]
                    }
                    intrinsics::Type::Integer(_, width, llvm_width) if width != llvm_width => {
                        // the LLVM intrinsic uses a smaller integer
                        // size than the C intrinsic's signature, so
                        // we have to trim it down here.
                        vec![bcx.trunc(arg.immediate(), Type::ix(bcx.ccx, llvm_width as u64))]
                    }
                    _ => vec![arg.immediate()],
                }
            }

            let inputs = intr.inputs.iter()
                                    .flat_map(|t| ty_to_type(ccx, t))
                                    .collect::<Vec<_>>();

            let outputs = one(ty_to_type(ccx, &intr.output));

            let llargs: Vec<_> = intr.inputs.iter().zip(args).flat_map(|(t, arg)| {
                modify_as_needed(bcx, t, arg)
            }).collect();
            assert_eq!(inputs.len(), llargs.len());

            let val = match intr.definition {
                intrinsics::IntrinsicDef::Named(name) => {
                    let f = declare::declare_cfn(ccx,
                                                 name,
                                                 Type::func(&inputs, &outputs));
                    bcx.call(f, &llargs, None)
                }
            };

            match *intr.output {
                intrinsics::Type::Aggregate(flatten, ref elems) => {
                    // the output is a tuple so we need to munge it properly
                    assert!(!flatten);

                    for i in 0..elems.len() {
                        let dest = result.project_field(bcx, i);
                        let val = bcx.extract_value(val, i as u64);
                        bcx.store(val, dest.llval, dest.alignment.non_abi());
                    }

                    return;
                }
                _ => val,
            }
        }
    };

    if !fn_ty.ret.is_ignore() {
        if let PassMode::Cast(ty) = fn_ty.ret.mode {
            let ptr = bcx.pointercast(llresult, ty.llvm_type(ccx).ptr_to());
            bcx.store(llval, ptr, Some(ccx.align_of(ret_ty)));
        } else {
            OperandRef::from_immediate_or_packed_pair(bcx, llval, result.layout)
                .val.store(bcx, result);
        }
    }
}

fn copy_intrinsic<'a, 'tcx>(bcx: &Builder<'a, 'tcx>,
                            allow_overlap: bool, volatile: bool, ty: Ty<'tcx>,
                            dst: ValueRef, src: ValueRef, count: ValueRef) -> ValueRef {
    let ccx = bcx.ccx;
    let (size, align) = ccx.size_and_align_of(ty);
    let size = C_usize(ccx, size.bytes());
    let align = C_i32(ccx, align.abi() as i32);

    let operation = if allow_overlap { "memmove" } else { "memcpy" };

    let name = format!("llvm.{}.p0i8.p0i8.i{}", operation,
                       ccx.data_layout().pointer_size.bits());

    let dst_ptr = bcx.pointercast(dst, Type::i8p(ccx));
    let src_ptr = bcx.pointercast(src, Type::i8p(ccx));
    let llfn = ccx.get_intrinsic(&name);

    bcx.call(llfn,
             &[dst_ptr,
               src_ptr,
               bcx.mul(size, count),
               align,
               C_bool(ccx, volatile)],
             None)
}

fn memset_intrinsic<'a, 'tcx>(
    bcx: &Builder<'a, 'tcx>,
    volatile: bool,
    ty: Ty<'tcx>,
    dst: ValueRef,
    val: ValueRef,
    count: ValueRef
) -> ValueRef {
    let ccx = bcx.ccx;
    let (size, align) = ccx.size_and_align_of(ty);
    let size = C_usize(ccx, size.bytes());
    let align = C_i32(ccx, align.abi() as i32);
    let dst = bcx.pointercast(dst, Type::i8p(ccx));
    call_memset(bcx, dst, val, bcx.mul(size, count), align, volatile)
}

fn try_intrinsic<'a, 'tcx>(
    bcx: &Builder<'a, 'tcx>,
    ccx: &CrateContext,
    func: ValueRef,
    data: ValueRef,
    local_ptr: ValueRef,
    dest: ValueRef,
) {
    if bcx.sess().no_landing_pads() {
        bcx.call(func, &[data], None);
        bcx.store(C_null(Type::i8p(&bcx.ccx)), dest, None);
    } else if wants_msvc_seh(bcx.sess()) {
        trans_msvc_try(bcx, ccx, func, data, local_ptr, dest);
    } else {
        trans_gnu_try(bcx, ccx, func, data, local_ptr, dest);
    }
}

// MSVC's definition of the `rust_try` function.
//
// This implementation uses the new exception handling instructions in LLVM
// that LLVM supports for SEH on MSVC targets. These instructions are meant
// to work for all targets, but as of this writing LLVM does not recommend
// using them, as the older instructions are still better optimized.
fn trans_msvc_try<'a, 'tcx>(bcx: &Builder<'a, 'tcx>,
                            ccx: &CrateContext,
                            func: ValueRef,
                            data: ValueRef,
                            local_ptr: ValueRef,
                            dest: ValueRef) {
    let llfn = get_rust_try_fn(ccx, &mut |bcx| {
        let ccx = bcx.ccx;
        let tcx = ccx.tcx();

        bcx.set_personality_fn(bcx.ccx.eh_personality());

        let normal = bcx.build_sibling_block("normal");
        let catchswitch = bcx.build_sibling_block("catchswitch");
        let catchpad = bcx.build_sibling_block("catchpad");
        let caught = bcx.build_sibling_block("caught");

        let func = llvm::get_param(bcx.llfn(), 0);
        let data = llvm::get_param(bcx.llfn(), 1);
        let local_ptr = llvm::get_param(bcx.llfn(), 2);

        // We're generating an IR snippet that looks like:
        //
        //   declare i32 @rust_try(%func, %data, %ptr) {
        //      %slot = alloca i64*
        //      invoke %func(%data) to label %normal unwind label %catchswitch
        //
        //   normal:
        //      ret i32 0
        //
        //   catchswitch:
        //      %cs = catchswitch within none [%catchpad] unwind to caller
        //
        //   catchpad:
        //      %tok = catchpad within %cs [%type_descriptor, 0, %slot]
        //      %ptr[0] = %slot[0]
        //      %ptr[1] = %slot[1]
        //      catchret from %tok to label %caught
        //
        //   caught:
        //      ret i32 1
        //   }
        //
        // This structure follows the basic usage of throw/try/catch in LLVM.
        // For example, compile this C++ snippet to see what LLVM generates:
        //
        //      #include <stdint.h>
        //
        //      int bar(void (*foo)(void), uint64_t *ret) {
        //          try {
        //              foo();
        //              return 0;
        //          } catch(uint64_t a[2]) {
        //              ret[0] = a[0];
        //              ret[1] = a[1];
        //              return 1;
        //          }
        //      }
        //
        // More information can be found in libstd's seh.rs implementation.
        let i64p = Type::i64(ccx).ptr_to();
        let slot = bcx.alloca(i64p, "slot", ccx.data_layout().pointer_align);
        bcx.invoke(func, &[data], normal.llbb(), catchswitch.llbb(),
                   None);

        normal.ret(C_i32(ccx, 0));

        let cs = catchswitch.catch_switch(None, None, 1);
        catchswitch.add_handler(cs, catchpad.llbb());

        let tydesc = match tcx.lang_items().msvc_try_filter() {
            Some(did) => ::consts::get_static(ccx, did),
            None => bug!("msvc_try_filter not defined"),
        };
        let tok = catchpad.catch_pad(cs, &[tydesc, C_i32(ccx, 0), slot]);
        let addr = catchpad.load(slot, None);
        let arg1 = catchpad.load(addr, None);
        let val1 = C_i32(ccx, 1);
        let arg2 = catchpad.load(catchpad.inbounds_gep(addr, &[val1]), None);
        let local_ptr = catchpad.bitcast(local_ptr, i64p);
        catchpad.store(arg1, local_ptr, None);
        catchpad.store(arg2, catchpad.inbounds_gep(local_ptr, &[val1]), None);
        catchpad.catch_ret(tok, caught.llbb());

        caught.ret(C_i32(ccx, 1));
    });

    // Note that no invoke is used here because by definition this function
    // can't panic (that's what it's catching).
    let ret = bcx.call(llfn, &[func, data, local_ptr], None);
    bcx.store(ret, dest, None);
}

// Definition of the standard "try" function for Rust using the GNU-like model
// of exceptions (e.g. the normal semantics of LLVM's landingpad and invoke
// instructions).
//
// This translation is a little surprising because we always call a shim
// function instead of inlining the call to `invoke` manually here. This is done
// because in LLVM we're only allowed to have one personality per function
// definition. The call to the `try` intrinsic is being inlined into the
// function calling it, and that function may already have other personality
// functions in play. By calling a shim we're guaranteed that our shim will have
// the right personality function.
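//
// Concretely, the shim built below via `get_rust_try_fn` has roughly the Rust
// signature `unsafe fn(unsafe fn(*mut i8), *mut i8, *mut i8) -> i32`, and it
// returns 0 on normal completion and 1 when a panic was caught.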
fn trans_gnu_try<'a, 'tcx>(bcx: &Builder<'a, 'tcx>,
                           ccx: &CrateContext,
                           func: ValueRef,
                           data: ValueRef,
                           local_ptr: ValueRef,
                           dest: ValueRef) {
    let llfn = get_rust_try_fn(ccx, &mut |bcx| {
        let ccx = bcx.ccx;

        // Translates the shims described above:
        //
        //   bcx:
        //      invoke %func(%data) normal %normal unwind %catch
        //
        //   normal:
        //      ret 0
        //
        //   catch:
        //      (ptr, _) = landingpad
        //      store ptr, %local_ptr
        //      ret 1
        //
        // Note that the `local_ptr` data passed into the `try` intrinsic is
        // expected to be `*mut *mut u8` for this to actually work, but that's
        // managed by the standard library.

        let then = bcx.build_sibling_block("then");
        let catch = bcx.build_sibling_block("catch");

        let func = llvm::get_param(bcx.llfn(), 0);
        let data = llvm::get_param(bcx.llfn(), 1);
        let local_ptr = llvm::get_param(bcx.llfn(), 2);
        bcx.invoke(func, &[data], then.llbb(), catch.llbb(), None);
        then.ret(C_i32(ccx, 0));

        // Type indicator for the exception being thrown.
        //
        // The first value in this tuple is a pointer to the exception object
        // being thrown. The second value is a "selector" indicating which of
        // the landing pad clauses the exception's type was matched to.
        // rust_try ignores the selector.
        let lpad_ty = Type::struct_(ccx, &[Type::i8p(ccx), Type::i32(ccx)],
                                    false);
        let vals = catch.landing_pad(lpad_ty, bcx.ccx.eh_personality(), 1, catch.llfn());
        catch.add_clause(vals, C_null(Type::i8p(ccx)));
        let ptr = catch.extract_value(vals, 0);
        catch.store(ptr, catch.bitcast(local_ptr, Type::i8p(ccx).ptr_to()), None);
        catch.ret(C_i32(ccx, 1));
    });

    // Note that no invoke is used here because by definition this function
    // can't panic (that's what it's catching).
    let ret = bcx.call(llfn, &[func, data, local_ptr], None);
    bcx.store(ret, dest, None);
}

// Helper function to give a Block to a closure to translate a shim function.
// This is currently primarily used for the `try` intrinsic functions above.
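//
// For example, `get_rust_try_fn` below uses this to emit the `__rust_try`
// shim: it declares an internal function with the requested signature, hands
// the entry-block builder to `trans`, and returns the function value.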
fn gen_fn<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
                    name: &str,
                    inputs: Vec<Ty<'tcx>>,
                    output: Ty<'tcx>,
                    trans: &mut for<'b> FnMut(Builder<'b, 'tcx>))
                    -> ValueRef {
    let rust_fn_ty = ccx.tcx().mk_fn_ptr(ty::Binder(ccx.tcx().mk_fn_sig(
        inputs.into_iter(),
        output,
        false,
        hir::Unsafety::Unsafe,
        Abi::Rust
    )));
    let llfn = declare::define_internal_fn(ccx, name, rust_fn_ty);
    let bcx = Builder::new_block(ccx, llfn, "entry-block");
    trans(bcx);
    llfn
}

// Helper function used to get a handle to the `__rust_try` function used to
// catch exceptions.
//
// This function is only generated once and is then cached.
fn get_rust_try_fn<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
                             trans: &mut for<'b> FnMut(Builder<'b, 'tcx>))
                             -> ValueRef {
    if let Some(llfn) = ccx.rust_try_fn().get() {
        return llfn;
    }

    // Define the type up front for the signature of the rust_try function.
    let tcx = ccx.tcx();
    let i8p = tcx.mk_mut_ptr(tcx.types.i8);
    let fn_ty = tcx.mk_fn_ptr(ty::Binder(tcx.mk_fn_sig(
        iter::once(i8p),
        tcx.mk_nil(),
        false,
        hir::Unsafety::Unsafe,
        Abi::Rust
    )));
    let output = tcx.types.i32;
    let rust_try = gen_fn(ccx, "__rust_try", vec![fn_ty, i8p, i8p], output, trans);
    ccx.rust_try_fn().set(Some(rust_try));
    rust_try
}

fn span_invalid_monomorphization_error(a: &Session, b: Span, c: &str) {
    span_err!(a, b, E0511, "{}", c);
}

fn generic_simd_intrinsic<'a, 'tcx>(
    bcx: &Builder<'a, 'tcx>,
    name: &str,
    callee_ty: Ty<'tcx>,
    args: &[OperandRef<'tcx>],
    ret_ty: Ty<'tcx>,
    llret_ty: Type,
    span: Span
) -> Result<ValueRef, ()> {
    // macros for error handling:
    macro_rules! emit_error {
        ($msg: tt) => {
            emit_error!($msg, )
        };
        ($msg: tt, $($fmt: tt)*) => {
            span_invalid_monomorphization_error(
                bcx.sess(), span,
                &format!(concat!("invalid monomorphization of `{}` intrinsic: ",
                                 $msg),
                         name, $($fmt)*));
        }
    }
    macro_rules! require {
        ($cond: expr, $($fmt: tt)*) => {
            if !$cond {
                emit_error!($($fmt)*);
                return Err(());
            }
        }
    }
    macro_rules! require_simd {
        ($ty: expr, $position: expr) => {
            require!($ty.is_simd(), "expected SIMD {} type, found non-SIMD `{}`", $position, $ty)
        }
    }

    let tcx = bcx.tcx();
    let sig = tcx.erase_late_bound_regions_and_normalize(&callee_ty.fn_sig(tcx));
    let arg_tys = sig.inputs();

    // every intrinsic takes a SIMD vector as its first argument
    require_simd!(arg_tys[0], "input");
    let in_ty = arg_tys[0];
    let in_elem = arg_tys[0].simd_type(tcx);
    let in_len = arg_tys[0].simd_size(tcx);

    let comparison = match name {
        "simd_eq" => Some(hir::BiEq),
        "simd_ne" => Some(hir::BiNe),
        "simd_lt" => Some(hir::BiLt),
        "simd_le" => Some(hir::BiLe),
        "simd_gt" => Some(hir::BiGt),
        "simd_ge" => Some(hir::BiGe),
        _ => None
    };

    if let Some(cmp_op) = comparison {
        require_simd!(ret_ty, "return");

        let out_len = ret_ty.simd_size(tcx);
        require!(in_len == out_len,
                 "expected return type with length {} (same as input type `{}`), \
                  found `{}` with length {}",
                 in_len, in_ty,
                 ret_ty, out_len);
        require!(llret_ty.element_type().kind() == llvm::Integer,
                 "expected return type with integer elements, found `{}` with non-integer `{}`",
                 ret_ty,
                 ret_ty.simd_type(tcx));

        return Ok(compare_simd_types(bcx,
                                     args[0].immediate(),
                                     args[1].immediate(),
                                     in_elem,
                                     llret_ty,
                                     cmp_op))
    }

    if name.starts_with("simd_shuffle") {
        let n: usize = match name["simd_shuffle".len()..].parse() {
            Ok(n) => n,
            Err(_) => span_bug!(span,
                                "bad `simd_shuffle` instruction only caught in trans?")
        };

        require_simd!(ret_ty, "return");

        let out_len = ret_ty.simd_size(tcx);
        require!(out_len == n,
                 "expected return type of length {}, found `{}` with length {}",
                 n, ret_ty, out_len);
        require!(in_elem == ret_ty.simd_type(tcx),
                 "expected return element type `{}` (element of input `{}`), \
                  found `{}` with element type `{}`",
                 in_elem, in_ty,
                 ret_ty, ret_ty.simd_type(tcx));

        let total_len = in_len as u128 * 2;

        let vector = args[2].immediate();

        let indices: Option<Vec<_>> = (0..n)
            .map(|i| {
                let arg_idx = i;
                let val = const_get_elt(vector, i as u64);
                match const_to_opt_u128(val, true) {
                    None => {
                        emit_error!("shuffle index #{} is not a constant", arg_idx);
                        None
                    }
                    Some(idx) if idx >= total_len => {
                        emit_error!("shuffle index #{} is out of bounds (limit {})",
                                    arg_idx, total_len);
                        None
                    }
                    Some(idx) => Some(C_i32(bcx.ccx, idx as i32)),
                }
            })
            .collect();
        let indices = match indices {
            Some(i) => i,
            None => return Ok(C_null(llret_ty))
        };

        return Ok(bcx.shuffle_vector(args[0].immediate(),
                                     args[1].immediate(),
                                     C_vector(&indices)))
    }
1119 if name == "simd_insert" {
1120 require!(in_elem == arg_tys[2],
1121 "expected inserted type `{}` (element of input `{}`), found `{}`",
1122 in_elem, in_ty, arg_tys[2]);
1123 return Ok(bcx.insert_element(args[0].immediate(),
1124 args[2].immediate(),
1125 args[1].immediate()))
1127 if name == "simd_extract" {
1128 require!(ret_ty == in_elem,
1129 "expected return type `{}` (element of input `{}`), found `{}`",
1130 in_elem, in_ty, ret_ty);
1131 return Ok(bcx.extract_element(args[0].immediate(), args[1].immediate()))
1134 if name == "simd_cast" {
1135 require_simd!(ret_ty, "return");
1136 let out_len = ret_ty.simd_size(tcx);
1137 require!(in_len == out_len,
1138 "expected return type with length {} (same as input type `{}`), \
1139 found `{}` with length {}",
1142 // casting cares about nominal type, not just structural type
1143 let out_elem = ret_ty.simd_type(tcx);
1145 if in_elem == out_elem { return Ok(args[0].immediate()); }
1147 enum Style { Float, Int(/* is signed? */ bool), Unsupported }
1149 let (in_style, in_width) = match in_elem.sty {
1150 // vectors of pointer-sized integers should've been
1151 // disallowed before here, so this unwrap is safe.
1152 ty::TyInt(i) => (Style::Int(true), i.bit_width().unwrap()),
1153 ty::TyUint(u) => (Style::Int(false), u.bit_width().unwrap()),
1154 ty::TyFloat(f) => (Style::Float, f.bit_width()),
1155 _ => (Style::Unsupported, 0)
1157 let (out_style, out_width) = match out_elem.sty {
1158 ty::TyInt(i) => (Style::Int(true), i.bit_width().unwrap()),
1159 ty::TyUint(u) => (Style::Int(false), u.bit_width().unwrap()),
1160 ty::TyFloat(f) => (Style::Float, f.bit_width()),
1161 _ => (Style::Unsupported, 0)
1164 match (in_style, out_style) {
1165 (Style::Int(in_is_signed), Style::Int(_)) => {
1166 return Ok(match in_width.cmp(&out_width) {
1167 Ordering::Greater => bcx.trunc(args[0].immediate(), llret_ty),
1168 Ordering::Equal => args[0].immediate(),
1169 Ordering::Less => if in_is_signed {
1170 bcx.sext(args[0].immediate(), llret_ty)
1172 bcx.zext(args[0].immediate(), llret_ty)
1176 (Style::Int(in_is_signed), Style::Float) => {
1177 return Ok(if in_is_signed {
1178 bcx.sitofp(args[0].immediate(), llret_ty)
1180 bcx.uitofp(args[0].immediate(), llret_ty)
1183 (Style::Float, Style::Int(out_is_signed)) => {
1184 return Ok(if out_is_signed {
1185 bcx.fptosi(args[0].immediate(), llret_ty)
1187 bcx.fptoui(args[0].immediate(), llret_ty)
1190 (Style::Float, Style::Float) => {
1191 return Ok(match in_width.cmp(&out_width) {
1192 Ordering::Greater => bcx.fptrunc(args[0].immediate(), llret_ty),
1193 Ordering::Equal => args[0].immediate(),
1194 Ordering::Less => bcx.fpext(args[0].immediate(), llret_ty)
1197 _ => {/* Unsupported. Fallthrough. */}
1200 "unsupported cast from `{}` with element `{}` to `{}` with element `{}`",
    macro_rules! arith {
        ($($name: ident: $($($p: ident),* => $call: ident),*;)*) => {
            $(if name == stringify!($name) {
                match in_elem.sty {
                    $($(ty::$p(_))|* => {
                        return Ok(bcx.$call(args[0].immediate(), args[1].immediate()))
                    })*
                    _ => {},
                }
                require!(false,
                         "unsupported operation on `{}` with element `{}`",
                         in_ty,
                         in_elem)
            })*
        }
    }
    arith! {
        simd_add: TyUint, TyInt => add, TyFloat => fadd;
        simd_sub: TyUint, TyInt => sub, TyFloat => fsub;
        simd_mul: TyUint, TyInt => mul, TyFloat => fmul;
        simd_div: TyUint => udiv, TyInt => sdiv, TyFloat => fdiv;
        simd_rem: TyUint => urem, TyInt => srem, TyFloat => frem;
        simd_shl: TyUint, TyInt => shl;
        simd_shr: TyUint => lshr, TyInt => ashr;
        simd_and: TyUint, TyInt => and;
        simd_or: TyUint, TyInt => or;
        simd_xor: TyUint, TyInt => xor;
    }
    span_bug!(span, "unknown SIMD intrinsic");
}

// Returns the width of an int Ty, and whether it's signed or not.
// Returns None if the type is not an integer.
// FIXME: there are multiple copies of this function; investigate reusing one
// of the already existing ones.
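// For example, `i32` yields `Some((32, true))`, `u64` yields
// `Some((64, false))`, and `f32` yields `None`.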
fn int_type_width_signed(ty: Ty, ccx: &CrateContext) -> Option<(u64, bool)> {
    match ty.sty {
        ty::TyInt(t) => Some((match t {
            ast::IntTy::Is => {
                match &ccx.tcx().sess.target.target.target_pointer_width[..] {
                    "16" => 16,
                    "32" => 32,
                    "64" => 64,
                    tws => bug!("Unsupported target word size for isize: {}", tws),
                }
            },
            ast::IntTy::I8 => 8,
            ast::IntTy::I16 => 16,
            ast::IntTy::I32 => 32,
            ast::IntTy::I64 => 64,
            ast::IntTy::I128 => 128,
        }, true)),
        ty::TyUint(t) => Some((match t {
            ast::UintTy::Us => {
                match &ccx.tcx().sess.target.target.target_pointer_width[..] {
                    "16" => 16,
                    "32" => 32,
                    "64" => 64,
                    tws => bug!("Unsupported target word size for usize: {}", tws),
                }
            },
            ast::UintTy::U8 => 8,
            ast::UintTy::U16 => 16,
            ast::UintTy::U32 => 32,
            ast::UintTy::U64 => 64,
            ast::UintTy::U128 => 128,
        }, false)),
        _ => None,
    }
}

// Returns the width of a float TypeVariant.
// Returns None if the type is not a float.
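// For example, `f64` yields `Some(64)`; any non-float type yields `None`.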
fn float_type_width<'tcx>(sty: &ty::TypeVariants<'tcx>)
        -> Option<u64> {
    use rustc::ty::TyFloat;
    match *sty {
        TyFloat(t) => Some(match t {
            ast::FloatTy::F32 => 32,
            ast::FloatTy::F64 => 64,
        }),
        _ => None,
    }
}