// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.

#![allow(non_upper_case_globals)]
use intrinsics::{self, Intrinsic};
use libc;
use llvm;
use llvm::{ValueRef};
use abi::{Abi, FnType};
use adt;
use mir::lvalue::{LvalueRef, Alignment};
use base::*;
use common::*;
use declare;
use glue;
use type_of;
use machine;
use type_::Type;
use rustc::ty::{self, Ty};
use rustc::hir;
use syntax::ast;
use syntax::symbol::Symbol;
use builder::Builder;

use rustc::session::Session;
use syntax_pos::Span;

use std::cmp::Ordering;
use std::iter;

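// Maps a Rust intrinsic name straight onto the equivalent LLVM intrinsic
// when the lowering needs no extra logic, e.g. "sqrtf32" -> "llvm.sqrt.f32".
// Returns None for intrinsics that need special handling in
// trans_intrinsic_call below.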
fn get_simple_intrinsic(ccx: &CrateContext, name: &str) -> Option<ValueRef> {
    let llvm_name = match name {
        "sqrtf32" => "llvm.sqrt.f32",
        "sqrtf64" => "llvm.sqrt.f64",
        "powif32" => "llvm.powi.f32",
        "powif64" => "llvm.powi.f64",
        "sinf32" => "llvm.sin.f32",
        "sinf64" => "llvm.sin.f64",
        "cosf32" => "llvm.cos.f32",
        "cosf64" => "llvm.cos.f64",
        "powf32" => "llvm.pow.f32",
        "powf64" => "llvm.pow.f64",
        "expf32" => "llvm.exp.f32",
        "expf64" => "llvm.exp.f64",
        "exp2f32" => "llvm.exp2.f32",
        "exp2f64" => "llvm.exp2.f64",
        "logf32" => "llvm.log.f32",
        "logf64" => "llvm.log.f64",
        "log10f32" => "llvm.log10.f32",
        "log10f64" => "llvm.log10.f64",
        "log2f32" => "llvm.log2.f32",
        "log2f64" => "llvm.log2.f64",
        "fmaf32" => "llvm.fma.f32",
        "fmaf64" => "llvm.fma.f64",
        "fabsf32" => "llvm.fabs.f32",
        "fabsf64" => "llvm.fabs.f64",
        "copysignf32" => "llvm.copysign.f32",
        "copysignf64" => "llvm.copysign.f64",
        "floorf32" => "llvm.floor.f32",
        "floorf64" => "llvm.floor.f64",
        "ceilf32" => "llvm.ceil.f32",
        "ceilf64" => "llvm.ceil.f64",
        "truncf32" => "llvm.trunc.f32",
        "truncf64" => "llvm.trunc.f64",
        "rintf32" => "llvm.rint.f32",
        "rintf64" => "llvm.rint.f64",
        "nearbyintf32" => "llvm.nearbyint.f32",
        "nearbyintf64" => "llvm.nearbyint.f64",
        "roundf32" => "llvm.round.f32",
        "roundf64" => "llvm.round.f64",
        "assume" => "llvm.assume",
        "abort" => "llvm.trap",
        _ => return None
    };
    Some(ccx.get_intrinsic(&llvm_name))
}
/// Remember to add all intrinsics here, in librustc_typeck/check/mod.rs,
/// and in libcore/intrinsics.rs; if you need access to any llvm intrinsics,
/// add them to librustc_trans/trans/context.rs
pub fn trans_intrinsic_call<'a, 'tcx>(bcx: &Builder<'a, 'tcx>,
                                      callee_ty: Ty<'tcx>,
                                      fn_ty: &FnType,
                                      llargs: &[ValueRef],
                                      llresult: ValueRef,
                                      span: Span) {
    let ccx = bcx.ccx;
    let tcx = ccx.tcx();
    let (def_id, substs) = match callee_ty.sty {
        ty::TyFnDef(def_id, substs) => (def_id, substs),
        _ => bug!("expected fn item type, found {}", callee_ty)
    };
    let sig = callee_ty.fn_sig(tcx);
    let sig = tcx.erase_late_bound_regions_and_normalize(&sig);
    let arg_tys = sig.inputs();
    let ret_ty = sig.output();
    let name = &*tcx.item_name(def_id).as_str();

    let llret_ty = type_of::type_of(ccx, ret_ty);

    let simple = get_simple_intrinsic(ccx, name);
    let llval = match name {
        _ if simple.is_some() => {
            bcx.call(simple.unwrap(), &llargs, None)
        }
        "unreachable" => {
            C_nil(ccx)
        }
        "likely" => {
            let expect = ccx.get_intrinsic(&("llvm.expect.i1"));
            bcx.call(expect, &[llargs[0], C_bool(ccx, true)], None)
        }
        "unlikely" => {
            let expect = ccx.get_intrinsic(&("llvm.expect.i1"));
            bcx.call(expect, &[llargs[0], C_bool(ccx, false)], None)
        }
        "try" => {
            try_intrinsic(bcx, ccx, llargs[0], llargs[1], llargs[2], llresult);
            C_nil(ccx)
        }
        "breakpoint" => {
            let llfn = ccx.get_intrinsic(&("llvm.debugtrap"));
            bcx.call(llfn, &[], None)
        }
        "size_of" => {
            let tp_ty = substs.type_at(0);
            let lltp_ty = type_of::type_of(ccx, tp_ty);
            C_uint(ccx, machine::llsize_of_alloc(ccx, lltp_ty))
        }
        "size_of_val" => {
            let tp_ty = substs.type_at(0);
            if !bcx.ccx.shared().type_is_sized(tp_ty) {
                let (llsize, _) =
                    glue::size_and_align_of_dst(bcx, tp_ty, llargs[1]);
                llsize
            } else {
                let lltp_ty = type_of::type_of(ccx, tp_ty);
                C_uint(ccx, machine::llsize_of_alloc(ccx, lltp_ty))
            }
        }
        "min_align_of" => {
            let tp_ty = substs.type_at(0);
            C_uint(ccx, ccx.align_of(tp_ty))
        }
        "min_align_of_val" => {
            let tp_ty = substs.type_at(0);
            if !bcx.ccx.shared().type_is_sized(tp_ty) {
                let (_, llalign) =
                    glue::size_and_align_of_dst(bcx, tp_ty, llargs[1]);
                llalign
            } else {
                C_uint(ccx, ccx.align_of(tp_ty))
            }
        }
        "pref_align_of" => {
            let tp_ty = substs.type_at(0);
            let lltp_ty = type_of::type_of(ccx, tp_ty);
            C_uint(ccx, machine::llalign_of_pref(ccx, lltp_ty))
        }
        "type_name" => {
            let tp_ty = substs.type_at(0);
            let ty_name = Symbol::intern(&tp_ty.to_string()).as_str();
            C_str_slice(ccx, ty_name)
        }
        "type_id" => {
            C_u64(ccx, ccx.tcx().type_id_hash(substs.type_at(0)))
        }
        "init" => {
            let ty = substs.type_at(0);
            if !type_is_zero_size(ccx, ty) {
                // Just zero out the stack slot.
                // If we store a zero constant, LLVM will drown in vreg allocation for large data
                // structures, and the generated code will be awful. (A telltale sign of this is
                // large quantities of `mov [byte ptr foo],0` in the generated code.)
                memset_intrinsic(bcx, false, ty, llresult, C_u8(ccx, 0), C_uint(ccx, 1usize));
            }
            C_nil(ccx)
        }
        // Effectively no-ops
        "uninit" | "forget" => {
            C_nil(ccx)
        }
        "needs_drop" => {
            let tp_ty = substs.type_at(0);

            C_bool(ccx, bcx.ccx.shared().type_needs_drop(tp_ty))
        }
        "offset" => {
            let ptr = llargs[0];
            let offset = llargs[1];
            bcx.inbounds_gep(ptr, &[offset])
        }
        "arith_offset" => {
            let ptr = llargs[0];
            let offset = llargs[1];
            bcx.gep(ptr, &[offset])
        }

        "copy_nonoverlapping" => {
            copy_intrinsic(bcx, false, false, substs.type_at(0), llargs[1], llargs[0], llargs[2])
        }
        "copy" => {
            copy_intrinsic(bcx, true, false, substs.type_at(0), llargs[1], llargs[0], llargs[2])
        }
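        // Note the argument order above: the `copy*` intrinsics take
        // (src, dst, count), while `copy_intrinsic` takes (dst, src, count),
        // hence the swapped `llargs[1], llargs[0]`. The `volatile_copy_*`
        // intrinsics below already take (dst, src, count), so their arguments
        // pass through in order.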
        "write_bytes" => {
            memset_intrinsic(bcx, false, substs.type_at(0), llargs[0], llargs[1], llargs[2])
        }

        "volatile_copy_nonoverlapping_memory" => {
            copy_intrinsic(bcx, false, true, substs.type_at(0), llargs[0], llargs[1], llargs[2])
        }
        "volatile_copy_memory" => {
            copy_intrinsic(bcx, true, true, substs.type_at(0), llargs[0], llargs[1], llargs[2])
        }
        "volatile_set_memory" => {
            memset_intrinsic(bcx, true, substs.type_at(0), llargs[0], llargs[1], llargs[2])
        }
        "volatile_load" => {
            let tp_ty = substs.type_at(0);
            let mut ptr = llargs[0];
            if let Some(ty) = fn_ty.ret.cast {
                ptr = bcx.pointercast(ptr, ty.ptr_to());
            }
            let load = bcx.volatile_load(ptr);
            unsafe {
                llvm::LLVMSetAlignment(load, ccx.align_of(tp_ty));
            }
            to_immediate(bcx, load, tp_ty)
        }
        "volatile_store" => {
            let tp_ty = substs.type_at(0);
            if type_is_fat_ptr(bcx.ccx, tp_ty) {
                bcx.volatile_store(llargs[1], get_dataptr(bcx, llargs[0]));
                bcx.volatile_store(llargs[2], get_meta(bcx, llargs[0]));
            } else {
                let val = if fn_ty.args[1].is_indirect() {
                    bcx.load(llargs[1], None)
                } else {
                    if !type_is_zero_size(ccx, tp_ty) {
                        from_immediate(bcx, llargs[1])
                    } else {
                        return
                    }
                };
                let ptr = bcx.pointercast(llargs[0], val_ty(val).ptr_to());
                let store = bcx.volatile_store(val, ptr);
                unsafe {
                    llvm::LLVMSetAlignment(store, ccx.align_of(tp_ty));
                }
            }
            C_nil(ccx)
        }
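        // Note: both volatile accesses above are tagged with the natural
        // alignment of `T` via `LLVMSetAlignment`, so e.g. a volatile load of
        // a `u32` comes out as `load volatile i32, ... align 4` in the IR.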
        "prefetch_read_data" | "prefetch_write_data" |
        "prefetch_read_instruction" | "prefetch_write_instruction" => {
            let expect = ccx.get_intrinsic(&("llvm.prefetch"));
            let (rw, cache_type) = match name {
                "prefetch_read_data" => (0, 1),
                "prefetch_write_data" => (1, 1),
                "prefetch_read_instruction" => (0, 0),
                "prefetch_write_instruction" => (1, 0),
                _ => bug!()
            };
            bcx.call(expect, &[llargs[0], C_i32(ccx, rw), llargs[1], C_i32(ccx, cache_type)], None)
        }
        "ctlz" | "ctlz_nonzero" | "cttz" | "cttz_nonzero" | "ctpop" | "bswap" |
        "add_with_overflow" | "sub_with_overflow" | "mul_with_overflow" |
        "overflowing_add" | "overflowing_sub" | "overflowing_mul" |
        "unchecked_div" | "unchecked_rem" | "unchecked_shl" | "unchecked_shr" => {
            let sty = &arg_tys[0].sty;
            match int_type_width_signed(sty, ccx) {
                Some((width, signed)) =>
                    match name {
                        "ctlz" | "cttz" => {
                            let y = C_bool(bcx.ccx, false);
                            let llfn = ccx.get_intrinsic(&format!("llvm.{}.i{}", name, width));
                            bcx.call(llfn, &[llargs[0], y], None)
                        }
                        "ctlz_nonzero" | "cttz_nonzero" => {
                            let y = C_bool(bcx.ccx, true);
                            let llvm_name = &format!("llvm.{}.i{}", &name[..4], width);
                            let llfn = ccx.get_intrinsic(llvm_name);
                            bcx.call(llfn, &[llargs[0], y], None)
                        }
                        "ctpop" => bcx.call(ccx.get_intrinsic(&format!("llvm.ctpop.i{}", width)),
                                            &llargs, None),
                        "bswap" => {
                            if width == 8 {
                                llargs[0] // byte swap a u8/i8 is just a no-op
                            } else {
                                bcx.call(ccx.get_intrinsic(&format!("llvm.bswap.i{}", width)),
                                         &llargs, None)
                            }
                        }
                        "add_with_overflow" | "sub_with_overflow" | "mul_with_overflow" => {
                            let intrinsic = format!("llvm.{}{}.with.overflow.i{}",
                                                    if signed { 's' } else { 'u' },
                                                    &name[..3], width);
                            let llfn = bcx.ccx.get_intrinsic(&intrinsic);

                            // Convert `i1` to a `bool`, and write it to the out parameter
                            let val = bcx.call(llfn, &[llargs[0], llargs[1]], None);
                            let result = bcx.extract_value(val, 0);
                            let overflow = bcx.zext(bcx.extract_value(val, 1), Type::bool(ccx));
                            bcx.store(result, bcx.struct_gep(llresult, 0), None);
                            bcx.store(overflow, bcx.struct_gep(llresult, 1), None);

                            C_nil(bcx.ccx)
                        }
                        "overflowing_add" => bcx.add(llargs[0], llargs[1]),
                        "overflowing_sub" => bcx.sub(llargs[0], llargs[1]),
                        "overflowing_mul" => bcx.mul(llargs[0], llargs[1]),
                        "unchecked_div" =>
                            if signed {
                                bcx.sdiv(llargs[0], llargs[1])
                            } else {
                                bcx.udiv(llargs[0], llargs[1])
                            },
                        "unchecked_rem" =>
                            if signed {
                                bcx.srem(llargs[0], llargs[1])
                            } else {
                                bcx.urem(llargs[0], llargs[1])
                            },
                        "unchecked_shl" => bcx.shl(llargs[0], llargs[1]),
                        "unchecked_shr" =>
                            if signed {
                                bcx.ashr(llargs[0], llargs[1])
                            } else {
                                bcx.lshr(llargs[0], llargs[1])
                            },
                        _ => bug!(),
                    },
                None => {
                    span_invalid_monomorphization_error(
                        tcx.sess, span,
                        &format!("invalid monomorphization of `{}` intrinsic: \
                                  expected basic integer type, found `{}`", name, sty));
                    C_nil(ccx)
                }
            }
        }
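        // For reference: `add_with_overflow` above lowers `i32` arguments to a
        // call to `llvm.sadd.with.overflow.i32` (`llvm.uadd.*` for `u32`),
        // which returns an `{ i32, i1 }` pair; the `i1` is zero-extended to a
        // `bool` and both fields are written through `llresult`.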
        "fadd_fast" | "fsub_fast" | "fmul_fast" | "fdiv_fast" | "frem_fast" => {
            let sty = &arg_tys[0].sty;
            match float_type_width(sty) {
                Some(_width) =>
                    match name {
                        "fadd_fast" => bcx.fadd_fast(llargs[0], llargs[1]),
                        "fsub_fast" => bcx.fsub_fast(llargs[0], llargs[1]),
                        "fmul_fast" => bcx.fmul_fast(llargs[0], llargs[1]),
                        "fdiv_fast" => bcx.fdiv_fast(llargs[0], llargs[1]),
                        "frem_fast" => bcx.frem_fast(llargs[0], llargs[1]),
                        _ => bug!(),
                    },
                None => {
                    span_invalid_monomorphization_error(
                        tcx.sess, span,
                        &format!("invalid monomorphization of `{}` intrinsic: \
                                  expected basic float type, found `{}`", name, sty));
                    C_nil(ccx)
                }
            }
        }

        "discriminant_value" => {
            let val_ty = substs.type_at(0);
            match val_ty.sty {
                ty::TyAdt(adt, ..) if adt.is_enum() => {
                    adt::trans_get_discr(bcx, val_ty, llargs[0], Alignment::AbiAligned,
                                         Some(llret_ty), true)
                }
                _ => C_null(llret_ty)
            }
        }
        "align_offset" => {
            // `ptr as usize`
            let ptr_val = bcx.ptrtoint(llargs[0], bcx.ccx.int_type());
            // `ptr_val % align`
            let offset = bcx.urem(ptr_val, llargs[1]);
            let zero = C_null(bcx.ccx.int_type());
            // `offset == 0`
            let is_zero = bcx.icmp(llvm::IntPredicate::IntEQ, offset, zero);
            // `if offset == 0 { 0 } else { offset - align }`
            bcx.select(is_zero, zero, bcx.sub(offset, llargs[1]))
        }
        name if name.starts_with("simd_") => {
            generic_simd_intrinsic(bcx, name,
                                   callee_ty,
                                   &llargs,
                                   ret_ty, llret_ty,
                                   span)
        }
        // This requires that atomic intrinsics follow a specific naming pattern:
        // "atomic_<operation>[_<ordering>]", and no ordering means SeqCst
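        //
        // For example: "atomic_xadd" is an AtomicRMW add with SeqCst ordering,
        // "atomic_load_acq" is a load with Acquire ordering, and
        // "atomic_cxchg_acqrel_failrelaxed" is a cmpxchg with AcquireRelease
        // on success and Monotonic (relaxed) on failure.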
        name if name.starts_with("atomic_") => {
            use llvm::AtomicOrdering::*;

            let split: Vec<&str> = name.split('_').collect();

            let is_cxchg = split[1] == "cxchg" || split[1] == "cxchgweak";
            let (order, failorder) = match split.len() {
                2 => (SequentiallyConsistent, SequentiallyConsistent),
                3 => match split[2] {
                    "unordered" => (Unordered, Unordered),
                    "relaxed" => (Monotonic, Monotonic),
                    "acq" => (Acquire, Acquire),
                    "rel" => (Release, Monotonic),
                    "acqrel" => (AcquireRelease, Acquire),
                    "failrelaxed" if is_cxchg =>
                        (SequentiallyConsistent, Monotonic),
                    "failacq" if is_cxchg =>
                        (SequentiallyConsistent, Acquire),
                    _ => ccx.sess().fatal("unknown ordering in atomic intrinsic")
                },
                4 => match (split[2], split[3]) {
                    ("acq", "failrelaxed") if is_cxchg =>
                        (Acquire, Monotonic),
                    ("acqrel", "failrelaxed") if is_cxchg =>
                        (AcquireRelease, Monotonic),
                    _ => ccx.sess().fatal("unknown ordering in atomic intrinsic")
                },
                _ => ccx.sess().fatal("Atomic intrinsic not in correct format"),
            };

            let invalid_monomorphization = |sty| {
                span_invalid_monomorphization_error(tcx.sess, span,
                    &format!("invalid monomorphization of `{}` intrinsic: \
                              expected basic integer type, found `{}`", name, sty));
            };

            match split[1] {
                "cxchg" | "cxchgweak" => {
                    let sty = &substs.type_at(0).sty;
                    if int_type_width_signed(sty, ccx).is_some() {
                        let weak = if split[1] == "cxchgweak" { llvm::True } else { llvm::False };
                        let val = bcx.atomic_cmpxchg(llargs[0], llargs[1], llargs[2], order,
                            failorder, weak);
                        let result = bcx.extract_value(val, 0);
                        let success = bcx.zext(bcx.extract_value(val, 1), Type::bool(bcx.ccx));
                        bcx.store(result, bcx.struct_gep(llresult, 0), None);
                        bcx.store(success, bcx.struct_gep(llresult, 1), None);
                    } else {
                        invalid_monomorphization(sty);
                    }
                    C_nil(ccx)
                }

                "load" => {
                    let sty = &substs.type_at(0).sty;
                    if int_type_width_signed(sty, ccx).is_some() {
                        bcx.atomic_load(llargs[0], order)
                    } else {
                        invalid_monomorphization(sty);
                        C_nil(ccx)
                    }
                }

                "store" => {
                    let sty = &substs.type_at(0).sty;
                    if int_type_width_signed(sty, ccx).is_some() {
                        bcx.atomic_store(llargs[1], llargs[0], order);
                    } else {
                        invalid_monomorphization(sty);
                    }
                    C_nil(ccx)
                }

                "fence" => {
                    bcx.atomic_fence(order, llvm::SynchronizationScope::CrossThread);
                    C_nil(ccx)
                }

                "singlethreadfence" => {
                    bcx.atomic_fence(order, llvm::SynchronizationScope::SingleThread);
                    C_nil(ccx)
                }

                // These are all AtomicRMW ops
                op => {
                    let atom_op = match op {
                        "xchg" => llvm::AtomicXchg,
                        "xadd" => llvm::AtomicAdd,
                        "xsub" => llvm::AtomicSub,
                        "and" => llvm::AtomicAnd,
                        "nand" => llvm::AtomicNand,
                        "or" => llvm::AtomicOr,
                        "xor" => llvm::AtomicXor,
                        "max" => llvm::AtomicMax,
                        "min" => llvm::AtomicMin,
                        "umax" => llvm::AtomicUMax,
                        "umin" => llvm::AtomicUMin,
                        _ => ccx.sess().fatal("unknown atomic operation")
                    };

                    let sty = &substs.type_at(0).sty;
                    if int_type_width_signed(sty, ccx).is_some() {
                        bcx.atomic_rmw(atom_op, llargs[0], llargs[1], order)
                    } else {
                        invalid_monomorphization(sty);
                        C_nil(ccx)
                    }
                }
            }
        }

        _ => {
            let intr = match Intrinsic::find(&name) {
                Some(intr) => intr,
                None => bug!("unknown intrinsic '{}'", name),
            };
            fn one<T>(x: Vec<T>) -> T {
                assert_eq!(x.len(), 1);
                x.into_iter().next().unwrap()
            }
            fn ty_to_type(ccx: &CrateContext, t: &intrinsics::Type,
                          any_changes_needed: &mut bool) -> Vec<Type> {
                use intrinsics::Type::*;
                match *t {
                    Void => vec![Type::void(ccx)],
                    Integer(_signed, width, llvm_width) => {
                        *any_changes_needed |= width != llvm_width;
                        vec![Type::ix(ccx, llvm_width as u64)]
                    }
                    Float(x) => {
                        match x {
                            32 => vec![Type::f32(ccx)],
                            64 => vec![Type::f64(ccx)],
                            _ => bug!()
                        }
                    }
                    Pointer(ref t, ref llvm_elem, _const) => {
                        *any_changes_needed |= llvm_elem.is_some();

                        let t = llvm_elem.as_ref().unwrap_or(t);
                        let elem = one(ty_to_type(ccx, t, any_changes_needed));
                        vec![elem.ptr_to()]
                    }
                    Vector(ref t, ref llvm_elem, length) => {
                        *any_changes_needed |= llvm_elem.is_some();

                        let t = llvm_elem.as_ref().unwrap_or(t);
                        let elem = one(ty_to_type(ccx, t, any_changes_needed));
                        vec![Type::vector(&elem, length as u64)]
                    }
                    Aggregate(false, ref contents) => {
                        let elems = contents.iter()
                                            .map(|t| one(ty_to_type(ccx, t, any_changes_needed)))
                                            .collect::<Vec<_>>();
                        vec![Type::struct_(ccx, &elems, false)]
                    }
                    Aggregate(true, ref contents) => {
                        *any_changes_needed = true;
                        contents.iter()
                                .flat_map(|t| ty_to_type(ccx, t, any_changes_needed))
                                .collect()
                    }
                }
            }

            // This allows an argument list like `foo, (bar, baz),
            // qux` to be converted into `foo, bar, baz, qux`, integer
            // arguments to be truncated as needed and pointers to be
            // converted.
            fn modify_as_needed<'a, 'tcx>(bcx: &Builder<'a, 'tcx>,
                                          t: &intrinsics::Type,
                                          arg_type: Ty<'tcx>,
                                          llarg: ValueRef)
                                          -> Vec<ValueRef>
            {
                match *t {
                    intrinsics::Type::Aggregate(true, ref contents) => {
                        // We found a tuple that needs squishing! So
                        // run over the tuple and load each field.
                        //
                        // This assumes the type is "simple", i.e. no
                        // destructors, and the contents are SIMD
                        // etc.
                        assert!(!bcx.ccx.shared().type_needs_drop(arg_type));
                        let arg = LvalueRef::new_sized_ty(llarg, arg_type, Alignment::AbiAligned);
                        (0..contents.len()).map(|i| {
                            let (ptr, align) = arg.trans_field_ptr(bcx, i);
                            bcx.load(ptr, align.to_align())
                        }).collect()
                    }
                    intrinsics::Type::Pointer(_, Some(ref llvm_elem), _) => {
                        let llvm_elem = one(ty_to_type(bcx.ccx, llvm_elem, &mut false));
                        vec![bcx.pointercast(llarg, llvm_elem.ptr_to())]
                    }
                    intrinsics::Type::Vector(_, Some(ref llvm_elem), length) => {
                        let llvm_elem = one(ty_to_type(bcx.ccx, llvm_elem, &mut false));
                        vec![bcx.bitcast(llarg, Type::vector(&llvm_elem, length as u64))]
                    }
                    intrinsics::Type::Integer(_, width, llvm_width) if width != llvm_width => {
                        // the LLVM intrinsic uses a smaller integer
                        // size than the C intrinsic's signature, so
                        // we have to trim it down here.
                        vec![bcx.trunc(llarg, Type::ix(bcx.ccx, llvm_width as u64))]
                    }
                    _ => vec![llarg],
                }
            }

            let mut any_changes_needed = false;
            let inputs = intr.inputs.iter()
                                    .flat_map(|t| ty_to_type(ccx, t, &mut any_changes_needed))
                                    .collect::<Vec<_>>();

            let mut out_changes = false;
            let outputs = one(ty_to_type(ccx, &intr.output, &mut out_changes));
            // outputting a flattened aggregate is nonsense
            assert!(!out_changes);

            let llargs = if !any_changes_needed {
                // no aggregates to flatten, so no change needed
                llargs.to_vec()
            } else {
                // there are some aggregates that need to be flattened
                // in the LLVM call, so we need to run over the types
                // again to find them and extract the arguments
                intr.inputs.iter()
                           .zip(llargs)
                           .zip(arg_tys)
                           .flat_map(|((t, llarg), ty)| modify_as_needed(bcx, t, ty, *llarg))
                           .collect()
            };

            assert_eq!(inputs.len(), llargs.len());

            let val = match intr.definition {
                intrinsics::IntrinsicDef::Named(name) => {
                    let f = declare::declare_cfn(ccx,
                                                 name,
                                                 Type::func(&inputs, &outputs));
                    bcx.call(f, &llargs, None)
                }
            };

            match *intr.output {
                intrinsics::Type::Aggregate(flatten, ref elems) => {
                    // the output is a tuple so we need to munge it properly
                    assert!(!flatten);

                    for i in 0..elems.len() {
                        let val = bcx.extract_value(val, i);
                        let lval = LvalueRef::new_sized_ty(llresult, ret_ty,
                                                           Alignment::AbiAligned);
                        let (dest, align) = lval.trans_field_ptr(bcx, i);
                        bcx.store(val, dest, align.to_align());
                    }
                    C_nil(ccx)
                }
                _ => val,
            }
        }
    };

    if val_ty(llval) != Type::void(ccx) && machine::llsize_of_alloc(ccx, val_ty(llval)) != 0 {
        if let Some(ty) = fn_ty.ret.cast {
            let ptr = bcx.pointercast(llresult, ty.ptr_to());
            bcx.store(llval, ptr, Some(ccx.align_of(ret_ty)));
        } else {
            store_ty(bcx, llval, llresult, Alignment::AbiAligned, ret_ty);
        }
    }
}

fn copy_intrinsic<'a, 'tcx>(bcx: &Builder<'a, 'tcx>,
                            allow_overlap: bool,
                            volatile: bool,
                            tp_ty: Ty<'tcx>,
                            dst: ValueRef,
                            src: ValueRef,
                            count: ValueRef)
                            -> ValueRef {
    let ccx = bcx.ccx;
    let lltp_ty = type_of::type_of(ccx, tp_ty);
    let align = C_i32(ccx, ccx.align_of(tp_ty) as i32);
    let size = machine::llsize_of(ccx, lltp_ty);
    let int_size = machine::llbitsize_of_real(ccx, ccx.int_type());

    let operation = if allow_overlap {
        "memmove"
    } else {
        "memcpy"
    };

    let name = format!("llvm.{}.p0i8.p0i8.i{}", operation, int_size);

    let dst_ptr = bcx.pointercast(dst, Type::i8p(ccx));
    let src_ptr = bcx.pointercast(src, Type::i8p(ccx));
    let llfn = ccx.get_intrinsic(&name);

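    // A sketch of what this builds: copying ten `u32`s on a 64-bit target
    // becomes a call to `llvm.memcpy.p0i8.p0i8.i64` (`llvm.memmove.*` when
    // overlap is allowed) with byte length `size * count` = 40 and alignment 4.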
    bcx.call(llfn,
             &[dst_ptr,
               src_ptr,
               bcx.mul(size, count),
               align,
               C_bool(ccx, volatile)],
             None)
}

fn memset_intrinsic<'a, 'tcx>(
    bcx: &Builder<'a, 'tcx>,
    volatile: bool,
    ty: Ty<'tcx>,
    dst: ValueRef,
    val: ValueRef,
    count: ValueRef
) -> ValueRef {
    let ccx = bcx.ccx;
    let align = C_i32(ccx, ccx.align_of(ty) as i32);
    let lltp_ty = type_of::type_of(ccx, ty);
    let size = machine::llsize_of(ccx, lltp_ty);
    let dst = bcx.pointercast(dst, Type::i8p(ccx));
    call_memset(bcx, dst, val, bcx.mul(size, count), align, volatile)
}

fn try_intrinsic<'a, 'tcx>(
    bcx: &Builder<'a, 'tcx>,
    ccx: &CrateContext,
    func: ValueRef,
    data: ValueRef,
    local_ptr: ValueRef,
    dest: ValueRef,
) {
    if bcx.sess().no_landing_pads() {
        bcx.call(func, &[data], None);
        bcx.store(C_null(Type::i8p(&bcx.ccx)), dest, None);
    } else if wants_msvc_seh(bcx.sess()) {
        trans_msvc_try(bcx, ccx, func, data, local_ptr, dest);
    } else {
        trans_gnu_try(bcx, ccx, func, data, local_ptr, dest);
    }
}

// MSVC's definition of the `rust_try` function.
//
// This implementation uses the new exception handling instructions in LLVM
// which have support in LLVM for SEH on MSVC targets. These instructions are
// meant to work for all targets, but as of this writing LLVM does not
// recommend using them yet, as the old instructions are still better
// optimized.
fn trans_msvc_try<'a, 'tcx>(bcx: &Builder<'a, 'tcx>,
                            ccx: &CrateContext,
                            func: ValueRef,
                            data: ValueRef,
                            local_ptr: ValueRef,
                            dest: ValueRef) {
    let llfn = get_rust_try_fn(ccx, &mut |bcx| {
        let ccx = bcx.ccx;

        bcx.set_personality_fn(bcx.ccx.eh_personality());

        let normal = bcx.build_sibling_block("normal");
        let catchswitch = bcx.build_sibling_block("catchswitch");
        let catchpad = bcx.build_sibling_block("catchpad");
        let caught = bcx.build_sibling_block("caught");

        let func = llvm::get_param(bcx.llfn(), 0);
        let data = llvm::get_param(bcx.llfn(), 1);
        let local_ptr = llvm::get_param(bcx.llfn(), 2);

        // We're generating an IR snippet that looks like:
        //
        //   declare i32 @rust_try(%func, %data, %ptr) {
        //      %slot = alloca i64*
        //      invoke %func(%data) to label %normal unwind label %catchswitch
        //
        //   normal:
        //      ret i32 0
        //
        //   catchswitch:
        //      %cs = catchswitch within none [%catchpad] unwind to caller
        //
        //   catchpad:
        //      %tok = catchpad within %cs [%type_descriptor, 0, %slot]
        //      %ptr[0] = %slot[0]
        //      %ptr[1] = %slot[1]
        //      catchret from %tok to label %caught
        //
        //   caught:
        //      ret i32 1
        //   }
        //
        // This structure follows the basic usage of throw/try/catch in LLVM.
        // For example, compile this C++ snippet to see what LLVM generates:
        //
        //      #include <stdint.h>
        //
        //      int bar(void (*foo)(void), uint64_t *ret) {
        //          try {
        //              foo();
        //              return 0;
        //          } catch(uint64_t a[2]) {
        //              ret[0] = a[0];
        //              ret[1] = a[1];
        //              return 1;
        //          }
        //      }
        //
        // More information can be found in libstd's seh.rs implementation.
        let i64p = Type::i64(ccx).ptr_to();
        let slot = bcx.alloca(i64p, "slot", None);
        bcx.invoke(func, &[data], normal.llbb(), catchswitch.llbb(),
                   None);

        normal.ret(C_i32(ccx, 0));

        let cs = catchswitch.catch_switch(None, None, 1);
        catchswitch.add_handler(cs, catchpad.llbb());

        let tcx = ccx.tcx();
        let tydesc = match tcx.lang_items().msvc_try_filter() {
            Some(did) => ::consts::get_static(ccx, did),
            None => bug!("msvc_try_filter not defined"),
        };
        let tok = catchpad.catch_pad(cs, &[tydesc, C_i32(ccx, 0), slot]);
        let addr = catchpad.load(slot, None);
        let arg1 = catchpad.load(addr, None);
        let val1 = C_i32(ccx, 1);
        let arg2 = catchpad.load(catchpad.inbounds_gep(addr, &[val1]), None);
        let local_ptr = catchpad.bitcast(local_ptr, i64p);
        catchpad.store(arg1, local_ptr, None);
        catchpad.store(arg2, catchpad.inbounds_gep(local_ptr, &[val1]), None);
        catchpad.catch_ret(tok, caught.llbb());

        caught.ret(C_i32(ccx, 1));
    });

    // Note that no invoke is used here because by definition this function
    // can't panic (that's what it's catching).
    let ret = bcx.call(llfn, &[func, data, local_ptr], None);
    bcx.store(ret, dest, None);
}

// Definition of the standard "try" function for Rust using the GNU-like model
// of exceptions (e.g. the normal semantics of LLVM's landingpad and invoke
// instructions).
//
// This translation is a little surprising because we always call a shim
// function instead of inlining the call to `invoke` manually here. This is done
// because in LLVM we're only allowed to have one personality per function
// definition. The call to the `try` intrinsic is being inlined into the
// function calling it, and that function may already have other personality
// functions in play. By calling a shim we're guaranteed that our shim will have
// the right personality function.
fn trans_gnu_try<'a, 'tcx>(bcx: &Builder<'a, 'tcx>,
                           ccx: &CrateContext,
                           func: ValueRef,
                           data: ValueRef,
                           local_ptr: ValueRef,
                           dest: ValueRef) {
    let llfn = get_rust_try_fn(ccx, &mut |bcx| {
        let ccx = bcx.ccx;

        // Translates the shims described above:
        //
        //   bcx:
        //      invoke %func(%args...) normal %normal unwind %catch
        //
        //   normal:
        //      ret 0
        //
        //   catch:
        //      (ptr, _) = landingpad
        //      store ptr, %local_ptr
        //      ret 1
        //
        // Note that the `local_ptr` data passed into the `try` intrinsic is
        // expected to be `*mut *mut u8` for this to actually work, but that's
        // managed by the standard library.

        let then = bcx.build_sibling_block("then");
        let catch = bcx.build_sibling_block("catch");

        let func = llvm::get_param(bcx.llfn(), 0);
        let data = llvm::get_param(bcx.llfn(), 1);
        let local_ptr = llvm::get_param(bcx.llfn(), 2);
        bcx.invoke(func, &[data], then.llbb(), catch.llbb(), None);
        then.ret(C_i32(ccx, 0));

        // Type indicator for the exception being thrown.
        //
        // The first value in this tuple is a pointer to the exception object
        // being thrown. The second value is a "selector" indicating which of
        // the landing pad clauses the exception's type had been matched to.
        // rust_try ignores the selector.
        let lpad_ty = Type::struct_(ccx, &[Type::i8p(ccx), Type::i32(ccx)],
                                    false);
        let vals = catch.landing_pad(lpad_ty, bcx.ccx.eh_personality(), 1, catch.llfn());
        catch.add_clause(vals, C_null(Type::i8p(ccx)));
        let ptr = catch.extract_value(vals, 0);
        catch.store(ptr, catch.bitcast(local_ptr, Type::i8p(ccx).ptr_to()), None);
        catch.ret(C_i32(ccx, 1));
    });

    // Note that no invoke is used here because by definition this function
    // can't panic (that's what it's catching).
    let ret = bcx.call(llfn, &[func, data, local_ptr], None);
    bcx.store(ret, dest, None);
}

// Helper function to give a Builder to a closure to translate a shim function.
// This is currently primarily used for the `try` intrinsic functions above.
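// For example, `get_rust_try_fn` below builds the `__rust_try` shim with
// `gen_fn(ccx, "__rust_try", vec![fn_ty, i8p, i8p], tcx.types.i32, trans)`.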
fn gen_fn<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
                    name: &str,
                    inputs: Vec<Ty<'tcx>>,
                    output: Ty<'tcx>,
                    trans: &mut for<'b> FnMut(Builder<'b, 'tcx>))
                    -> ValueRef {
    let rust_fn_ty = ccx.tcx().mk_fn_ptr(ty::Binder(ccx.tcx().mk_fn_sig(
        inputs.into_iter(),
        output,
        false,
        hir::Unsafety::Unsafe,
        Abi::Rust
    )));
    let llfn = declare::define_internal_fn(ccx, name, rust_fn_ty);
    let bcx = Builder::new_block(ccx, llfn, "entry-block");
    trans(bcx);
    llfn
}

// Helper function used to get a handle to the `__rust_try` function used to
// catch exceptions.
//
// This function is only generated once and is then cached.
fn get_rust_try_fn<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
                             trans: &mut for<'b> FnMut(Builder<'b, 'tcx>))
                             -> ValueRef {
    if let Some(llfn) = ccx.rust_try_fn().get() {
        return llfn;
    }

    // Define the type up front for the signature of the rust_try function.
    let tcx = ccx.tcx();
    let i8p = tcx.mk_mut_ptr(tcx.types.i8);
    let fn_ty = tcx.mk_fn_ptr(ty::Binder(tcx.mk_fn_sig(
        iter::once(i8p),
        tcx.mk_nil(),
        false,
        hir::Unsafety::Unsafe,
        Abi::Rust
    )));
    let output = tcx.types.i32;
    let rust_try = gen_fn(ccx, "__rust_try", vec![fn_ty, i8p, i8p], output, trans);
    ccx.rust_try_fn().set(Some(rust_try));
    rust_try
}

fn span_invalid_monomorphization_error(a: &Session, b: Span, c: &str) {
    span_err!(a, b, E0511, "{}", c);
}

fn generic_simd_intrinsic<'a, 'tcx>(
    bcx: &Builder<'a, 'tcx>,
    name: &str,
    callee_ty: Ty<'tcx>,
    llargs: &[ValueRef],
    ret_ty: Ty<'tcx>,
    llret_ty: Type,
    span: Span
) -> ValueRef {
    // macros for error handling:
    macro_rules! emit_error {
        ($msg: tt) => {
            emit_error!($msg, )
        };
        ($msg: tt, $($fmt: tt)*) => {
            span_invalid_monomorphization_error(
                bcx.sess(), span,
                &format!(concat!("invalid monomorphization of `{}` intrinsic: ",
                                 $msg),
                         name, $($fmt)*));
        }
    }
    macro_rules! require {
        ($cond: expr, $($fmt: tt)*) => {
            if !($cond) {
                emit_error!($($fmt)*);
                return C_nil(bcx.ccx)
            }
        }
    }
    macro_rules! require_simd {
        ($ty: expr, $position: expr) => {
            require!($ty.is_simd(), "expected SIMD {} type, found non-SIMD `{}`", $position, $ty)
        }
    }

    let tcx = bcx.tcx();
    let sig = tcx.erase_late_bound_regions_and_normalize(&callee_ty.fn_sig(tcx));
    let arg_tys = sig.inputs();

    // every intrinsic takes a SIMD vector as its first argument
    require_simd!(arg_tys[0], "input");
    let in_ty = arg_tys[0];
    let in_elem = arg_tys[0].simd_type(tcx);
    let in_len = arg_tys[0].simd_size(tcx);

    let comparison = match name {
        "simd_eq" => Some(hir::BiEq),
        "simd_ne" => Some(hir::BiNe),
        "simd_lt" => Some(hir::BiLt),
        "simd_le" => Some(hir::BiLe),
        "simd_gt" => Some(hir::BiGt),
        "simd_ge" => Some(hir::BiGe),
        _ => None
    };

    if let Some(cmp_op) = comparison {
        require_simd!(ret_ty, "return");

        let out_len = ret_ty.simd_size(tcx);
        require!(in_len == out_len,
                 "expected return type with length {} (same as input type `{}`), \
                  found `{}` with length {}",
                 in_len, in_ty,
                 ret_ty, out_len);
        require!(llret_ty.element_type().kind() == llvm::Integer,
                 "expected return type with integer elements, found `{}` with non-integer `{}`",
                 ret_ty,
                 ret_ty.simd_type(tcx));

        return compare_simd_types(bcx,
                                  llargs[0],
                                  llargs[1],
                                  in_elem,
                                  llret_ty,
                                  cmp_op)
    }

    if name.starts_with("simd_shuffle") {
        let n: usize = match name["simd_shuffle".len()..].parse() {
            Ok(n) => n,
            Err(_) => span_bug!(span,
                                "bad `simd_shuffle` instruction only caught in trans?")
        };
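        // e.g. "simd_shuffle4" parses to n = 4: four output lanes, each index
        // picking a lane from either input vector (hence the `in_len * 2`
        // bound checked below).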
        require_simd!(ret_ty, "return");

        let out_len = ret_ty.simd_size(tcx);
        require!(out_len == n,
                 "expected return type of length {}, found `{}` with length {}",
                 n, ret_ty, out_len);
        require!(in_elem == ret_ty.simd_type(tcx),
                 "expected return element type `{}` (element of input `{}`), \
                  found `{}` with element type `{}`",
                 in_elem, in_ty,
                 ret_ty, ret_ty.simd_type(tcx));

        let total_len = in_len as u128 * 2;

        let vector = llargs[2];

        let indices: Option<Vec<_>> = (0..n)
            .map(|i| {
                let arg_idx = i;
                let val = const_get_elt(vector, &[i as libc::c_uint]);
                match const_to_opt_u128(val, true) {
                    None => {
                        emit_error!("shuffle index #{} is not a constant", arg_idx);
                        None
                    }
                    Some(idx) if idx >= total_len => {
                        emit_error!("shuffle index #{} is out of bounds (limit {})",
                                    arg_idx, total_len);
                        None
                    }
                    Some(idx) => Some(C_i32(bcx.ccx, idx as i32)),
                }
            })
            .collect();
        let indices = match indices {
            Some(i) => i,
            None => return C_null(llret_ty)
        };

        return bcx.shuffle_vector(llargs[0], llargs[1], C_vector(&indices))
    }

    if name == "simd_insert" {
        require!(in_elem == arg_tys[2],
                 "expected inserted type `{}` (element of input `{}`), found `{}`",
                 in_elem, in_ty, arg_tys[2]);
        return bcx.insert_element(llargs[0], llargs[2], llargs[1])
    }
    if name == "simd_extract" {
        require!(ret_ty == in_elem,
                 "expected return type `{}` (element of input `{}`), found `{}`",
                 in_elem, in_ty, ret_ty);
        return bcx.extract_element(llargs[0], llargs[1])
    }

    if name == "simd_cast" {
        require_simd!(ret_ty, "return");
        let out_len = ret_ty.simd_size(tcx);
        require!(in_len == out_len,
                 "expected return type with length {} (same as input type `{}`), \
                  found `{}` with length {}",
                 in_len, in_ty,
                 ret_ty, out_len);
        // casting cares about nominal type, not just structural type
        let out_elem = ret_ty.simd_type(tcx);

        if in_elem == out_elem { return llargs[0]; }

        enum Style { Float, Int(/* is signed? */ bool), Unsupported }

        let (in_style, in_width) = match in_elem.sty {
            // vectors of pointer-sized integers should've been
            // disallowed before here, so this unwrap is safe.
            ty::TyInt(i) => (Style::Int(true), i.bit_width().unwrap()),
            ty::TyUint(u) => (Style::Int(false), u.bit_width().unwrap()),
            ty::TyFloat(f) => (Style::Float, f.bit_width()),
            _ => (Style::Unsupported, 0)
        };
        let (out_style, out_width) = match out_elem.sty {
            ty::TyInt(i) => (Style::Int(true), i.bit_width().unwrap()),
            ty::TyUint(u) => (Style::Int(false), u.bit_width().unwrap()),
            ty::TyFloat(f) => (Style::Float, f.bit_width()),
            _ => (Style::Unsupported, 0)
        };

        match (in_style, out_style) {
            (Style::Int(in_is_signed), Style::Int(_)) => {
                return match in_width.cmp(&out_width) {
                    Ordering::Greater => bcx.trunc(llargs[0], llret_ty),
                    Ordering::Equal => llargs[0],
                    Ordering::Less => if in_is_signed {
                        bcx.sext(llargs[0], llret_ty)
                    } else {
                        bcx.zext(llargs[0], llret_ty)
                    }
                }
            }
            (Style::Int(in_is_signed), Style::Float) => {
                return if in_is_signed {
                    bcx.sitofp(llargs[0], llret_ty)
                } else {
                    bcx.uitofp(llargs[0], llret_ty)
                }
            }
            (Style::Float, Style::Int(out_is_signed)) => {
                return if out_is_signed {
                    bcx.fptosi(llargs[0], llret_ty)
                } else {
                    bcx.fptoui(llargs[0], llret_ty)
                }
            }
            (Style::Float, Style::Float) => {
                return match in_width.cmp(&out_width) {
                    Ordering::Greater => bcx.fptrunc(llargs[0], llret_ty),
                    Ordering::Equal => llargs[0],
                    Ordering::Less => bcx.fpext(llargs[0], llret_ty)
                }
            }
            _ => {/* Unsupported. Fallthrough. */}
        }
        require!(false,
                 "unsupported cast from `{}` with element `{}` to `{}` with element `{}`",
                 in_ty, in_elem,
                 ret_ty, out_elem);
    }
    macro_rules! arith {
        ($($name: ident: $($($p: ident),* => $call: ident),*;)*) => {
            $(
                if name == stringify!($name) {
                    match in_elem.sty {
                        $(
                            $(ty::$p(_))|* => {
                                return bcx.$call(llargs[0], llargs[1])
                            }
                        )*
                        _ => {},
                    }
                    require!(false,
                             "unsupported operation on `{}` with element `{}`",
                             in_ty,
                             in_elem)
                })*
        }
    }
    arith! {
        simd_add: TyUint, TyInt => add, TyFloat => fadd;
        simd_sub: TyUint, TyInt => sub, TyFloat => fsub;
        simd_mul: TyUint, TyInt => mul, TyFloat => fmul;
        simd_div: TyFloat => fdiv;
        simd_shl: TyUint, TyInt => shl;
        simd_shr: TyUint => lshr, TyInt => ashr;
        simd_and: TyUint, TyInt => and;
        simd_or: TyUint, TyInt => or;
        simd_xor: TyUint, TyInt => xor;
    }
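    // So e.g. `simd_add` selects `fadd` for float vectors and `add` for
    // integer vectors, while a combination not listed above (such as
    // `simd_div` on integers) falls through to the "unsupported operation"
    // error inside the macro.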
    span_bug!(span, "unknown SIMD intrinsic");
}

// Returns the width of an int TypeVariant, and whether it is signed or not.
// Returns None if the type is not an integer.
// FIXME: there are multiple versions of this function; investigate reusing
// one of the existing helpers instead.
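// For example, on a 64-bit target this returns `Some((64, true))` for `isize`
// and `Some((8, false))` for `u8`; non-integer types return `None`.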
fn int_type_width_signed<'tcx>(sty: &ty::TypeVariants<'tcx>, ccx: &CrateContext)
        -> Option<(u64, bool)> {
    use rustc::ty::{TyInt, TyUint};
    match *sty {
        TyInt(t) => Some((match t {
            ast::IntTy::Is => {
                match &ccx.tcx().sess.target.target.target_pointer_width[..] {
                    "16" => 16,
                    "32" => 32,
                    "64" => 64,
                    tws => bug!("Unsupported target word size for isize: {}", tws),
                }
            },
            ast::IntTy::I8 => 8,
            ast::IntTy::I16 => 16,
            ast::IntTy::I32 => 32,
            ast::IntTy::I64 => 64,
            ast::IntTy::I128 => 128,
        }, true)),
        TyUint(t) => Some((match t {
            ast::UintTy::Us => {
                match &ccx.tcx().sess.target.target.target_pointer_width[..] {
                    "16" => 16,
                    "32" => 32,
                    "64" => 64,
                    tws => bug!("Unsupported target word size for usize: {}", tws),
                }
            },
            ast::UintTy::U8 => 8,
            ast::UintTy::U16 => 16,
            ast::UintTy::U32 => 32,
            ast::UintTy::U64 => 64,
            ast::UintTy::U128 => 128,
        }, false)),
        _ => None,
    }
}

// Returns the width of a float TypeVariant.
// Returns None if the type is not a float.
fn float_type_width<'tcx>(sty: &ty::TypeVariants<'tcx>)
        -> Option<u64> {
    use rustc::ty::TyFloat;
    match *sty {
        TyFloat(t) => Some(match t {
            ast::FloatTy::F32 => 32,
            ast::FloatTy::F64 => 64,
        }),
        _ => None,
    }
}