// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.

#![allow(non_upper_case_globals)]
use arena::TypedArena;
use intrinsics::{self, Intrinsic};
use libc;
use llvm;
use llvm::{SequentiallyConsistent, Acquire, Release, AtomicXchg, ValueRef, TypeKind};
use middle::subst;
use middle::subst::FnSpace;
use trans::adt;
use trans::attributes;
use trans::base::*;
use trans::build::*;
use trans::callee;
use trans::cleanup;
use trans::cleanup::CleanupMethods;
use trans::common::*;
use trans::consts;
use trans::datum::*;
use trans::debuginfo::DebugLoc;
use trans::declare;
use trans::expr;
use trans::glue;
use trans::machine;
use trans::type_of::*;
use trans::type_of;
use trans::machine::llsize_of;
use trans::type_::Type;
use middle::ty::{self, Ty, HasTypeFlags};
use middle::subst::Substs;
use rustc_front::hir;
use syntax::abi::{self, RustIntrinsic};
use syntax::ast;
use syntax::ptr::P;
use syntax::parse::token;

use rustc::session::Session;
use syntax::codemap::Span;

use std::cmp::Ordering;
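
// A sketch of the mapping performed below (illustrative, not exhaustive): a
// Rust-side intrinsic name such as `sqrtf32` resolves directly to the LLVM
// intrinsic `llvm.sqrt.f32`, whose declaration is looked up (and cached)
// through `ccx.get_intrinsic`. Names not in this table return `None` and are
// handled by `trans_intrinsic_call` instead.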

pub fn get_simple_intrinsic(ccx: &CrateContext, item: &hir::ForeignItem) -> Option<ValueRef> {
    let name = match &*item.ident.name.as_str() {
        "sqrtf32" => "llvm.sqrt.f32",
        "sqrtf64" => "llvm.sqrt.f64",
        "powif32" => "llvm.powi.f32",
        "powif64" => "llvm.powi.f64",
        "sinf32" => "llvm.sin.f32",
        "sinf64" => "llvm.sin.f64",
        "cosf32" => "llvm.cos.f32",
        "cosf64" => "llvm.cos.f64",
        "powf32" => "llvm.pow.f32",
        "powf64" => "llvm.pow.f64",
        "expf32" => "llvm.exp.f32",
        "expf64" => "llvm.exp.f64",
        "exp2f32" => "llvm.exp2.f32",
        "exp2f64" => "llvm.exp2.f64",
        "logf32" => "llvm.log.f32",
        "logf64" => "llvm.log.f64",
        "log10f32" => "llvm.log10.f32",
        "log10f64" => "llvm.log10.f64",
        "log2f32" => "llvm.log2.f32",
        "log2f64" => "llvm.log2.f64",
        "fmaf32" => "llvm.fma.f32",
        "fmaf64" => "llvm.fma.f64",
        "fabsf32" => "llvm.fabs.f32",
        "fabsf64" => "llvm.fabs.f64",
        "copysignf32" => "llvm.copysign.f32",
        "copysignf64" => "llvm.copysign.f64",
        "floorf32" => "llvm.floor.f32",
        "floorf64" => "llvm.floor.f64",
        "ceilf32" => "llvm.ceil.f32",
        "ceilf64" => "llvm.ceil.f64",
        "truncf32" => "llvm.trunc.f32",
        "truncf64" => "llvm.trunc.f64",
        "rintf32" => "llvm.rint.f32",
        "rintf64" => "llvm.rint.f64",
        "nearbyintf32" => "llvm.nearbyint.f32",
        "nearbyintf64" => "llvm.nearbyint.f64",
        "roundf32" => "llvm.round.f32",
        "roundf64" => "llvm.round.f64",
        "ctpop8" => "llvm.ctpop.i8",
        "ctpop16" => "llvm.ctpop.i16",
        "ctpop32" => "llvm.ctpop.i32",
        "ctpop64" => "llvm.ctpop.i64",
        "bswap16" => "llvm.bswap.i16",
        "bswap32" => "llvm.bswap.i32",
        "bswap64" => "llvm.bswap.i64",
        "assume" => "llvm.assume",
        _ => return None
    };
    Some(ccx.get_intrinsic(&name))
}

pub fn span_transmute_size_error(a: &Session, b: Span, msg: &str) {
    span_err!(a, b, E0512, "{}", msg);
}
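
// As a worked example (illustrative, not from the original source): a
// monomorphized `transmute::<u32, [u8; 8]>` would be rejected below with
// "transmute called on types with different sizes: u32 (32 bits) to
// [u8; 8] (64 bits)", matching the format strings in `check_intrinsics`.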

/// Performs late verification that intrinsics are used correctly. At present,
/// the only intrinsic that needs such verification is `transmute`.
pub fn check_intrinsics(ccx: &CrateContext) {
    let mut last_failing_id = None;
    for transmute_restriction in ccx.tcx().transmute_restrictions.borrow().iter() {
        // Sometimes, a single call to transmute will push multiple
        // type pairs to test in order to exhaustively test the
        // possibilities around a type parameter. If one of those fails,
        // there is no sense reporting errors on the others.
        if last_failing_id == Some(transmute_restriction.id) {
            continue;
        }

        debug!("transmute_restriction: {:?}", transmute_restriction);

        assert!(!transmute_restriction.substituted_from.has_param_types());
        assert!(!transmute_restriction.substituted_to.has_param_types());

        let llfromtype = type_of::sizing_type_of(ccx,
                                                 transmute_restriction.substituted_from);
        let lltotype = type_of::sizing_type_of(ccx,
                                               transmute_restriction.substituted_to);
        let from_type_size = machine::llbitsize_of_real(ccx, llfromtype);
        let to_type_size = machine::llbitsize_of_real(ccx, lltotype);
        if from_type_size != to_type_size {
            last_failing_id = Some(transmute_restriction.id);

            if transmute_restriction.original_from != transmute_restriction.substituted_from {
                span_transmute_size_error(ccx.sess(), transmute_restriction.span,
                    &format!("transmute called on types with potentially different sizes: \
                              {} (could be {} bit{}) to {} (could be {} bit{})",
                             transmute_restriction.original_from,
                             from_type_size as usize,
                             if from_type_size == 1 {""} else {"s"},
                             transmute_restriction.original_to,
                             to_type_size as usize,
                             if to_type_size == 1 {""} else {"s"}));
            } else {
                span_transmute_size_error(ccx.sess(), transmute_restriction.span,
                    &format!("transmute called on types with different sizes: \
                              {} ({} bit{}) to {} ({} bit{})",
                             transmute_restriction.original_from,
                             from_type_size as usize,
                             if from_type_size == 1 {""} else {"s"},
                             transmute_restriction.original_to,
                             to_type_size as usize,
                             if to_type_size == 1 {""} else {"s"}));
            }
        }
    }
    ccx.sess().abort_if_errors();
}

/// Remember to add all intrinsics here, in librustc_typeck/check/mod.rs,
/// and in libcore/intrinsics.rs; if you need access to any llvm intrinsics,
/// add them to librustc_trans/trans/context.rs
pub fn trans_intrinsic_call<'a, 'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>,
                                            node: ast::NodeId,
                                            callee_ty: Ty<'tcx>,
                                            cleanup_scope: cleanup::CustomScopeIndex,
                                            args: callee::CallArgs<'a, 'tcx>,
                                            dest: expr::Dest,
                                            substs: subst::Substs<'tcx>,
                                            call_info: NodeIdAndSpan)
                                            -> Result<'blk, 'tcx> {
    let fcx = bcx.fcx;
    let ccx = fcx.ccx;
    let tcx = bcx.tcx();

    let _icx = push_ctxt("trans_intrinsic_call");

    let (arg_tys, ret_ty) = match callee_ty.sty {
        ty::TyBareFn(_, ref f) => {
            (bcx.tcx().erase_late_bound_regions(&f.sig.inputs()),
             bcx.tcx().erase_late_bound_regions(&f.sig.output()))
        }
        _ => panic!("expected bare_fn in trans_intrinsic_call")
    };
    let foreign_item = tcx.map.expect_foreign_item(node);
    let name = foreign_item.ident.name.as_str();

    // For `transmute` we can just trans the input expr directly into dest
    if name == "transmute" {
        let llret_ty = type_of::type_of(ccx, ret_ty.unwrap());
        match args {
            callee::ArgExprs(arg_exprs) => {
                assert_eq!(arg_exprs.len(), 1);

                let (in_type, out_type) = (*substs.types.get(FnSpace, 0),
                                           *substs.types.get(FnSpace, 1));
                let llintype = type_of::type_of(ccx, in_type);
                let llouttype = type_of::type_of(ccx, out_type);

                let in_type_size = machine::llbitsize_of_real(ccx, llintype);
                let out_type_size = machine::llbitsize_of_real(ccx, llouttype);

                // This should be caught by the intrinsicck pass
                assert_eq!(in_type_size, out_type_size);

                let nonpointer_nonaggregate = |llkind: TypeKind| -> bool {
                    use llvm::TypeKind::*;
                    match llkind {
                        Half | Float | Double | X86_FP80 | FP128 |
                            PPC_FP128 | Integer | Vector | X86_MMX => true,
                        _ => false
                    }
                };

                // An approximation to which types can be directly cast via
                // LLVM's bitcast. This doesn't cover pointer -> pointer casts,
                // but does, importantly, cover SIMD types.
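                // (Illustration, not from the original source: a `u32` ->
                // `f32` or `u32x4` -> `u64x2` transmute takes the by-value
                // BitCast path below, while an aggregate such as `(u8, u16)`
                // falls through to the pointer-cast path.)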
                let in_kind = llintype.kind();
                let ret_kind = llret_ty.kind();
                let bitcast_compatible =
                    (nonpointer_nonaggregate(in_kind) && nonpointer_nonaggregate(ret_kind)) || {
                        in_kind == TypeKind::Pointer && ret_kind == TypeKind::Pointer
                    };

                let dest = if bitcast_compatible {
                    // if we're here, the type is scalar-like (a primitive, a
                    // SIMD type or a pointer), and so can be handled as a
                    // by-value ValueRef and can also be directly bitcast to the
                    // target type. Doing this special case makes conversions
                    // like `u32x4` -> `u64x2` much nicer for LLVM and so more
                    // efficient (these are done efficiently implicitly in C
                    // with the `__m128i` type and so this means Rust doesn't
                    // lose out there).
                    let expr = &*arg_exprs[0];
                    let datum = unpack_datum!(bcx, expr::trans(bcx, expr));
                    let datum = unpack_datum!(bcx, datum.to_rvalue_datum(bcx, "transmute_temp"));
                    let val = if datum.kind.is_by_ref() {
                        load_ty(bcx, datum.val, datum.ty)
                    } else {
                        from_arg_ty(bcx, datum.val, datum.ty)
                    };

                    let cast_val = BitCast(bcx, val, llret_ty);

                    match dest {
                        expr::SaveIn(d) => {
                            // this often occurs in a sequence like `Store(val,
                            // d); val2 = Load(d)`, so disappears easily.
                            Store(bcx, cast_val, d);
                        }
                        expr::Ignore => {}
                    }
                    dest
                } else {
                    // The types are too complicated to do with a by-value
                    // bitcast, so pointer cast instead. We need to cast the
                    // dest so the types work out.
                    let dest = match dest {
                        expr::SaveIn(d) => expr::SaveIn(PointerCast(bcx, d, llintype.ptr_to())),
                        expr::Ignore => expr::Ignore
                    };
                    bcx = expr::trans_into(bcx, &*arg_exprs[0], dest);
                    dest
                };

                fcx.scopes.borrow_mut().last_mut().unwrap().drop_non_lifetime_clean();
                fcx.pop_and_trans_custom_cleanup_scope(bcx, cleanup_scope);

                return match dest {
                    expr::SaveIn(d) => Result::new(bcx, d),
                    expr::Ignore => Result::new(bcx, C_undef(llret_ty.ptr_to()))
                };
            }

            _ => {
                ccx.sess().bug("expected expr as argument for transmute");
            }
        }
    }

    // For `move_val_init` we can evaluate the destination address
    // (the first argument) and then trans the source value (the
    // second argument) directly into the resulting destination
    // address.
    if name == "move_val_init" {
        if let callee::ArgExprs(ref exprs) = args {
            let (dest_expr, source_expr) = if exprs.len() != 2 {
                ccx.sess().bug("expected two exprs as arguments for `move_val_init` intrinsic");
            } else {
                (&exprs[0], &exprs[1])
            };

            // evaluate destination address
            let dest_datum = unpack_datum!(bcx, expr::trans(bcx, dest_expr));
            let dest_datum = unpack_datum!(
                bcx, dest_datum.to_rvalue_datum(bcx, "arg"));
            let dest_datum = unpack_datum!(
                bcx, dest_datum.to_appropriate_datum(bcx));

            // `expr::trans_into(bcx, expr, dest)` is equiv to
            //
            //    `trans(bcx, expr).store_to_dest(dest)`,
            //
            // which for `dest == expr::SaveIn(addr)`, is equivalent to:
            //
            //    `trans(bcx, expr).store_to(bcx, addr)`.
            let lldest = expr::Dest::SaveIn(dest_datum.val);
            bcx = expr::trans_into(bcx, source_expr, lldest);

            let llresult = C_nil(ccx);
            fcx.pop_and_trans_custom_cleanup_scope(bcx, cleanup_scope);

            return Result::new(bcx, llresult);
        } else {
            ccx.sess().bug("expected two exprs as arguments for `move_val_init` intrinsic");
        }
    }
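
    // In other words (a sketch of the semantics, assuming a two-argument
    // call): `move_val_init(&mut dst, src)` evaluates `src` directly into
    // the address computed for `dst`, with no intermediate temporary.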

    let call_debug_location = DebugLoc::At(call_info.id, call_info.span);

    // For `try` we need some custom control flow
    if &name[..] == "try" {
        if let callee::ArgExprs(ref exprs) = args {
            let (func, data) = if exprs.len() != 2 {
                ccx.sess().bug("expected two exprs as arguments for \
                                `try` intrinsic");
            } else {
                (&exprs[0], &exprs[1])
            };

            // translate arguments
            let func = unpack_datum!(bcx, expr::trans(bcx, func));
            let func = unpack_datum!(bcx, func.to_rvalue_datum(bcx, "func"));
            let data = unpack_datum!(bcx, expr::trans(bcx, data));
            let data = unpack_datum!(bcx, data.to_rvalue_datum(bcx, "data"));

            let dest = match dest {
                expr::SaveIn(d) => d,
                expr::Ignore => alloc_ty(bcx, tcx.mk_mut_ptr(tcx.types.i8),
                                         "try_result"),
            };

            // do the invoke
            bcx = try_intrinsic(bcx, func.val, data.val, dest,
                                call_debug_location);

            fcx.pop_and_trans_custom_cleanup_scope(bcx, cleanup_scope);
            return Result::new(bcx, dest);
        } else {
            ccx.sess().bug("expected two exprs as arguments for \
                            `try` intrinsic");
        }
    }
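
    // A note on the contract assumed here: the shims built by
    // `trans_msvc_try`/`trans_gnu_try` below return null on success and a
    // pointer to the exception object if the closure panicked, and that
    // pointer is what gets written through `dest`.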

    // save the actual AST arguments for later (some places need to do
    // const-evaluation on them)
    let expr_arguments = match args {
        callee::ArgExprs(args) => Some(args),
        _ => None,
    };

    // Push the arguments.
    let mut llargs = Vec::new();
    bcx = callee::trans_args(bcx,
                             args,
                             callee_ty,
                             &mut llargs,
                             cleanup::CustomScope(cleanup_scope),
                             false,
                             RustIntrinsic);

    fcx.scopes.borrow_mut().last_mut().unwrap().drop_non_lifetime_clean();

    // These are the only intrinsic functions that diverge.
    if &name[..] == "abort" {
        let llfn = ccx.get_intrinsic(&("llvm.trap"));
        Call(bcx, llfn, &[], None, call_debug_location);
        fcx.pop_and_trans_custom_cleanup_scope(bcx, cleanup_scope);
        Unreachable(bcx);
        return Result::new(bcx, C_undef(Type::nil(ccx).ptr_to()));
    } else if &name[..] == "unreachable" {
        fcx.pop_and_trans_custom_cleanup_scope(bcx, cleanup_scope);
        Unreachable(bcx);
        return Result::new(bcx, C_nil(ccx));
    }

    let ret_ty = match ret_ty {
        ty::FnConverging(ret_ty) => ret_ty,
        ty::FnDiverging => unreachable!()
    };

    let llret_ty = type_of::type_of(ccx, ret_ty);

    // Get location to store the result. If the user does
    // not care about the result, just make a stack slot
    let llresult = match dest {
        expr::SaveIn(d) => d,
        expr::Ignore => {
            if !type_is_zero_size(ccx, ret_ty) {
                let llresult = alloc_ty(bcx, ret_ty, "intrinsic_result");
                call_lifetime_start(bcx, llresult);
                llresult
            } else {
                C_undef(llret_ty.ptr_to())
            }
        }
    };

    let simple = get_simple_intrinsic(ccx, &*foreign_item);
    let llval = match (simple, &*name) {
        (Some(llfn), _) => {
            Call(bcx, llfn, &llargs, None, call_debug_location)
        }
        (_, "breakpoint") => {
            let llfn = ccx.get_intrinsic(&("llvm.debugtrap"));
            Call(bcx, llfn, &[], None, call_debug_location)
        }
        (_, "size_of") => {
            let tp_ty = *substs.types.get(FnSpace, 0);
            let lltp_ty = type_of::type_of(ccx, tp_ty);
            C_uint(ccx, machine::llsize_of_alloc(ccx, lltp_ty))
        }
        (_, "size_of_val") => {
            let tp_ty = *substs.types.get(FnSpace, 0);
            if !type_is_sized(tcx, tp_ty) {
                let (llsize, _) = glue::size_and_align_of_dst(bcx, tp_ty, llargs[1]);
                llsize
            } else {
                let lltp_ty = type_of::type_of(ccx, tp_ty);
                C_uint(ccx, machine::llsize_of_alloc(ccx, lltp_ty))
            }
        }
        (_, "min_align_of") => {
            let tp_ty = *substs.types.get(FnSpace, 0);
            C_uint(ccx, type_of::align_of(ccx, tp_ty))
        }
        (_, "min_align_of_val") => {
            let tp_ty = *substs.types.get(FnSpace, 0);
            if !type_is_sized(tcx, tp_ty) {
                let (_, llalign) = glue::size_and_align_of_dst(bcx, tp_ty, llargs[1]);
                llalign
            } else {
                C_uint(ccx, type_of::align_of(ccx, tp_ty))
            }
        }
        (_, "pref_align_of") => {
            let tp_ty = *substs.types.get(FnSpace, 0);
            let lltp_ty = type_of::type_of(ccx, tp_ty);
            C_uint(ccx, machine::llalign_of_pref(ccx, lltp_ty))
        }
        (_, "drop_in_place") => {
            let tp_ty = *substs.types.get(FnSpace, 0);
            let ptr = if type_is_sized(tcx, tp_ty) {
                llargs[0]
            } else {
                let scratch = rvalue_scratch_datum(bcx, tp_ty, "tmp");
                Store(bcx, llargs[0], expr::get_dataptr(bcx, scratch.val));
                Store(bcx, llargs[1], expr::get_meta(bcx, scratch.val));
                fcx.schedule_lifetime_end(cleanup::CustomScope(cleanup_scope), scratch.val);
                scratch.val
            };
            glue::drop_ty(bcx, ptr, tp_ty, call_debug_location);
            C_nil(ccx)
        }
        (_, "type_name") => {
            let tp_ty = *substs.types.get(FnSpace, 0);
            let ty_name = token::intern_and_get_ident(&tp_ty.to_string());
            C_str_slice(ccx, ty_name)
        }
        (_, "type_id") => {
            let hash = ccx.tcx().hash_crate_independent(*substs.types.get(FnSpace, 0),
                                                        &ccx.link_meta().crate_hash);
            C_u64(ccx, hash)
        }
        (_, "init_dropped") => {
            let tp_ty = *substs.types.get(FnSpace, 0);
            if !return_type_is_void(ccx, tp_ty) {
                drop_done_fill_mem(bcx, llresult, tp_ty);
            }
            C_nil(ccx)
        }
        (_, "init") => {
            let tp_ty = *substs.types.get(FnSpace, 0);
            if !return_type_is_void(ccx, tp_ty) {
                // Just zero out the stack slot. (See comment on base::memzero for explanation)
                init_zero_mem(bcx, llresult, tp_ty);
            }
            C_nil(ccx)
        }
        // Effectively no-ops
        (_, "uninit") | (_, "forget") => {
            C_nil(ccx)
        }
        (_, "needs_drop") => {
            let tp_ty = *substs.types.get(FnSpace, 0);

            C_bool(ccx, bcx.fcx.type_needs_drop(tp_ty))
        }
        (_, "offset") => {
            let ptr = llargs[0];
            let offset = llargs[1];
            InBoundsGEP(bcx, ptr, &[offset])
        }
        (_, "arith_offset") => {
            let ptr = llargs[0];
            let offset = llargs[1];
            GEP(bcx, ptr, &[offset])
        }

        (_, "copy_nonoverlapping") => {
            copy_intrinsic(bcx,
                           false,
                           false,
                           *substs.types.get(FnSpace, 0),
                           llargs[1],
                           llargs[0],
                           llargs[2],
                           call_debug_location)
        }
        (_, "copy") => {
            copy_intrinsic(bcx,
                           true,
                           false,
                           *substs.types.get(FnSpace, 0),
                           llargs[1],
                           llargs[0],
                           llargs[2],
                           call_debug_location)
        }
        (_, "write_bytes") => {
            memset_intrinsic(bcx,
                             false,
                             *substs.types.get(FnSpace, 0),
                             llargs[0],
                             llargs[1],
                             llargs[2],
                             call_debug_location)
        }

        (_, "volatile_copy_nonoverlapping_memory") => {
            copy_intrinsic(bcx,
                           false,
                           true,
                           *substs.types.get(FnSpace, 0),
                           llargs[0],
                           llargs[1],
                           llargs[2],
                           call_debug_location)
        }
        (_, "volatile_copy_memory") => {
            copy_intrinsic(bcx,
                           true,
                           true,
                           *substs.types.get(FnSpace, 0),
                           llargs[0],
                           llargs[1],
                           llargs[2],
                           call_debug_location)
        }
        (_, "volatile_set_memory") => {
            memset_intrinsic(bcx,
                             true,
                             *substs.types.get(FnSpace, 0),
                             llargs[0],
                             llargs[1],
                             llargs[2],
                             call_debug_location)
        }
        (_, "volatile_load") => {
            let tp_ty = *substs.types.get(FnSpace, 0);
            let ptr = to_arg_ty_ptr(bcx, llargs[0], tp_ty);
            let load = VolatileLoad(bcx, ptr);
            unsafe {
                llvm::LLVMSetAlignment(load, type_of::align_of(ccx, tp_ty));
            }
            to_arg_ty(bcx, load, tp_ty)
        },
        (_, "volatile_store") => {
            let tp_ty = *substs.types.get(FnSpace, 0);
            let ptr = to_arg_ty_ptr(bcx, llargs[0], tp_ty);
            let val = from_arg_ty(bcx, llargs[1], tp_ty);
            let store = VolatileStore(bcx, val, ptr);
            unsafe {
                llvm::LLVMSetAlignment(store, type_of::align_of(ccx, tp_ty));
            }
            C_nil(ccx)
        },
592 (_, "ctlz8") => count_zeros_intrinsic(bcx,
595 call_debug_location),
596 (_, "ctlz16") => count_zeros_intrinsic(bcx,
599 call_debug_location),
600 (_, "ctlz32") => count_zeros_intrinsic(bcx,
603 call_debug_location),
604 (_, "ctlz64") => count_zeros_intrinsic(bcx,
607 call_debug_location),
608 (_, "cttz8") => count_zeros_intrinsic(bcx,
611 call_debug_location),
612 (_, "cttz16") => count_zeros_intrinsic(bcx,
615 call_debug_location),
616 (_, "cttz32") => count_zeros_intrinsic(bcx,
619 call_debug_location),
620 (_, "cttz64") => count_zeros_intrinsic(bcx,
623 call_debug_location),
625 (_, "i8_add_with_overflow") =>
626 with_overflow_intrinsic(bcx,
627 "llvm.sadd.with.overflow.i8",
631 call_debug_location),
632 (_, "i16_add_with_overflow") =>
633 with_overflow_intrinsic(bcx,
634 "llvm.sadd.with.overflow.i16",
638 call_debug_location),
639 (_, "i32_add_with_overflow") =>
640 with_overflow_intrinsic(bcx,
641 "llvm.sadd.with.overflow.i32",
645 call_debug_location),
646 (_, "i64_add_with_overflow") =>
647 with_overflow_intrinsic(bcx,
648 "llvm.sadd.with.overflow.i64",
652 call_debug_location),
654 (_, "u8_add_with_overflow") =>
655 with_overflow_intrinsic(bcx,
656 "llvm.uadd.with.overflow.i8",
660 call_debug_location),
661 (_, "u16_add_with_overflow") =>
662 with_overflow_intrinsic(bcx,
663 "llvm.uadd.with.overflow.i16",
667 call_debug_location),
668 (_, "u32_add_with_overflow") =>
669 with_overflow_intrinsic(bcx,
670 "llvm.uadd.with.overflow.i32",
674 call_debug_location),
675 (_, "u64_add_with_overflow") =>
676 with_overflow_intrinsic(bcx,
677 "llvm.uadd.with.overflow.i64",
681 call_debug_location),
682 (_, "i8_sub_with_overflow") =>
683 with_overflow_intrinsic(bcx,
684 "llvm.ssub.with.overflow.i8",
688 call_debug_location),
689 (_, "i16_sub_with_overflow") =>
690 with_overflow_intrinsic(bcx,
691 "llvm.ssub.with.overflow.i16",
695 call_debug_location),
696 (_, "i32_sub_with_overflow") =>
697 with_overflow_intrinsic(bcx,
698 "llvm.ssub.with.overflow.i32",
702 call_debug_location),
703 (_, "i64_sub_with_overflow") =>
704 with_overflow_intrinsic(bcx,
705 "llvm.ssub.with.overflow.i64",
709 call_debug_location),
710 (_, "u8_sub_with_overflow") =>
711 with_overflow_intrinsic(bcx,
712 "llvm.usub.with.overflow.i8",
716 call_debug_location),
717 (_, "u16_sub_with_overflow") =>
718 with_overflow_intrinsic(bcx,
719 "llvm.usub.with.overflow.i16",
723 call_debug_location),
724 (_, "u32_sub_with_overflow") =>
725 with_overflow_intrinsic(bcx,
726 "llvm.usub.with.overflow.i32",
730 call_debug_location),
731 (_, "u64_sub_with_overflow") =>
732 with_overflow_intrinsic(bcx,
733 "llvm.usub.with.overflow.i64",
737 call_debug_location),
738 (_, "i8_mul_with_overflow") =>
739 with_overflow_intrinsic(bcx,
740 "llvm.smul.with.overflow.i8",
744 call_debug_location),
745 (_, "i16_mul_with_overflow") =>
746 with_overflow_intrinsic(bcx,
747 "llvm.smul.with.overflow.i16",
751 call_debug_location),
752 (_, "i32_mul_with_overflow") =>
753 with_overflow_intrinsic(bcx,
754 "llvm.smul.with.overflow.i32",
758 call_debug_location),
759 (_, "i64_mul_with_overflow") =>
760 with_overflow_intrinsic(bcx,
761 "llvm.smul.with.overflow.i64",
765 call_debug_location),
766 (_, "u8_mul_with_overflow") =>
767 with_overflow_intrinsic(bcx,
768 "llvm.umul.with.overflow.i8",
772 call_debug_location),
773 (_, "u16_mul_with_overflow") =>
774 with_overflow_intrinsic(bcx,
775 "llvm.umul.with.overflow.i16",
779 call_debug_location),
780 (_, "u32_mul_with_overflow") =>
781 with_overflow_intrinsic(bcx,
782 "llvm.umul.with.overflow.i32",
786 call_debug_location),
787 (_, "u64_mul_with_overflow") =>
788 with_overflow_intrinsic(bcx,
789 "llvm.umul.with.overflow.i64",
793 call_debug_location),
795 (_, "unchecked_udiv") => UDiv(bcx, llargs[0], llargs[1], call_debug_location),
796 (_, "unchecked_sdiv") => SDiv(bcx, llargs[0], llargs[1], call_debug_location),
797 (_, "unchecked_urem") => URem(bcx, llargs[0], llargs[1], call_debug_location),
798 (_, "unchecked_srem") => SRem(bcx, llargs[0], llargs[1], call_debug_location),
800 (_, "overflowing_add") => Add(bcx, llargs[0], llargs[1], call_debug_location),
801 (_, "overflowing_sub") => Sub(bcx, llargs[0], llargs[1], call_debug_location),
802 (_, "overflowing_mul") => Mul(bcx, llargs[0], llargs[1], call_debug_location),
804 (_, "return_address") => {
805 if !fcx.caller_expects_out_pointer {
806 span_err!(tcx.sess, call_info.span, E0510,
807 "invalid use of `return_address` intrinsic: function \
808 does not use out pointer");
809 C_null(Type::i8p(ccx))
811 PointerCast(bcx, llvm::get_param(fcx.llfn, 0), Type::i8p(ccx))
815 (_, "discriminant_value") => {
816 let val_ty = substs.types.get(FnSpace, 0);
819 let repr = adt::represent_type(ccx, *val_ty);
820 adt::trans_get_discr(bcx, &*repr, llargs[0], Some(llret_ty))
822 _ => C_null(llret_ty)
825 (_, name) if name.starts_with("simd_") => {
826 generic_simd_intrinsic(bcx, name,
        // This requires that atomic intrinsics follow a specific naming pattern:
        // "atomic_<operation>[_<ordering>]", and no ordering means SeqCst
        (_, name) if name.starts_with("atomic_") => {
            let split: Vec<&str> = name.split('_').collect();
            assert!(split.len() >= 2, "Atomic intrinsic not correct format");

            let order = if split.len() == 2 {
                llvm::SequentiallyConsistent
            } else {
                match split[2] {
                    "unordered" => llvm::Unordered,
                    "relaxed" => llvm::Monotonic,
                    "acq" => llvm::Acquire,
                    "rel" => llvm::Release,
                    "acqrel" => llvm::AcquireRelease,
                    _ => ccx.sess().fatal("unknown ordering in atomic intrinsic")
                }
            };

            match split[1] {
                "cxchg" => {
                    // See include/llvm/IR/Instructions.h for their implementation
                    // of this; I assume that it's good enough for us to use for
                    // now.
                    let strongest_failure_ordering = match order {
                        llvm::NotAtomic | llvm::Unordered =>
                            ccx.sess().fatal("cmpxchg must be atomic"),

                        llvm::Monotonic | llvm::Release =>
                            llvm::Monotonic,

                        llvm::Acquire | llvm::AcquireRelease =>
                            llvm::Acquire,

                        llvm::SequentiallyConsistent =>
                            llvm::SequentiallyConsistent
                    };

                    let tp_ty = *substs.types.get(FnSpace, 0);
                    let ptr = to_arg_ty_ptr(bcx, llargs[0], tp_ty);
                    let cmp = from_arg_ty(bcx, llargs[1], tp_ty);
                    let src = from_arg_ty(bcx, llargs[2], tp_ty);
                    let res = AtomicCmpXchg(bcx, ptr, cmp, src, order,
                                            strongest_failure_ordering);
                    ExtractValue(bcx, res, 0)
                }

                "load" => {
                    let tp_ty = *substs.types.get(FnSpace, 0);
                    let ptr = to_arg_ty_ptr(bcx, llargs[0], tp_ty);
                    to_arg_ty(bcx, AtomicLoad(bcx, ptr, order), tp_ty)
                }
                "store" => {
                    let tp_ty = *substs.types.get(FnSpace, 0);
                    let ptr = to_arg_ty_ptr(bcx, llargs[0], tp_ty);
                    let val = from_arg_ty(bcx, llargs[1], tp_ty);
                    AtomicStore(bcx, val, ptr, order);
                    C_nil(ccx)
                }

                "fence" => {
                    AtomicFence(bcx, order, llvm::CrossThread);
                    C_nil(ccx)
                }

                "singlethreadfence" => {
                    AtomicFence(bcx, order, llvm::SingleThread);
                    C_nil(ccx)
                }

                // These are all AtomicRMW ops
                op => {
                    let atom_op = match op {
                        "xchg" => llvm::AtomicXchg,
                        "xadd" => llvm::AtomicAdd,
                        "xsub" => llvm::AtomicSub,
                        "and"  => llvm::AtomicAnd,
                        "nand" => llvm::AtomicNand,
                        "or"   => llvm::AtomicOr,
                        "xor"  => llvm::AtomicXor,
                        "max"  => llvm::AtomicMax,
                        "min"  => llvm::AtomicMin,
                        "umax" => llvm::AtomicUMax,
                        "umin" => llvm::AtomicUMin,
                        _ => ccx.sess().fatal("unknown atomic operation")
                    };

                    let tp_ty = *substs.types.get(FnSpace, 0);
                    let ptr = to_arg_ty_ptr(bcx, llargs[0], tp_ty);
                    let val = from_arg_ty(bcx, llargs[1], tp_ty);
                    AtomicRMW(bcx, atom_op, ptr, val, order)
                }
            }
        }
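
        // Fallback: any other name is assumed to be a platform-specific
        // intrinsic described in the `intrinsics` table (for instance a
        // vendor SIMD operation; the exact names live in that table, not
        // here), so it is declared and called generically below.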
        (_, _) => {
            let intr = match Intrinsic::find(tcx, &name) {
                Some(intr) => intr,
                None => ccx.sess().span_bug(foreign_item.span, "unknown intrinsic"),
            };
            fn one<T>(x: Vec<T>) -> T {
                assert_eq!(x.len(), 1);
                x.into_iter().next().unwrap()
            }
            fn ty_to_type(ccx: &CrateContext, t: &intrinsics::Type,
                          any_changes_needed: &mut bool) -> Vec<Type> {
                use intrinsics::Type::*;
                match *t {
                    Void => vec![Type::void(ccx)],
                    Integer(_signed, width, llvm_width) => {
                        *any_changes_needed |= width != llvm_width;
                        vec![Type::ix(ccx, llvm_width as u64)]
                    }
                    Float(x) => {
                        match x {
                            32 => vec![Type::f32(ccx)],
                            64 => vec![Type::f64(ccx)],
                            _ => unreachable!()
                        }
                    }
                    Pointer(ref t, ref llvm_elem, _const) => {
                        *any_changes_needed |= llvm_elem.is_some();

                        let t = llvm_elem.as_ref().unwrap_or(t);
                        let elem = one(ty_to_type(ccx, t,
                                                  any_changes_needed));
                        vec![elem.ptr_to()]
                    }
                    Vector(ref t, ref llvm_elem, length) => {
                        *any_changes_needed |= llvm_elem.is_some();

                        let t = llvm_elem.as_ref().unwrap_or(t);
                        let elem = one(ty_to_type(ccx, t,
                                                  any_changes_needed));
                        vec![Type::vector(&elem,
                                          length as u64)]
                    }
                    Aggregate(false, ref contents) => {
                        let elems = contents.iter()
                                            .map(|t| one(ty_to_type(ccx, t, any_changes_needed)))
                                            .collect::<Vec<_>>();
                        vec![Type::struct_(ccx, &elems, false)]
                    }
                    Aggregate(true, ref contents) => {
                        *any_changes_needed = true;
                        contents.iter()
                                .flat_map(|t| ty_to_type(ccx, t, any_changes_needed))
                                .collect()
                    }
                }
            }

            // This allows an argument list like `foo, (bar, baz),
            // qux` to be converted into `foo, bar, baz, qux`, integer
            // arguments to be truncated as needed and pointers to be
            // cast.
            fn modify_as_needed<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
                                            t: &intrinsics::Type,
                                            arg_type: Ty<'tcx>,
                                            llarg: ValueRef)
                                            -> Vec<ValueRef>
            {
                match *t {
                    intrinsics::Type::Aggregate(true, ref contents) => {
                        // We found a tuple that needs squishing! So
                        // run over the tuple and load each field.
                        //
                        // This assumes the type is "simple", i.e. no
                        // destructors, and the contents are SIMD
                        // etc.
                        assert!(!bcx.fcx.type_needs_drop(arg_type));

                        let repr = adt::represent_type(bcx.ccx(), arg_type);
                        let repr_ptr = &*repr;
                        (0..contents.len())
                            .map(|i| {
                                Load(bcx, adt::trans_field_ptr(bcx, repr_ptr, llarg, 0, i))
                            })
                            .collect()
                    }
                    intrinsics::Type::Pointer(_, Some(ref llvm_elem), _) => {
                        let llvm_elem = one(ty_to_type(bcx.ccx(), llvm_elem, &mut false));
                        vec![PointerCast(bcx, llarg,
                                         llvm_elem.ptr_to())]
                    }
                    intrinsics::Type::Vector(_, Some(ref llvm_elem), length) => {
                        let llvm_elem = one(ty_to_type(bcx.ccx(), llvm_elem, &mut false));
                        vec![BitCast(bcx, llarg,
                                     Type::vector(&llvm_elem, length as u64))]
                    }
                    intrinsics::Type::Integer(_, width, llvm_width) if width != llvm_width => {
                        // the LLVM intrinsic uses a smaller integer
                        // size than the C intrinsic's signature, so
                        // we have to trim it down here.
                        vec![Trunc(bcx, llarg, Type::ix(bcx.ccx(), llvm_width as u64))]
                    }
                    _ => vec![llarg],
                }
            }

            let mut any_changes_needed = false;
            let inputs = intr.inputs.iter()
                                    .flat_map(|t| ty_to_type(ccx, t, &mut any_changes_needed))
                                    .collect::<Vec<_>>();

            let mut out_changes = false;
            let outputs = one(ty_to_type(ccx, &intr.output, &mut out_changes));
            // outputting a flattened aggregate is nonsense
            assert!(!out_changes);

            let llargs = if !any_changes_needed {
                // no aggregates to flatten, so no change needed
                llargs
            } else {
                // there are some aggregates that need to be flattened
                // in the LLVM call, so we need to run over the types
                // again to find them and extract the arguments
                intr.inputs.iter()
                           .zip(&llargs)
                           .zip(&arg_tys)
                           .flat_map(|((t, llarg), ty)| modify_as_needed(bcx, t, ty, *llarg))
                           .collect()
            };

            assert_eq!(inputs.len(), llargs.len());

            let val = match intr.definition {
                intrinsics::IntrinsicDef::Named(name) => {
                    let f = declare::declare_cfn(ccx,
                                                 name,
                                                 Type::func(&inputs, &outputs),
                                                 tcx.mk_nil());
                    Call(bcx, f, &llargs, None, call_debug_location)
                }
            };

            match intr.output {
                intrinsics::Type::Aggregate(flatten, ref elems) => {
                    // the output is a tuple so we need to munge it properly
                    assert!(!flatten);

                    for i in 0..elems.len() {
                        let val = ExtractValue(bcx, val, i);
                        Store(bcx, val, StructGEP(bcx, llresult, i));
                    }
                    C_nil(ccx)
                }
                _ => val,
            }
        }
    };

    if val_ty(llval) != Type::void(ccx) &&
       machine::llsize_of_alloc(ccx, val_ty(llval)) != 0 {
        store_ty(bcx, llval, llresult, ret_ty);
    }

    // If we made a temporary stack slot, let's clean it up
    match dest {
        expr::Ignore => {
            bcx = glue::drop_ty(bcx, llresult, ret_ty, call_debug_location);
            call_lifetime_end(bcx, llresult);
        }
        expr::SaveIn(_) => {}
    }

    fcx.pop_and_trans_custom_cleanup_scope(bcx, cleanup_scope);

    Result::new(bcx, llresult)
}

fn copy_intrinsic<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
                              allow_overlap: bool,
                              volatile: bool,
                              tp_ty: Ty<'tcx>,
                              dst: ValueRef,
                              src: ValueRef,
                              count: ValueRef,
                              call_debug_location: DebugLoc)
                              -> ValueRef {
    let ccx = bcx.ccx();
    let lltp_ty = type_of::type_of(ccx, tp_ty);
    let align = C_i32(ccx, type_of::align_of(ccx, tp_ty) as i32);
    let size = machine::llsize_of(ccx, lltp_ty);
    let int_size = machine::llbitsize_of_real(ccx, ccx.int_type());

    let operation = if allow_overlap {
        "memmove"
    } else {
        "memcpy"
    };

    let name = format!("llvm.{}.p0i8.p0i8.i{}", operation, int_size);

    let dst_ptr = PointerCast(bcx, dst, Type::i8p(ccx));
    let src_ptr = PointerCast(bcx, src, Type::i8p(ccx));
    let llfn = ccx.get_intrinsic(&name);

    Call(bcx,
         llfn,
         &[dst_ptr,
           src_ptr,
           Mul(bcx, size, count, DebugLoc::None),
           align,
           C_bool(ccx, volatile)],
         None,
         call_debug_location)
}
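
// For instance (illustrative): on a target with a 64-bit `int_size`,
// `copy_intrinsic` with `allow_overlap: false` selects
// "llvm.memcpy.p0i8.p0i8.i64" and `allow_overlap: true` selects the memmove
// variant, while `memset_intrinsic` below selects "llvm.memset.p0i8.i64";
// in each case the byte length is computed as `size * count`.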

fn memset_intrinsic<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
                                volatile: bool,
                                tp_ty: Ty<'tcx>,
                                dst: ValueRef,
                                val: ValueRef,
                                count: ValueRef,
                                call_debug_location: DebugLoc)
                                -> ValueRef {
    let ccx = bcx.ccx();
    let lltp_ty = type_of::type_of(ccx, tp_ty);
    let align = C_i32(ccx, type_of::align_of(ccx, tp_ty) as i32);
    let size = machine::llsize_of(ccx, lltp_ty);
    let int_size = machine::llbitsize_of_real(ccx, ccx.int_type());

    let name = format!("llvm.memset.p0i8.i{}", int_size);

    let dst_ptr = PointerCast(bcx, dst, Type::i8p(ccx));
    let llfn = ccx.get_intrinsic(&name);

    Call(bcx,
         llfn,
         &[dst_ptr,
           val,
           Mul(bcx, size, count, DebugLoc::None),
           align,
           C_bool(ccx, volatile)],
         None,
         call_debug_location)
}

fn count_zeros_intrinsic(bcx: Block,
                         name: &'static str,
                         val: ValueRef,
                         call_debug_location: DebugLoc)
                         -> ValueRef {
    let y = C_bool(bcx.ccx(), false);
    let llfn = bcx.ccx().get_intrinsic(&name);
    Call(bcx, llfn, &[val, y], None, call_debug_location)
}

fn with_overflow_intrinsic<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
                                       name: &'static str,
                                       a: ValueRef,
                                       b: ValueRef,
                                       out: ValueRef,
                                       call_debug_location: DebugLoc)
                                       -> ValueRef {
    let llfn = bcx.ccx().get_intrinsic(&name);

    // Convert `i1` to a `bool`, and write it to the out parameter
    let val = Call(bcx, llfn, &[a, b], None, call_debug_location);
    let result = ExtractValue(bcx, val, 0);
    let overflow = ZExt(bcx, ExtractValue(bcx, val, 1), Type::bool(bcx.ccx()));
    Store(bcx, result, StructGEP(bcx, out, 0));
    Store(bcx, overflow, StructGEP(bcx, out, 1));

    C_nil(bcx.ccx())
}
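
// Note: LLVM's `*.with.overflow` intrinsics return a two-element aggregate
// `{ iN, i1 }`; the code above extracts both fields and widens the `i1`
// overflow flag to a Rust `bool` before storing the pair through `out`.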

fn try_intrinsic<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
                             func: ValueRef,
                             data: ValueRef,
                             dest: ValueRef,
                             dloc: DebugLoc) -> Block<'blk, 'tcx> {
    if bcx.sess().no_landing_pads() {
        Call(bcx, func, &[data], None, dloc);
        Store(bcx, C_null(Type::i8p(bcx.ccx())), dest);
        bcx
    } else if wants_msvc_seh(bcx.sess()) {
        trans_msvc_try(bcx, func, data, dest, dloc)
    } else {
        trans_gnu_try(bcx, func, data, dest, dloc)
    }
}

// MSVC's definition of the `rust_try` function. The exact implementation here
// is a little different from the GNU (standard) version below, not only
// because of the personality function but also because of the other fiddly
// bits about SEH. LLVM also currently requires us to structure this in a very
// particular way as explained below.
//
// Like with the GNU version we generate a shim wrapper
fn trans_msvc_try<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
                              func: ValueRef,
                              data: ValueRef,
                              dest: ValueRef,
                              dloc: DebugLoc) -> Block<'blk, 'tcx> {
    let llfn = get_rust_try_fn(bcx.fcx, &mut |try_fn_ty, output| {
        let ccx = bcx.ccx();
        let dloc = DebugLoc::None;
        let rust_try = declare::define_internal_rust_fn(ccx, "__rust_try",
                                                        try_fn_ty);
        let (fcx, block_arena);
        block_arena = TypedArena::new();
        fcx = new_fn_ctxt(ccx, rust_try, ast::DUMMY_NODE_ID, false,
                          output, ccx.tcx().mk_substs(Substs::trans_empty()),
                          None, &block_arena);
        let bcx = init_function(&fcx, true, output);
        let then = fcx.new_temp_block("then");
        let catch = fcx.new_temp_block("catch");
        let catch_return = fcx.new_temp_block("catch-return");
        let catch_resume = fcx.new_temp_block("catch-resume");
        let personality = fcx.eh_personality();

        let eh_typeid_for = ccx.get_intrinsic(&"llvm.eh.typeid.for");
        let rust_try_filter = match bcx.tcx().lang_items.msvc_try_filter() {
            Some(did) => callee::trans_fn_ref(ccx, did, ExprId(0),
                                              bcx.fcx.param_substs).val,
            None => bcx.sess().bug("msvc_try_filter not defined"),
        };

        // Type indicator for the exception being thrown, not entirely sure
        // what's going on here but it's what all the examples in LLVM use.
        let lpad_ty = Type::struct_(ccx, &[Type::i8p(ccx), Type::i32(ccx)],
                                    false);

        llvm::SetFunctionAttribute(rust_try, llvm::Attribute::NoInline);
        llvm::SetFunctionAttribute(rust_try, llvm::Attribute::OptimizeNone);
        let func = llvm::get_param(rust_try, 0);
        let data = llvm::get_param(rust_try, 1);

        // Invoke the function, specifying our two temporary landing pads as the
        // exit point. After the invoke we've terminated our basic block.
        Invoke(bcx, func, &[data], then.llbb, catch.llbb, None, dloc);

        // All the magic happens in this landing pad, and this is basically the
        // only landing pad in rust tagged with "catch" to indicate that we're
        // catching an exception. The other catch handlers in the GNU version
        // below just catch *all* exceptions, but that's because most exceptions
        // are already filtered out by the gnu personality function.
        //
        // For MSVC we're just using a standard personality function that we
        // can't customize (e.g. _except_handler3 or __C_specific_handler), so
        // we need to do the exception filtering ourselves. This is currently
        // performed by the `__rust_try_filter` function. This function,
        // specified in the landingpad instruction, will be invoked by Windows
        // SEH routines and will return whether the exception in question can be
        // caught (aka the Rust runtime is the one that threw the exception).
        //
        // To get this to compile (currently LLVM segfaults if it's not in this
        // particular structure), when the landingpad is executing we test to
        // make sure that the ID of the exception being thrown is indeed the one
        // that we were expecting. If it's not, we resume the exception, and
        // otherwise we return the pointer that we got. Full disclosure: it's
        // not clear to me what this `llvm.eh.typeid` stuff is doing *other*
        // than just allowing LLVM to compile this file without segfaulting. I
        // would expect the entire landing pad to just be:
        //
        //     %vals = landingpad ...
        //     %ehptr = extractvalue { i8*, i32 } %vals, 0
        //     ret i8* %ehptr
        //
        // but apparently LLVM chokes on this, so we do the more complicated
        // thing to placate it.
        let vals = LandingPad(catch, lpad_ty, personality, 1);
        let rust_try_filter = BitCast(catch, rust_try_filter, Type::i8p(ccx));
        AddClause(catch, vals, rust_try_filter);
        let ehptr = ExtractValue(catch, vals, 0);
        let sel = ExtractValue(catch, vals, 1);
        let filter_sel = Call(catch, eh_typeid_for, &[rust_try_filter], None,
                              dloc);
        let is_filter = ICmp(catch, llvm::IntEQ, sel, filter_sel, dloc);
        CondBr(catch, is_filter, catch_return.llbb, catch_resume.llbb, dloc);

        // Our "catch-return" basic block is where we've determined that we
        // actually need to catch this exception, in which case we just return
        // the exception pointer.
        Ret(catch_return, ehptr, dloc);

        // The "catch-resume" block is where we're running this landing pad but
        // we actually need to not catch the exception, so just resume the
        // exception to return.
        Resume(catch_resume, vals);

        // On the successful branch we just return null.
        Ret(then, C_null(Type::i8p(ccx)), dloc);

        rust_try
    });

    // Note that no invoke is used here because by definition this function
    // can't panic (that's what it's catching).
    let ret = Call(bcx, llfn, &[func, data], None, dloc);
    Store(bcx, ret, dest);
    bcx
}

// Definition of the standard "try" function for Rust using the GNU-like model
// of exceptions (e.g. the normal semantics of LLVM's landingpad and invoke
// instructions).
//
// This translation is a little surprising because we always call a shim
// function instead of inlining the call to `invoke` manually here. This is
// done because in LLVM we're only allowed to have one personality per function
// definition. The call to the `try` intrinsic is being inlined into the
// function calling it, and that function may already have other personality
// functions in play. By calling a shim we're guaranteed that our shim will
// have the right personality function.
fn trans_gnu_try<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
                             func: ValueRef,
                             data: ValueRef,
                             dest: ValueRef,
                             dloc: DebugLoc) -> Block<'blk, 'tcx> {
    let llfn = get_rust_try_fn(bcx.fcx, &mut |try_fn_ty, output| {
        let ccx = bcx.ccx();
        let dloc = DebugLoc::None;

        // Translates the shims described above:
        //
        //   bcx:
        //      invoke %func(%args...) normal %normal unwind %catch
        //
        //   normal:
        //      ret null
        //
        //   catch:
        //      (ptr, _) = landingpad
        //      ret ptr

        let rust_try = declare::define_internal_rust_fn(ccx, "__rust_try", try_fn_ty);
        attributes::emit_uwtable(rust_try, true);
        let catch_pers = match bcx.tcx().lang_items.eh_personality_catch() {
            Some(did) => callee::trans_fn_ref(ccx, did, ExprId(0),
                                              bcx.fcx.param_substs).val,
            None => bcx.tcx().sess.bug("eh_personality_catch not defined"),
        };

        let (fcx, block_arena);
        block_arena = TypedArena::new();
        fcx = new_fn_ctxt(ccx, rust_try, ast::DUMMY_NODE_ID, false,
                          output, ccx.tcx().mk_substs(Substs::trans_empty()),
                          None, &block_arena);
        let bcx = init_function(&fcx, true, output);
        let then = bcx.fcx.new_temp_block("then");
        let catch = bcx.fcx.new_temp_block("catch");

        let func = llvm::get_param(rust_try, 0);
        let data = llvm::get_param(rust_try, 1);
        Invoke(bcx, func, &[data], then.llbb, catch.llbb, None, dloc);
        Ret(then, C_null(Type::i8p(ccx)), dloc);

        // Type indicator for the exception being thrown.
        // The first value in this tuple is a pointer to the exception object
        // being thrown. The second value is a "selector" indicating which of
        // the landing pad clauses the exception's type had been matched to.
        // rust_try ignores the selector.
        let lpad_ty = Type::struct_(ccx, &[Type::i8p(ccx), Type::i32(ccx)],
                                    false);
        let vals = LandingPad(catch, lpad_ty, catch_pers, 1);
        AddClause(catch, vals, C_null(Type::i8p(ccx)));
        let ptr = ExtractValue(catch, vals, 0);
        Ret(catch, ptr, dloc);

        rust_try
    });

    // Note that no invoke is used here because by definition this function
    // can't panic (that's what it's catching).
    let ret = Call(bcx, llfn, &[func, data], None, dloc);
    Store(bcx, ret, dest);
    bcx
}

// Helper to generate the `Ty` associated with `rust_try`
fn get_rust_try_fn<'a, 'tcx>(fcx: &FunctionContext<'a, 'tcx>,
                             f: &mut FnMut(Ty<'tcx>,
                                           ty::FnOutput<'tcx>) -> ValueRef)
                             -> ValueRef {
    let ccx = fcx.ccx;
    if let Some(llfn) = *ccx.rust_try_fn().borrow() {
        return llfn;
    }

    // Define the type up front for the signature of the rust_try function.
    let tcx = ccx.tcx();
    let i8p = tcx.mk_mut_ptr(tcx.types.i8);
    let fn_ty = tcx.mk_bare_fn(ty::BareFnTy {
        unsafety: hir::Unsafety::Unsafe,
        abi: abi::Rust,
        sig: ty::Binder(ty::FnSig {
            inputs: vec![i8p],
            output: ty::FnOutput::FnConverging(tcx.mk_nil()),
            variadic: false,
        }),
    });
    let fn_ty = tcx.mk_fn(None, fn_ty);
    let output = ty::FnOutput::FnConverging(i8p);
    let try_fn_ty = tcx.mk_bare_fn(ty::BareFnTy {
        unsafety: hir::Unsafety::Unsafe,
        abi: abi::Rust,
        sig: ty::Binder(ty::FnSig {
            inputs: vec![fn_ty, i8p],
            output: output,
            variadic: false,
        }),
    });
    let rust_try = f(tcx.mk_fn(None, try_fn_ty), output);
    *ccx.rust_try_fn().borrow_mut() = Some(rust_try);
    rust_try
}
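
// Spelled out, the signature constructed above is (a sketch):
//
//    unsafe fn rust_try(f: unsafe fn(*mut i8), data: *mut i8) -> *mut i8
//
// i.e. `rust_try` takes the closure-invoking shim and its data pointer, and
// returns either null or a pointer to the caught exception object.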

fn span_invalid_monomorphization_error(a: &Session, b: Span, c: &str) {
    span_err!(a, b, E0511, "{}", c);
}

fn generic_simd_intrinsic<'blk, 'tcx, 'a>
    (bcx: Block<'blk, 'tcx>,
     name: &str,
     substs: subst::Substs<'tcx>,
     callee_ty: Ty<'tcx>,
     args: Option<&[P<hir::Expr>]>,
     llargs: &[ValueRef],
     ret_ty: Ty<'tcx>,
     llret_ty: Type,
     call_debug_location: DebugLoc,
     call_info: NodeIdAndSpan) -> ValueRef
{
    // macros for error handling:
    macro_rules! emit_error {
        ($msg: tt) => {
            emit_error!($msg, )
        };
        ($msg: tt, $($fmt: tt)*) => {
            span_invalid_monomorphization_error(
                bcx.sess(), call_info.span,
                &format!(concat!("invalid monomorphization of `{}` intrinsic: ",
                                 $msg),
                         name, $($fmt)*));
        }
    }
    macro_rules! require {
        ($cond: expr, $($fmt: tt)*) => {
            if !$cond {
                emit_error!($($fmt)*);
                return C_null(llret_ty)
            }
        }
    }
    macro_rules! require_simd {
        ($ty: expr, $position: expr) => {
            require!($ty.is_simd(), "expected SIMD {} type, found non-SIMD `{}`", $position, $ty)
        }
    }

    let tcx = bcx.tcx();
    let arg_tys = match callee_ty.sty {
        ty::TyBareFn(_, ref f) => {
            bcx.tcx().erase_late_bound_regions(&f.sig.inputs())
        }
        _ => unreachable!()
    };

    // every intrinsic takes a SIMD vector as its first argument
    require_simd!(arg_tys[0], "input");
    let in_ty = arg_tys[0];
    let in_elem = arg_tys[0].simd_type(tcx);
    let in_len = arg_tys[0].simd_size(tcx);

    let comparison = match name {
        "simd_eq" => Some(hir::BiEq),
        "simd_ne" => Some(hir::BiNe),
        "simd_lt" => Some(hir::BiLt),
        "simd_le" => Some(hir::BiLe),
        "simd_gt" => Some(hir::BiGt),
        "simd_ge" => Some(hir::BiGe),
        _ => None
    };

    if let Some(cmp_op) = comparison {
        require_simd!(ret_ty, "return");

        let out_len = ret_ty.simd_size(tcx);
        require!(in_len == out_len,
                 "expected return type with length {} (same as input type `{}`), \
                  found `{}` with length {}",
                 in_len, in_ty,
                 ret_ty, out_len);
        require!(llret_ty.element_type().kind() == llvm::Integer,
                 "expected return type with integer elements, found `{}` with non-integer `{}`",
                 ret_ty,
                 ret_ty.simd_type(tcx));

        return compare_simd_types(bcx,
                                  llargs[0],
                                  llargs[1],
                                  in_elem,
                                  llret_ty,
                                  cmp_op,
                                  call_debug_location)
    }

    if name.starts_with("simd_shuffle") {
        let n: usize = match name["simd_shuffle".len()..].parse() {
            Ok(n) => n,
            Err(_) => tcx.sess.span_bug(call_info.span,
                                        "bad `simd_shuffle` instruction only caught in trans?")
        };

        require_simd!(ret_ty, "return");

        let out_len = ret_ty.simd_size(tcx);
        require!(out_len == n,
                 "expected return type of length {}, found `{}` with length {}",
                 n, ret_ty, out_len);
        require!(in_elem == ret_ty.simd_type(tcx),
                 "expected return element type `{}` (element of input `{}`), \
                  found `{}` with element type `{}`",
                 in_elem, in_ty,
                 ret_ty, ret_ty.simd_type(tcx));

        let total_len = in_len as u64 * 2;

        let vector = match args {
            Some(args) => &args[2],
            None => bcx.sess().span_bug(call_info.span,
                                        "intrinsic call with unexpected argument shape"),
        };
        let vector = consts::const_expr(bcx.ccx(), vector, tcx.mk_substs(substs), None).0;

        let indices: Option<Vec<_>> = (0..n)
            .map(|i| {
                let arg_idx = i;
                let val = const_get_elt(bcx.ccx(), vector, &[i as libc::c_uint]);
                let c = const_to_opt_uint(val);
                match c {
                    None => {
                        emit_error!("shuffle index #{} is not a constant", arg_idx);
                        None
                    }
                    Some(idx) if idx >= total_len => {
                        emit_error!("shuffle index #{} is out of bounds (limit {})",
                                    arg_idx, total_len);
                        None
                    }
                    Some(idx) => Some(C_i32(bcx.ccx(), idx as i32)),
                }
            })
            .collect();
        let indices = match indices {
            Some(i) => i,
            None => return C_null(llret_ty)
        };

        return ShuffleVector(bcx, llargs[0], llargs[1], C_vector(&indices))
    }

    if name == "simd_insert" {
        require!(in_elem == arg_tys[2],
                 "expected inserted type `{}` (element of input `{}`), found `{}`",
                 in_elem, in_ty, arg_tys[2]);
        return InsertElement(bcx, llargs[0], llargs[2], llargs[1])
    }
    if name == "simd_extract" {
        require!(ret_ty == in_elem,
                 "expected return type `{}` (element of input `{}`), found `{}`",
                 in_elem, in_ty, ret_ty);
        return ExtractElement(bcx, llargs[0], llargs[1])
    }

    if name == "simd_cast" {
        require_simd!(ret_ty, "return");
        let out_len = ret_ty.simd_size(tcx);
        require!(in_len == out_len,
                 "expected return type with length {} (same as input type `{}`), \
                  found `{}` with length {}",
                 in_len, in_ty,
                 ret_ty, out_len);
        // casting cares about nominal type, not just structural type
        let out_elem = ret_ty.simd_type(tcx);

        if in_elem == out_elem { return llargs[0]; }

        enum Style { Float, Int(/* is signed? */ bool), Unsupported }
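
        // (Illustrative readings of the dispatch below: an i32x4 -> f32x4
        // cast becomes SIToFP, u16x8 -> u32x8 becomes ZExt, and
        // f64x2 -> f32x2 becomes FPTrunc.)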

        let (in_style, in_width) = match in_elem.sty {
            // vectors of pointer-sized integers should've been
            // disallowed before here, so this unwrap is safe.
            ty::TyInt(i) => (Style::Int(true), i.bit_width().unwrap()),
            ty::TyUint(u) => (Style::Int(false), u.bit_width().unwrap()),
            ty::TyFloat(f) => (Style::Float, f.bit_width()),
            _ => (Style::Unsupported, 0)
        };
        let (out_style, out_width) = match out_elem.sty {
            ty::TyInt(i) => (Style::Int(true), i.bit_width().unwrap()),
            ty::TyUint(u) => (Style::Int(false), u.bit_width().unwrap()),
            ty::TyFloat(f) => (Style::Float, f.bit_width()),
            _ => (Style::Unsupported, 0)
        };

        match (in_style, out_style) {
            (Style::Int(in_is_signed), Style::Int(_)) => {
                return match in_width.cmp(&out_width) {
                    Ordering::Greater => Trunc(bcx, llargs[0], llret_ty),
                    Ordering::Equal => llargs[0],
                    Ordering::Less => if in_is_signed {
                        SExt(bcx, llargs[0], llret_ty)
                    } else {
                        ZExt(bcx, llargs[0], llret_ty)
                    }
                }
            }
            (Style::Int(in_is_signed), Style::Float) => {
                return if in_is_signed {
                    SIToFP(bcx, llargs[0], llret_ty)
                } else {
                    UIToFP(bcx, llargs[0], llret_ty)
                }
            }
            (Style::Float, Style::Int(out_is_signed)) => {
                return if out_is_signed {
                    FPToSI(bcx, llargs[0], llret_ty)
                } else {
                    FPToUI(bcx, llargs[0], llret_ty)
                }
            }
            (Style::Float, Style::Float) => {
                return match in_width.cmp(&out_width) {
                    Ordering::Greater => FPTrunc(bcx, llargs[0], llret_ty),
                    Ordering::Equal => llargs[0],
                    Ordering::Less => FPExt(bcx, llargs[0], llret_ty)
                }
            }
            _ => { /* Unsupported. Fallthrough. */ }
        }
        require!(false,
                 "unsupported cast from `{}` with element `{}` to `{}` with element `{}`",
                 in_ty, in_elem,
                 ret_ty, out_elem);
    }
    macro_rules! arith {
        ($($name: ident: $($($p: ident),* => $call: expr),*;)*) => {
            $(
                if name == stringify!($name) {
                    match in_elem.sty {
                        $(
                            $(ty::$p(_))|* => {
                                return $call(bcx, llargs[0], llargs[1], call_debug_location)
                            }
                        )*
                        _ => {},
                    }
                    require!(false,
                             "unsupported operation on `{}` with element `{}`",
                             in_ty,
                             in_elem)
                }
            )*
        }
    }
    arith! {
        simd_add: TyUint, TyInt => Add, TyFloat => FAdd;
        simd_sub: TyUint, TyInt => Sub, TyFloat => FSub;
        simd_mul: TyUint, TyInt => Mul, TyFloat => FMul;
        simd_div: TyFloat => FDiv;
        simd_shl: TyUint, TyInt => Shl;
        simd_shr: TyUint => LShr, TyInt => AShr;
        simd_and: TyUint, TyInt => And;
        simd_or: TyUint, TyInt => Or;
        simd_xor: TyUint, TyInt => Xor;
    }
    bcx.sess().span_bug(call_info.span, "unknown SIMD intrinsic");
}