// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.

#![allow(non_upper_case_globals)]

use arena::TypedArena;
use intrinsics::{self, Intrinsic};
use libc;
use llvm;
use llvm::{ValueRef, TypeKind};
use middle::infer;
use middle::subst;
use middle::subst::FnSpace;
use trans::adt;
use trans::attributes;
use trans::base::*;
use trans::build::*;
use trans::callee;
use trans::cleanup;
use trans::cleanup::CleanupMethods;
use trans::common::*;
use trans::consts;
use trans::datum::*;
use trans::debuginfo::DebugLoc;
use trans::declare;
use trans::expr;
use trans::glue;
use trans::type_of;
use trans::machine;
use trans::type_::Type;
use middle::ty::{self, Ty, HasTypeFlags};
use middle::subst::Substs;
use rustc_front::hir;
use syntax::abi::{self, RustIntrinsic};
use syntax::ast;
use syntax::ptr::P;
use syntax::parse::token;

use rustc::session::Session;
use syntax::codemap::Span;

use std::cmp::Ordering;
pub fn get_simple_intrinsic(ccx: &CrateContext, item: &hir::ForeignItem) -> Option<ValueRef> {
    let name = match &*item.name.as_str() {
        "sqrtf32" => "llvm.sqrt.f32",
        "sqrtf64" => "llvm.sqrt.f64",
        "powif32" => "llvm.powi.f32",
        "powif64" => "llvm.powi.f64",
        "sinf32" => "llvm.sin.f32",
        "sinf64" => "llvm.sin.f64",
        "cosf32" => "llvm.cos.f32",
        "cosf64" => "llvm.cos.f64",
        "powf32" => "llvm.pow.f32",
        "powf64" => "llvm.pow.f64",
        "expf32" => "llvm.exp.f32",
        "expf64" => "llvm.exp.f64",
        "exp2f32" => "llvm.exp2.f32",
        "exp2f64" => "llvm.exp2.f64",
        "logf32" => "llvm.log.f32",
        "logf64" => "llvm.log.f64",
        "log10f32" => "llvm.log10.f32",
        "log10f64" => "llvm.log10.f64",
        "log2f32" => "llvm.log2.f32",
        "log2f64" => "llvm.log2.f64",
        "fmaf32" => "llvm.fma.f32",
        "fmaf64" => "llvm.fma.f64",
        "fabsf32" => "llvm.fabs.f32",
        "fabsf64" => "llvm.fabs.f64",
        "copysignf32" => "llvm.copysign.f32",
        "copysignf64" => "llvm.copysign.f64",
        "floorf32" => "llvm.floor.f32",
        "floorf64" => "llvm.floor.f64",
        "ceilf32" => "llvm.ceil.f32",
        "ceilf64" => "llvm.ceil.f64",
        "truncf32" => "llvm.trunc.f32",
        "truncf64" => "llvm.trunc.f64",
        "rintf32" => "llvm.rint.f32",
        "rintf64" => "llvm.rint.f64",
        "nearbyintf32" => "llvm.nearbyint.f32",
        "nearbyintf64" => "llvm.nearbyint.f64",
        "roundf32" => "llvm.round.f32",
        "roundf64" => "llvm.round.f64",
        "assume" => "llvm.assume",
        _ => return None
    };
    Some(ccx.get_intrinsic(&name))
}
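
// For example, a call to the `sqrtf32` intrinsic in libcore,
//
//     let x = unsafe { intrinsics::sqrtf32(2.0) };
//
// resolves through the table above to `llvm.sqrt.f32` and is emitted as a
// direct call to that LLVM intrinsic, roughly
// `call float @llvm.sqrt.f32(float 2.0)` in the generated IR.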
pub fn span_transmute_size_error(a: &Session, b: Span, msg: &str) {
    span_err!(a, b, E0512, "{}", msg);
}
/// Performs late verification that intrinsics are used correctly. At present,
/// the only intrinsic that needs such verification is `transmute`.
pub fn check_intrinsics(ccx: &CrateContext) {
    let mut last_failing_id = None;
    for transmute_restriction in ccx.tcx().transmute_restrictions.borrow().iter() {
        // Sometimes, a single call to transmute will push multiple
        // type pairs to test in order to exhaustively test the
        // possibility around a type parameter. If one of those fails,
        // there is no sense reporting errors on the others.
        if last_failing_id == Some(transmute_restriction.id) {
            continue;
        }

        debug!("transmute_restriction: {:?}", transmute_restriction);

        assert!(!transmute_restriction.substituted_from.has_param_types());
        assert!(!transmute_restriction.substituted_to.has_param_types());

        let llfromtype = type_of::sizing_type_of(ccx,
                                                 transmute_restriction.substituted_from);
        let lltotype = type_of::sizing_type_of(ccx,
                                               transmute_restriction.substituted_to);
        let from_type_size = machine::llbitsize_of_real(ccx, llfromtype);
        let to_type_size = machine::llbitsize_of_real(ccx, lltotype);
        if from_type_size != to_type_size {
            last_failing_id = Some(transmute_restriction.id);

            if transmute_restriction.original_from != transmute_restriction.substituted_from {
                span_transmute_size_error(ccx.sess(), transmute_restriction.span,
                    &format!("transmute called with differently sized types: \
                              {} (could be {} bit{}) to {} (could be {} bit{})",
                             transmute_restriction.original_from,
                             from_type_size as usize,
                             if from_type_size == 1 {""} else {"s"},
                             transmute_restriction.original_to,
                             to_type_size as usize,
                             if to_type_size == 1 {""} else {"s"}));
            } else {
                span_transmute_size_error(ccx.sess(), transmute_restriction.span,
                    &format!("transmute called with differently sized types: \
                              {} ({} bit{}) to {} ({} bit{})",
                             transmute_restriction.original_from,
                             from_type_size as usize,
                             if from_type_size == 1 {""} else {"s"},
                             transmute_restriction.original_to,
                             to_type_size as usize,
                             if to_type_size == 1 {""} else {"s"}));
            }
        }
    }
    ccx.sess().abort_if_errors();
}
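
// As an illustration, a monomorphized call such as
//
//     unsafe { mem::transmute::<u32, u64>(x) }
//
// records a transmute restriction whose sizes differ (32 vs. 64 bits), so the
// check above reports E0512 instead of letting ill-sized IR through. The
// "could be" wording is used when the failing sizes only arise for a
// particular substitution of a type parameter.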
/// Remember to add all intrinsics here, in librustc_typeck/check/mod.rs,
/// and in libcore/intrinsics.rs; if you need access to any llvm intrinsics,
/// add them to librustc_trans/trans/context.rs
pub fn trans_intrinsic_call<'a, 'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>,
                                            node: ast::NodeId,
                                            callee_ty: Ty<'tcx>,
                                            cleanup_scope: cleanup::CustomScopeIndex,
                                            args: callee::CallArgs<'a, 'tcx>,
                                            dest: expr::Dest,
                                            substs: subst::Substs<'tcx>,
                                            call_info: NodeIdAndSpan)
                                            -> Result<'blk, 'tcx> {
    let fcx = bcx.fcx;
    let ccx = fcx.ccx;
    let tcx = bcx.tcx();

    let _icx = push_ctxt("trans_intrinsic_call");

    let sig = ccx.tcx().erase_late_bound_regions(callee_ty.fn_sig());
    let sig = infer::normalize_associated_type(ccx.tcx(), &sig);
    let arg_tys = sig.inputs;
    let ret_ty = sig.output;
    let foreign_item = tcx.map.expect_foreign_item(node);
    let name = foreign_item.name.as_str();
    // For `transmute` we can just trans the input expr directly into dest
    if name == "transmute" {
        let llret_ty = type_of::type_of(ccx, ret_ty.unwrap());
        match args {
            callee::ArgExprs(arg_exprs) => {
                assert_eq!(arg_exprs.len(), 1);

                let (in_type, out_type) = (*substs.types.get(FnSpace, 0),
                                           *substs.types.get(FnSpace, 1));
                let llintype = type_of::type_of(ccx, in_type);
                let llouttype = type_of::type_of(ccx, out_type);

                let in_type_size = machine::llbitsize_of_real(ccx, llintype);
                let out_type_size = machine::llbitsize_of_real(ccx, llouttype);

                // This should be caught by the intrinsicck pass
                assert_eq!(in_type_size, out_type_size);

                let nonpointer_nonaggregate = |llkind: TypeKind| -> bool {
                    use llvm::TypeKind::*;
                    match llkind {
                        Half | Float | Double | X86_FP80 | FP128 |
                        PPC_FP128 | Integer | Vector | X86_MMX => true,
                        _ => false
                    }
                };

                // An approximation to which types can be directly cast via
                // LLVM's bitcast. This doesn't cover pointer -> pointer casts,
                // but does, importantly, cover SIMD types.
                let in_kind = llintype.kind();
                let ret_kind = llret_ty.kind();
                let bitcast_compatible =
                    (nonpointer_nonaggregate(in_kind) && nonpointer_nonaggregate(ret_kind)) || {
                        in_kind == TypeKind::Pointer && ret_kind == TypeKind::Pointer
                    };

                let dest = if bitcast_compatible {
                    // if we're here, the type is scalar-like (a primitive, a
                    // SIMD type or a pointer), and so can be handled as a
                    // by-value ValueRef and can also be directly bitcast to the
                    // target type. Doing this special case makes conversions
                    // like `u32x4` -> `u64x2` much nicer for LLVM and so more
                    // efficient (these are done efficiently implicitly in C
                    // with the `__m128i` type and so this means Rust doesn't
                    // lose out there).
                    let expr = &*arg_exprs[0];
                    let datum = unpack_datum!(bcx, expr::trans(bcx, expr));
                    let datum = unpack_datum!(bcx, datum.to_rvalue_datum(bcx, "transmute_temp"));
                    let val = if datum.kind.is_by_ref() {
                        load_ty(bcx, datum.val, datum.ty)
                    } else {
                        from_arg_ty(bcx, datum.val, datum.ty)
                    };

                    let cast_val = BitCast(bcx, val, llret_ty);

                    match dest {
                        expr::SaveIn(d) => {
                            // this often occurs in a sequence like `Store(val,
                            // d); val2 = Load(d)`, so disappears easily.
                            Store(bcx, cast_val, d);
                        }
                        expr::Ignore => {}
                    }
                    dest
                } else {
                    // The types are too complicated to do with a by-value
                    // bitcast, so pointer cast instead. We need to cast the
                    // dest so the types work out.
                    let dest = match dest {
                        expr::SaveIn(d) => expr::SaveIn(PointerCast(bcx, d, llintype.ptr_to())),
                        expr::Ignore => expr::Ignore
                    };
                    bcx = expr::trans_into(bcx, &*arg_exprs[0], dest);
                    dest
                };

                fcx.scopes.borrow_mut().last_mut().unwrap().drop_non_lifetime_clean();
                fcx.pop_and_trans_custom_cleanup_scope(bcx, cleanup_scope);

                return match dest {
                    expr::SaveIn(d) => Result::new(bcx, d),
                    expr::Ignore => Result::new(bcx, C_undef(llret_ty.ptr_to()))
                };
            }

            _ => {
                ccx.sess().bug("expected expr as argument for transmute");
            }
        }
    }
    // For `move_val_init` we can evaluate the destination address
    // (the first argument) and then trans the source value (the
    // second argument) directly into the resulting destination
    // address.
    if name == "move_val_init" {
        if let callee::ArgExprs(ref exprs) = args {
            let (dest_expr, source_expr) = if exprs.len() != 2 {
                ccx.sess().bug("expected two exprs as arguments for `move_val_init` intrinsic");
            } else {
                (&exprs[0], &exprs[1])
            };

            // evaluate destination address
            let dest_datum = unpack_datum!(bcx, expr::trans(bcx, dest_expr));
            let dest_datum = unpack_datum!(
                bcx, dest_datum.to_rvalue_datum(bcx, "arg"));
            let dest_datum = unpack_datum!(
                bcx, dest_datum.to_appropriate_datum(bcx));

            // `expr::trans_into(bcx, expr, dest)` is equiv to
            //
            //    `trans(bcx, expr).store_to_dest(dest)`,
            //
            // which for `dest == expr::SaveIn(addr)`, is equivalent to:
            //
            //    `trans(bcx, expr).store_to(bcx, addr)`.
            let lldest = expr::Dest::SaveIn(dest_datum.val);
            bcx = expr::trans_into(bcx, source_expr, lldest);

            let llresult = C_nil(ccx);
            fcx.pop_and_trans_custom_cleanup_scope(bcx, cleanup_scope);

            return Result::new(bcx, llresult);
        } else {
            ccx.sess().bug("expected two exprs as arguments for `move_val_init` intrinsic");
        }
    }
    let call_debug_location = DebugLoc::At(call_info.id, call_info.span);

    // For `try` we need some custom control flow
    if &name[..] == "try" {
        if let callee::ArgExprs(ref exprs) = args {
            let (func, data) = if exprs.len() != 2 {
                ccx.sess().bug("expected two exprs as arguments for \
                                `try` intrinsic");
            } else {
                (&exprs[0], &exprs[1])
            };

            // translate arguments
            let func = unpack_datum!(bcx, expr::trans(bcx, func));
            let func = unpack_datum!(bcx, func.to_rvalue_datum(bcx, "func"));
            let data = unpack_datum!(bcx, expr::trans(bcx, data));
            let data = unpack_datum!(bcx, data.to_rvalue_datum(bcx, "data"));

            let dest = match dest {
                expr::SaveIn(d) => d,
                expr::Ignore => alloc_ty(bcx, tcx.mk_mut_ptr(tcx.types.i8),
                                         "try_result"),
            };

            bcx = try_intrinsic(bcx, func.val, data.val, dest,
                                call_debug_location);

            fcx.pop_and_trans_custom_cleanup_scope(bcx, cleanup_scope);
            return Result::new(bcx, dest);
        } else {
            ccx.sess().bug("expected two exprs as arguments for \
                            `try` intrinsic");
        }
    }
    // save the actual AST arguments for later (some places need to do
    // const-evaluation on them)
    let expr_arguments = match args {
        callee::ArgExprs(args) => Some(args),
        _ => None
    };

    // Push the arguments.
    let mut llargs = Vec::new();
    bcx = callee::trans_args(bcx,
                             args,
                             callee_ty,
                             &mut llargs,
                             cleanup::CustomScope(cleanup_scope),
                             false,
                             RustIntrinsic);

    fcx.scopes.borrow_mut().last_mut().unwrap().drop_non_lifetime_clean();

    // These are the only intrinsic functions that diverge.
    if name == "abort" {
        let llfn = ccx.get_intrinsic(&("llvm.trap"));
        Call(bcx, llfn, &[], None, call_debug_location);
        fcx.pop_and_trans_custom_cleanup_scope(bcx, cleanup_scope);
        Unreachable(bcx);
        return Result::new(bcx, C_undef(Type::nil(ccx).ptr_to()));
    } else if &name[..] == "unreachable" {
        fcx.pop_and_trans_custom_cleanup_scope(bcx, cleanup_scope);
        Unreachable(bcx);
        return Result::new(bcx, C_nil(ccx));
    }

    let ret_ty = match ret_ty {
        ty::FnConverging(ret_ty) => ret_ty,
        ty::FnDiverging => unreachable!()
    };

    let llret_ty = type_of::type_of(ccx, ret_ty);

    // Get location to store the result. If the user does
    // not care about the result, just make a stack slot
    let llresult = match dest {
        expr::SaveIn(d) => d,
        expr::Ignore => {
            if !type_is_zero_size(ccx, ret_ty) {
                let llresult = alloc_ty(bcx, ret_ty, "intrinsic_result");
                call_lifetime_start(bcx, llresult);
                llresult
            } else {
                C_undef(llret_ty.ptr_to())
            }
        }
    };
    let simple = get_simple_intrinsic(ccx, &*foreign_item);
    let llval = match (simple, &*name) {
        (Some(llfn), _) => {
            Call(bcx, llfn, &llargs, None, call_debug_location)
        }
        (_, "breakpoint") => {
            let llfn = ccx.get_intrinsic(&("llvm.debugtrap"));
            Call(bcx, llfn, &[], None, call_debug_location)
        }
        (_, "size_of") => {
            let tp_ty = *substs.types.get(FnSpace, 0);
            let lltp_ty = type_of::type_of(ccx, tp_ty);
            C_uint(ccx, machine::llsize_of_alloc(ccx, lltp_ty))
        }
        (_, "size_of_val") => {
            let tp_ty = *substs.types.get(FnSpace, 0);
            if !type_is_sized(tcx, tp_ty) {
                let (llsize, _) = glue::size_and_align_of_dst(bcx, tp_ty, llargs[1]);
                llsize
            } else {
                let lltp_ty = type_of::type_of(ccx, tp_ty);
                C_uint(ccx, machine::llsize_of_alloc(ccx, lltp_ty))
            }
        }
        (_, "min_align_of") => {
            let tp_ty = *substs.types.get(FnSpace, 0);
            C_uint(ccx, type_of::align_of(ccx, tp_ty))
        }
        (_, "min_align_of_val") => {
            let tp_ty = *substs.types.get(FnSpace, 0);
            if !type_is_sized(tcx, tp_ty) {
                let (_, llalign) = glue::size_and_align_of_dst(bcx, tp_ty, llargs[1]);
                llalign
            } else {
                C_uint(ccx, type_of::align_of(ccx, tp_ty))
            }
        }
        (_, "pref_align_of") => {
            let tp_ty = *substs.types.get(FnSpace, 0);
            let lltp_ty = type_of::type_of(ccx, tp_ty);
            C_uint(ccx, machine::llalign_of_pref(ccx, lltp_ty))
        }
        (_, "drop_in_place") => {
            let tp_ty = *substs.types.get(FnSpace, 0);
            let ptr = if type_is_sized(tcx, tp_ty) {
                llargs[0]
            } else {
                let scratch = rvalue_scratch_datum(bcx, tp_ty, "tmp");
                Store(bcx, llargs[0], expr::get_dataptr(bcx, scratch.val));
                Store(bcx, llargs[1], expr::get_meta(bcx, scratch.val));
                fcx.schedule_lifetime_end(cleanup::CustomScope(cleanup_scope), scratch.val);
                scratch.val
            };
            glue::drop_ty(bcx, ptr, tp_ty, call_debug_location);
            C_nil(ccx)
        }
        (_, "type_name") => {
            let tp_ty = *substs.types.get(FnSpace, 0);
            let ty_name = token::intern_and_get_ident(&tp_ty.to_string());
            C_str_slice(ccx, ty_name)
        }
        (_, "type_id") => {
            let hash = ccx.tcx().hash_crate_independent(*substs.types.get(FnSpace, 0),
                                                        &ccx.link_meta().crate_hash);
            C_u64(ccx, hash)
        }
        (_, "init_dropped") => {
            let tp_ty = *substs.types.get(FnSpace, 0);
            if !return_type_is_void(ccx, tp_ty) {
                drop_done_fill_mem(bcx, llresult, tp_ty);
            }
            C_nil(ccx)
        }
        (_, "init") => {
            let tp_ty = *substs.types.get(FnSpace, 0);
            if !return_type_is_void(ccx, tp_ty) {
                // Just zero out the stack slot. (See comment on base::memzero for explanation)
                init_zero_mem(bcx, llresult, tp_ty);
            }
            C_nil(ccx)
        }
        // Effectively no-ops
        (_, "uninit") | (_, "forget") => {
            C_nil(ccx)
        }
        (_, "needs_drop") => {
            let tp_ty = *substs.types.get(FnSpace, 0);

            C_bool(ccx, bcx.fcx.type_needs_drop(tp_ty))
        }
        (_, "offset") => {
            let ptr = llargs[0];
            let offset = llargs[1];
            InBoundsGEP(bcx, ptr, &[offset])
        }
        (_, "arith_offset") => {
            let ptr = llargs[0];
            let offset = llargs[1];
            GEP(bcx, ptr, &[offset])
        }
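
        // The only difference between the two offset intrinsics above is the
        // GEP flavor: `offset` emits `getelementptr inbounds`, so the result
        // must stay inside (or one past the end of) the original allocation,
        // while `arith_offset` emits a plain `getelementptr` and so allows
        // arbitrary, wrapping pointer arithmetic. Roughly, in LLVM IR:
        //
        //     %a = getelementptr inbounds i32, i32* %p, i64 %n  ; offset
        //     %b = getelementptr i32, i32* %p, i64 %n           ; arith_offset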
502 (_, "copy_nonoverlapping") => {
506 *substs.types.get(FnSpace, 0),
516 *substs.types.get(FnSpace, 0),
522 (_, "write_bytes") => {
523 memset_intrinsic(bcx,
525 *substs.types.get(FnSpace, 0),
532 (_, "volatile_copy_nonoverlapping_memory") => {
536 *substs.types.get(FnSpace, 0),
542 (_, "volatile_copy_memory") => {
546 *substs.types.get(FnSpace, 0),
552 (_, "volatile_set_memory") => {
553 memset_intrinsic(bcx,
555 *substs.types.get(FnSpace, 0),
561 (_, "volatile_load") => {
562 let tp_ty = *substs.types.get(FnSpace, 0);
563 let ptr = to_arg_ty_ptr(bcx, llargs[0], tp_ty);
564 let load = VolatileLoad(bcx, ptr);
566 llvm::LLVMSetAlignment(load, type_of::align_of(ccx, tp_ty));
568 to_arg_ty(bcx, load, tp_ty)
570 (_, "volatile_store") => {
571 let tp_ty = *substs.types.get(FnSpace, 0);
572 let ptr = to_arg_ty_ptr(bcx, llargs[0], tp_ty);
573 let val = from_arg_ty(bcx, llargs[1], tp_ty);
574 let store = VolatileStore(bcx, val, ptr);
576 llvm::LLVMSetAlignment(store, type_of::align_of(ccx, tp_ty));
581 (_, "ctlz") | (_, "cttz") | (_, "ctpop") | (_, "bswap") |
582 (_, "add_with_overflow") | (_, "sub_with_overflow") | (_, "mul_with_overflow") |
583 (_, "overflowing_add") | (_, "overflowing_sub") | (_, "overflowing_mul") |
584 (_, "unchecked_div") | (_, "unchecked_rem") => {
585 let sty = &arg_tys[0].sty;
586 match int_type_width_signed(sty, ccx) {
587 Some((width, signed)) =>
589 "ctlz" => count_zeros_intrinsic(bcx, &format!("llvm.ctlz.i{}", width),
590 llargs[0], call_debug_location),
591 "cttz" => count_zeros_intrinsic(bcx, &format!("llvm.cttz.i{}", width),
592 llargs[0], call_debug_location),
593 "ctpop" => Call(bcx, ccx.get_intrinsic(&format!("llvm.ctpop.i{}", width)),
594 &llargs, None, call_debug_location),
597 llargs[0] // byte swap a u8/i8 is just a no-op
599 Call(bcx, ccx.get_intrinsic(&format!("llvm.bswap.i{}", width)),
600 &llargs, None, call_debug_location)
603 "add_with_overflow" | "sub_with_overflow" | "mul_with_overflow" => {
604 let intrinsic = format!("llvm.{}{}.with.overflow.i{}",
605 if signed { 's' } else { 'u' },
607 with_overflow_intrinsic(bcx, &intrinsic, llargs[0], llargs[1], llresult,
610 "overflowing_add" => Add(bcx, llargs[0], llargs[1], call_debug_location),
611 "overflowing_sub" => Sub(bcx, llargs[0], llargs[1], call_debug_location),
612 "overflowing_mul" => Mul(bcx, llargs[0], llargs[1], call_debug_location),
615 SDiv(bcx, llargs[0], llargs[1], call_debug_location)
617 UDiv(bcx, llargs[0], llargs[1], call_debug_location)
621 SRem(bcx, llargs[0], llargs[1], call_debug_location)
623 URem(bcx, llargs[0], llargs[1], call_debug_location)
628 span_invalid_monomorphization_error(
629 tcx.sess, call_info.span,
630 &format!("invalid monomorphization of `{}` intrinsic: \
631 expected basic integer type, found `{}`", name, sty));
639 (_, "return_address") => {
640 if !fcx.caller_expects_out_pointer {
641 span_err!(tcx.sess, call_info.span, E0510,
642 "invalid use of `return_address` intrinsic: function \
643 does not use out pointer");
644 C_null(Type::i8p(ccx))
646 PointerCast(bcx, llvm::get_param(fcx.llfn, 0), Type::i8p(ccx))
650 (_, "discriminant_value") => {
651 let val_ty = substs.types.get(FnSpace, 0);
654 let repr = adt::represent_type(ccx, *val_ty);
655 adt::trans_get_discr(bcx, &*repr, llargs[0], Some(llret_ty))
657 _ => C_null(llret_ty)
660 (_, name) if name.starts_with("simd_") => {
661 generic_simd_intrinsic(bcx, name,
670 // This requires that atomic intrinsics follow a specific naming pattern:
671 // "atomic_<operation>[_<ordering>]", and no ordering means SeqCst
672 (_, name) if name.starts_with("atomic_") => {
673 let split: Vec<&str> = name.split('_').collect();
674 assert!(split.len() >= 2, "Atomic intrinsic not correct format");
676 let order = if split.len() == 2 {
677 llvm::SequentiallyConsistent
680 "unordered" => llvm::Unordered,
681 "relaxed" => llvm::Monotonic,
682 "acq" => llvm::Acquire,
683 "rel" => llvm::Release,
684 "acqrel" => llvm::AcquireRelease,
685 _ => ccx.sess().fatal("unknown ordering in atomic intrinsic")
691 // See include/llvm/IR/Instructions.h for their implementation
692 // of this, I assume that it's good enough for us to use for
694 let strongest_failure_ordering = match order {
695 llvm::NotAtomic | llvm::Unordered =>
696 ccx.sess().fatal("cmpxchg must be atomic"),
698 llvm::Monotonic | llvm::Release =>
701 llvm::Acquire | llvm::AcquireRelease =>
704 llvm::SequentiallyConsistent =>
705 llvm::SequentiallyConsistent
708 let tp_ty = *substs.types.get(FnSpace, 0);
709 let ptr = to_arg_ty_ptr(bcx, llargs[0], tp_ty);
710 let cmp = from_arg_ty(bcx, llargs[1], tp_ty);
711 let src = from_arg_ty(bcx, llargs[2], tp_ty);
712 let res = AtomicCmpXchg(bcx, ptr, cmp, src, order,
713 strongest_failure_ordering);
714 ExtractValue(bcx, res, 0)
718 let tp_ty = *substs.types.get(FnSpace, 0);
719 let ptr = to_arg_ty_ptr(bcx, llargs[0], tp_ty);
720 to_arg_ty(bcx, AtomicLoad(bcx, ptr, order), tp_ty)
723 let tp_ty = *substs.types.get(FnSpace, 0);
724 let ptr = to_arg_ty_ptr(bcx, llargs[0], tp_ty);
725 let val = from_arg_ty(bcx, llargs[1], tp_ty);
726 AtomicStore(bcx, val, ptr, order);
731 AtomicFence(bcx, order, llvm::CrossThread);
735 "singlethreadfence" => {
736 AtomicFence(bcx, order, llvm::SingleThread);
740 // These are all AtomicRMW ops
742 let atom_op = match op {
743 "xchg" => llvm::AtomicXchg,
744 "xadd" => llvm::AtomicAdd,
745 "xsub" => llvm::AtomicSub,
746 "and" => llvm::AtomicAnd,
747 "nand" => llvm::AtomicNand,
748 "or" => llvm::AtomicOr,
749 "xor" => llvm::AtomicXor,
750 "max" => llvm::AtomicMax,
751 "min" => llvm::AtomicMin,
752 "umax" => llvm::AtomicUMax,
753 "umin" => llvm::AtomicUMin,
754 _ => ccx.sess().fatal("unknown atomic operation")
757 let tp_ty = *substs.types.get(FnSpace, 0);
758 let ptr = to_arg_ty_ptr(bcx, llargs[0], tp_ty);
759 let val = from_arg_ty(bcx, llargs[1], tp_ty);
760 AtomicRMW(bcx, atom_op, ptr, val, order)
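
        // As an example of the naming scheme handled above, `atomic_xadd_relaxed`
        // splits into ["atomic", "xadd", "relaxed"] and becomes an
        // `atomicrmw add` with `monotonic` ordering, while a bare `atomic_xadd`
        // defaults to sequentially consistent ordering.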
        (_, _) => {
            let intr = match Intrinsic::find(tcx, &name) {
                Some(intr) => intr,
                None => ccx.sess().span_bug(foreign_item.span,
                                            &format!("unknown intrinsic '{}'", name)),
            };
            fn one<T>(x: Vec<T>) -> T {
                assert_eq!(x.len(), 1);
                x.into_iter().next().unwrap()
            }
            fn ty_to_type(ccx: &CrateContext, t: &intrinsics::Type,
                          any_changes_needed: &mut bool) -> Vec<Type> {
                use intrinsics::Type::*;
                match *t {
                    Void => vec![Type::void(ccx)],
                    Integer(_signed, width, llvm_width) => {
                        *any_changes_needed |= width != llvm_width;
                        vec![Type::ix(ccx, llvm_width as u64)]
                    }
                    Float(x) => {
                        match x {
                            32 => vec![Type::f32(ccx)],
                            64 => vec![Type::f64(ccx)],
                            _ => unreachable!()
                        }
                    }
                    Pointer(ref t, ref llvm_elem, _const) => {
                        *any_changes_needed |= llvm_elem.is_some();

                        let t = llvm_elem.as_ref().unwrap_or(t);
                        let elem = one(ty_to_type(ccx, t,
                                                  any_changes_needed));
                        vec![elem.ptr_to()]
                    }
                    Vector(ref t, ref llvm_elem, length) => {
                        *any_changes_needed |= llvm_elem.is_some();

                        let t = llvm_elem.as_ref().unwrap_or(t);
                        let elem = one(ty_to_type(ccx, t,
                                                  any_changes_needed));
                        vec![Type::vector(&elem,
                                          length as u64)]
                    }
                    Aggregate(false, ref contents) => {
                        let elems = contents.iter()
                                            .map(|t| one(ty_to_type(ccx, t, any_changes_needed)))
                                            .collect::<Vec<_>>();
                        vec![Type::struct_(ccx, &elems, false)]
                    }
                    Aggregate(true, ref contents) => {
                        *any_changes_needed = true;
                        contents.iter()
                                .flat_map(|t| ty_to_type(ccx, t, any_changes_needed))
                                .collect()
                    }
                }
            }

            // This allows an argument list like `foo, (bar, baz),
            // qux` to be converted into `foo, bar, baz, qux`, integer
            // arguments to be truncated as needed and pointers to be
            // cast.
            fn modify_as_needed<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
                                            t: &intrinsics::Type,
                                            arg_type: Ty<'tcx>,
                                            llarg: ValueRef)
                                            -> Vec<ValueRef>
            {
                match *t {
                    intrinsics::Type::Aggregate(true, ref contents) => {
                        // We found a tuple that needs squishing! So
                        // run over the tuple and load each field.
                        //
                        // This assumes the type is "simple", i.e. no
                        // destructors, and the contents are SIMD
                        // etc.
                        assert!(!bcx.fcx.type_needs_drop(arg_type));

                        let repr = adt::represent_type(bcx.ccx(), arg_type);
                        let repr_ptr = &*repr;
                        let arg = adt::MaybeSizedValue::sized(llarg);
                        (0..contents.len())
                            .map(|i| {
                                Load(bcx, adt::trans_field_ptr(bcx, repr_ptr, arg, 0, i))
                            })
                            .collect()
                    }
                    intrinsics::Type::Pointer(_, Some(ref llvm_elem), _) => {
                        let llvm_elem = one(ty_to_type(bcx.ccx(), llvm_elem, &mut false));
                        vec![PointerCast(bcx, llarg,
                                         llvm_elem.ptr_to())]
                    }
                    intrinsics::Type::Vector(_, Some(ref llvm_elem), length) => {
                        let llvm_elem = one(ty_to_type(bcx.ccx(), llvm_elem, &mut false));
                        vec![BitCast(bcx, llarg,
                                     Type::vector(&llvm_elem, length as u64))]
                    }
                    intrinsics::Type::Integer(_, width, llvm_width) if width != llvm_width => {
                        // the LLVM intrinsic uses a smaller integer
                        // size than the C intrinsic's signature, so
                        // we have to trim it down here.
                        vec![Trunc(bcx, llarg, Type::ix(bcx.ccx(), llvm_width as u64))]
                    }
                    _ => vec![llarg],
                }
            }

            let mut any_changes_needed = false;
            let inputs = intr.inputs.iter()
                                    .flat_map(|t| ty_to_type(ccx, t, &mut any_changes_needed))
                                    .collect::<Vec<_>>();

            let mut out_changes = false;
            let outputs = one(ty_to_type(ccx, &intr.output, &mut out_changes));
            // outputting a flattened aggregate is nonsense
            assert!(!out_changes);

            let llargs = if !any_changes_needed {
                // no aggregates to flatten, so no change needed
                llargs
            } else {
                // there are some aggregates that need to be flattened
                // in the LLVM call, so we need to run over the types
                // again to find them and extract the arguments
                intr.inputs.iter()
                           .zip(&llargs)
                           .zip(arg_tys.iter().cloned())
                           .flat_map(|((t, llarg), ty)| modify_as_needed(bcx, t, ty, *llarg))
                           .collect()
            };
            assert_eq!(inputs.len(), llargs.len());

            let val = match intr.definition {
                intrinsics::IntrinsicDef::Named(name) => {
                    let f = declare::declare_cfn(ccx,
                                                 name,
                                                 Type::func(&inputs, &outputs),
                                                 tcx.mk_nil());
                    Call(bcx, f, &llargs, None, call_debug_location)
                }
            };

            match intr.output {
                intrinsics::Type::Aggregate(flatten, ref elems) => {
                    // the output is a tuple so we need to munge it properly
                    assert!(!flatten);

                    for i in 0..elems.len() {
                        let val = ExtractValue(bcx, val, i);
                        Store(bcx, val, StructGEP(bcx, llresult, i));
                    }
                    C_nil(ccx)
                }
                _ => val,
            }
        }
    };

    if val_ty(llval) != Type::void(ccx) &&
       machine::llsize_of_alloc(ccx, val_ty(llval)) != 0 {
        store_ty(bcx, llval, llresult, ret_ty);
    }

    // If we made a temporary stack slot, let's clean it up
    match dest {
        expr::Ignore => {
            bcx = glue::drop_ty(bcx, llresult, ret_ty, call_debug_location);
            call_lifetime_end(bcx, llresult);
        }
        expr::SaveIn(_) => {}
    }

    fcx.pop_and_trans_custom_cleanup_scope(bcx, cleanup_scope);

    Result::new(bcx, llresult)
}
fn copy_intrinsic<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
                              allow_overlap: bool,
                              volatile: bool,
                              tp_ty: Ty<'tcx>,
                              dst: ValueRef,
                              src: ValueRef,
                              count: ValueRef,
                              call_debug_location: DebugLoc)
                              -> ValueRef {
    let ccx = bcx.ccx();
    let lltp_ty = type_of::type_of(ccx, tp_ty);
    let align = C_i32(ccx, type_of::align_of(ccx, tp_ty) as i32);
    let size = machine::llsize_of(ccx, lltp_ty);
    let int_size = machine::llbitsize_of_real(ccx, ccx.int_type());

    let operation = if allow_overlap {
        "memmove"
    } else {
        "memcpy"
    };

    let name = format!("llvm.{}.p0i8.p0i8.i{}", operation, int_size);

    let dst_ptr = PointerCast(bcx, dst, Type::i8p(ccx));
    let src_ptr = PointerCast(bcx, src, Type::i8p(ccx));
    let llfn = ccx.get_intrinsic(&name);

    Call(bcx,
         llfn,
         &[dst_ptr,
           src_ptr,
           Mul(bcx, size, count, DebugLoc::None),
           align,
           C_bool(ccx, volatile)],
         None,
         call_debug_location)
}
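
// For instance, `copy_nonoverlapping::<u32>(src, dst, count)` on a 64-bit
// target goes through the helper above and ends up as roughly:
//
//     %bytes = mul i64 4, %count
//     call void @llvm.memcpy.p0i8.p0i8.i64(i8* %dst, i8* %src, i64 %bytes,
//                                          i32 4, i1 false)
//
// with `llvm.memmove` substituted when overlap is allowed and the trailing
// `i1` set to true for the volatile variants.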
fn memset_intrinsic<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
                                volatile: bool,
                                tp_ty: Ty<'tcx>,
                                dst: ValueRef,
                                val: ValueRef,
                                count: ValueRef,
                                call_debug_location: DebugLoc)
                                -> ValueRef {
    let ccx = bcx.ccx();
    let lltp_ty = type_of::type_of(ccx, tp_ty);
    let align = C_i32(ccx, type_of::align_of(ccx, tp_ty) as i32);
    let size = machine::llsize_of(ccx, lltp_ty);
    let int_size = machine::llbitsize_of_real(ccx, ccx.int_type());

    let name = format!("llvm.memset.p0i8.i{}", int_size);

    let dst_ptr = PointerCast(bcx, dst, Type::i8p(ccx));
    let llfn = ccx.get_intrinsic(&name);

    Call(bcx,
         llfn,
         &[dst_ptr,
           val,
           Mul(bcx, size, count, DebugLoc::None),
           align,
           C_bool(ccx, volatile)],
         None,
         call_debug_location)
}
fn count_zeros_intrinsic(bcx: Block,
                         name: &str,
                         val: ValueRef,
                         call_debug_location: DebugLoc)
                         -> ValueRef {
    let y = C_bool(bcx.ccx(), false);
    let llfn = bcx.ccx().get_intrinsic(&name);
    Call(bcx, llfn, &[val, y], None, call_debug_location)
}
fn with_overflow_intrinsic<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
                                       name: &str,
                                       a: ValueRef,
                                       b: ValueRef,
                                       out: ValueRef,
                                       call_debug_location: DebugLoc)
                                       -> ValueRef {
    let llfn = bcx.ccx().get_intrinsic(&name);

    // Convert `i1` to a `bool`, and write it to the out parameter
    let val = Call(bcx, llfn, &[a, b], None, call_debug_location);
    let result = ExtractValue(bcx, val, 0);
    let overflow = ZExt(bcx, ExtractValue(bcx, val, 1), Type::bool(bcx.ccx()));
    Store(bcx, result, StructGEP(bcx, out, 0));
    Store(bcx, overflow, StructGEP(bcx, out, 1));

    C_nil(bcx.ccx())
}
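
// The `*.with.overflow.*` LLVM intrinsics return a two-element struct, e.g.
//
//     declare { i32, i1 } @llvm.sadd.with.overflow.i32(i32, i32)
//
// so the helper above extracts both fields and zero-extends the `i1` flag,
// matching the layout of the Rust-level `(T, bool)` return value.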
fn try_intrinsic<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
                             func: ValueRef,
                             data: ValueRef,
                             dest: ValueRef,
                             dloc: DebugLoc) -> Block<'blk, 'tcx> {
    if bcx.sess().no_landing_pads() {
        Call(bcx, func, &[data], None, dloc);
        Store(bcx, C_null(Type::i8p(bcx.ccx())), dest);
        bcx
    } else if wants_msvc_seh(bcx.sess()) {
        trans_msvc_try(bcx, func, data, dest, dloc)
    } else {
        trans_gnu_try(bcx, func, data, dest, dloc)
    }
}
// MSVC's definition of the `rust_try` function. The exact implementation here
// is a little different than the GNU (standard) version below, not only because
// of the personality function but also because of the other fiddly bits about
// SEH. LLVM also currently requires us to structure this in a very particular
// way as explained below.
//
// Like with the GNU version we generate a shim wrapper
fn trans_msvc_try<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
                              func: ValueRef,
                              data: ValueRef,
                              dest: ValueRef,
                              dloc: DebugLoc) -> Block<'blk, 'tcx> {
    let llfn = get_rust_try_fn(bcx.fcx, &mut |try_fn_ty, output| {
        let ccx = bcx.ccx();
        let dloc = DebugLoc::None;
        let rust_try = declare::define_internal_rust_fn(ccx, "__rust_try",
                                                        try_fn_ty);
        let (fcx, block_arena);
        block_arena = TypedArena::new();
        fcx = new_fn_ctxt(ccx, rust_try, ast::DUMMY_NODE_ID, false,
                          output, ccx.tcx().mk_substs(Substs::trans_empty()),
                          None, &block_arena);
        let bcx = init_function(&fcx, true, output);
        let then = fcx.new_temp_block("then");
        let catch = fcx.new_temp_block("catch");
        let catch_return = fcx.new_temp_block("catch-return");
        let catch_resume = fcx.new_temp_block("catch-resume");
        let personality = fcx.eh_personality();

        let eh_typeid_for = ccx.get_intrinsic(&"llvm.eh.typeid.for");
        let rust_try_filter = match bcx.tcx().lang_items.msvc_try_filter() {
            Some(did) => callee::trans_fn_ref(ccx, did, ExprId(0),
                                              bcx.fcx.param_substs).val,
            None => bcx.sess().bug("msvc_try_filter not defined"),
        };

        // Type indicator for the exception being thrown, not entirely sure
        // what's going on here but it's what all the examples in LLVM use.
        let lpad_ty = Type::struct_(ccx, &[Type::i8p(ccx), Type::i32(ccx)],
                                    false);

        llvm::SetFunctionAttribute(rust_try, llvm::Attribute::NoInline);
        llvm::SetFunctionAttribute(rust_try, llvm::Attribute::OptimizeNone);
        let func = llvm::get_param(rust_try, 0);
        let data = llvm::get_param(rust_try, 1);

        // Invoke the function, specifying our two temporary landing pads as the
        // exit point. After the invoke we've terminated our basic block.
        Invoke(bcx, func, &[data], then.llbb, catch.llbb, None, dloc);

        // All the magic happens in this landing pad, and this is basically the
        // only landing pad in rust tagged with "catch" to indicate that we're
        // catching an exception. The other catch handlers in the GNU version
        // below just catch *all* exceptions, but that's because most exceptions
        // are already filtered out by the gnu personality function.
        //
        // For MSVC we're just using a standard personality function that we
        // can't customize (e.g. _except_handler3 or __C_specific_handler), so
        // we need to do the exception filtering ourselves. This is currently
        // performed by the `__rust_try_filter` function. This function,
        // specified in the landingpad instruction, will be invoked by Windows
        // SEH routines and will return whether the exception in question can be
        // caught (aka the Rust runtime is the one that threw the exception).
        //
        // To get this to compile (currently LLVM segfaults if it's not in this
        // particular structure), when the landingpad is executing we test to
        // make sure that the ID of the exception being thrown is indeed the one
        // that we were expecting. If it's not, we resume the exception, and
        // otherwise we return the pointer that we got. Full disclosure: It's not
        // clear to me what this `llvm.eh.typeid` stuff is doing *other* than
        // just allowing LLVM to compile this file without segfaulting. I would
        // expect the entire landing pad to just be:
        //
        //     %vals = landingpad ...
        //     %ehptr = extractvalue { i8*, i32 } %vals, 0
        //     ret i8* %ehptr
        //
        // but apparently LLVM chokes on this, so we do the more complicated
        // thing to placate it.
        let vals = LandingPad(catch, lpad_ty, personality, 1);
        let rust_try_filter = BitCast(catch, rust_try_filter, Type::i8p(ccx));
        AddClause(catch, vals, rust_try_filter);
        let ehptr = ExtractValue(catch, vals, 0);
        let sel = ExtractValue(catch, vals, 1);
        let filter_sel = Call(catch, eh_typeid_for, &[rust_try_filter], None,
                              dloc);
        let is_filter = ICmp(catch, llvm::IntEQ, sel, filter_sel, dloc);
        CondBr(catch, is_filter, catch_return.llbb, catch_resume.llbb, dloc);

        // Our "catch-return" basic block is where we've determined that we
        // actually need to catch this exception, in which case we just return
        // the exception pointer.
        Ret(catch_return, ehptr, dloc);

        // The "catch-resume" block is where we're running this landing pad but
        // we actually need to not catch the exception, so just resume the
        // exception to return.
        trans_unwind_resume(catch_resume, vals);

        // On the successful branch we just return null.
        Ret(then, C_null(Type::i8p(ccx)), dloc);

        rust_try
    });

    // Note that no invoke is used here because by definition this function
    // can't panic (that's what it's catching).
    let ret = Call(bcx, llfn, &[func, data], None, dloc);
    Store(bcx, ret, dest);
    return bcx;
}
// Definition of the standard "try" function for Rust using the GNU-like model
// of exceptions (e.g. the normal semantics of LLVM's landingpad and invoke
// instructions).
//
// This translation is a little surprising because we always call a shim
// function instead of inlining the call to `invoke` manually here. This is
// done because in LLVM we're only allowed to have one personality per function
// definition. The call to the `try` intrinsic is being inlined into the
// function calling it, and that function may already have other personality
// functions in play. By calling a shim we're guaranteed that our shim will
// have the right personality function.
fn trans_gnu_try<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
                             func: ValueRef,
                             data: ValueRef,
                             dest: ValueRef,
                             dloc: DebugLoc) -> Block<'blk, 'tcx> {
    let llfn = get_rust_try_fn(bcx.fcx, &mut |try_fn_ty, output| {
        let ccx = bcx.ccx();
        let dloc = DebugLoc::None;

        // Translates the shims described above:
        //
        //   bcx:
        //      invoke %func(%args...) normal %normal unwind %catch
        //
        //   normal:
        //      ret null
        //
        //   catch:
        //      (ptr, _) = landingpad
        //      ret ptr
        let rust_try = declare::define_internal_rust_fn(ccx, "__rust_try", try_fn_ty);
        attributes::emit_uwtable(rust_try, true);
        let catch_pers = match bcx.tcx().lang_items.eh_personality_catch() {
            Some(did) => callee::trans_fn_ref(ccx, did, ExprId(0),
                                              bcx.fcx.param_substs).val,
            None => bcx.tcx().sess.bug("eh_personality_catch not defined"),
        };

        let (fcx, block_arena);
        block_arena = TypedArena::new();
        fcx = new_fn_ctxt(ccx, rust_try, ast::DUMMY_NODE_ID, false,
                          output, ccx.tcx().mk_substs(Substs::trans_empty()),
                          None, &block_arena);
        let bcx = init_function(&fcx, true, output);
        let then = bcx.fcx.new_temp_block("then");
        let catch = bcx.fcx.new_temp_block("catch");

        let func = llvm::get_param(rust_try, 0);
        let data = llvm::get_param(rust_try, 1);
        Invoke(bcx, func, &[data], then.llbb, catch.llbb, None, dloc);
        Ret(then, C_null(Type::i8p(ccx)), dloc);

        // Type indicator for the exception being thrown.
        // The first value in this tuple is a pointer to the exception object being thrown.
        // The second value is a "selector" indicating which of the landing pad clauses
        // the exception's type had been matched to. rust_try ignores the selector.
        let lpad_ty = Type::struct_(ccx, &[Type::i8p(ccx), Type::i32(ccx)],
                                    false);
        let vals = LandingPad(catch, lpad_ty, catch_pers, 1);
        AddClause(catch, vals, C_null(Type::i8p(ccx)));
        let ptr = ExtractValue(catch, vals, 0);
        Ret(catch, ptr, dloc);

        rust_try
    });

    // Note that no invoke is used here because by definition this function
    // can't panic (that's what it's catching).
    let ret = Call(bcx, llfn, &[func, data], None, dloc);
    Store(bcx, ret, dest);
    return bcx;
}
// Helper to generate the `Ty` associated with `rust_try`
fn get_rust_try_fn<'a, 'tcx>(fcx: &FunctionContext<'a, 'tcx>,
                             f: &mut FnMut(Ty<'tcx>,
                                           ty::FnOutput<'tcx>) -> ValueRef)
                             -> ValueRef {
    let ccx = fcx.ccx;
    if let Some(llfn) = *ccx.rust_try_fn().borrow() {
        return llfn;
    }

    // Define the type up front for the signature of the rust_try function.
    let tcx = ccx.tcx();
    let i8p = tcx.mk_mut_ptr(tcx.types.i8);
    let fn_ty = tcx.mk_bare_fn(ty::BareFnTy {
        unsafety: hir::Unsafety::Unsafe,
        abi: abi::Rust,
        sig: ty::Binder(ty::FnSig {
            inputs: vec![i8p],
            output: ty::FnOutput::FnConverging(tcx.mk_nil()),
            variadic: false,
        }),
    });
    let fn_ty = tcx.mk_fn(None, fn_ty);
    let output = ty::FnOutput::FnConverging(i8p);
    let try_fn_ty = tcx.mk_bare_fn(ty::BareFnTy {
        unsafety: hir::Unsafety::Unsafe,
        abi: abi::Rust,
        sig: ty::Binder(ty::FnSig {
            inputs: vec![fn_ty, i8p],
            output: output,
            variadic: false,
        }),
    });
    let rust_try = f(tcx.mk_fn(None, try_fn_ty), output);
    *ccx.rust_try_fn().borrow_mut() = Some(rust_try);
    return rust_try;
}
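
// In Rust terms, the shim built through `get_rust_try_fn` therefore has the
// signature
//
//     unsafe fn __rust_try(f: fn(*mut i8), data: *mut i8) -> *mut i8
//
// returning null when `f` ran to completion and a pointer to the exception
// object when it panicked.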
fn span_invalid_monomorphization_error(a: &Session, b: Span, c: &str) {
    span_err!(a, b, E0511, "{}", c);
}
fn generic_simd_intrinsic<'blk, 'tcx, 'a>
    (bcx: Block<'blk, 'tcx>,
     name: &str,
     substs: subst::Substs<'tcx>,
     callee_ty: Ty<'tcx>,
     args: Option<&[P<hir::Expr>]>,
     llargs: &[ValueRef],
     ret_ty: Ty<'tcx>,
     llret_ty: Type,
     call_debug_location: DebugLoc,
     call_info: NodeIdAndSpan) -> ValueRef
{
    // macros for error handling:
    macro_rules! emit_error {
        ($msg: tt) => {
            emit_error!($msg, )
        };
        ($msg: tt, $($fmt: tt)*) => {
            span_invalid_monomorphization_error(
                bcx.sess(), call_info.span,
                &format!(concat!("invalid monomorphization of `{}` intrinsic: ",
                                 $msg),
                         name, $($fmt)*));
        }
    }
    macro_rules! require {
        ($cond: expr, $($fmt: tt)*) => {
            if !$cond {
                emit_error!($($fmt)*);
                return C_null(llret_ty)
            }
        }
    }
    macro_rules! require_simd {
        ($ty: expr, $position: expr) => {
            require!($ty.is_simd(), "expected SIMD {} type, found non-SIMD `{}`", $position, $ty)
        }
    }

    let tcx = bcx.tcx();
    let sig = tcx.erase_late_bound_regions(callee_ty.fn_sig());
    let sig = infer::normalize_associated_type(tcx, &sig);
    let arg_tys = sig.inputs;

    // every intrinsic takes a SIMD vector as its first argument
    require_simd!(arg_tys[0], "input");
    let in_ty = arg_tys[0];
    let in_elem = arg_tys[0].simd_type(tcx);
    let in_len = arg_tys[0].simd_size(tcx);

    let comparison = match name {
        "simd_eq" => Some(hir::BiEq),
        "simd_ne" => Some(hir::BiNe),
        "simd_lt" => Some(hir::BiLt),
        "simd_le" => Some(hir::BiLe),
        "simd_gt" => Some(hir::BiGt),
        "simd_ge" => Some(hir::BiGe),
        _ => None
    };

    if let Some(cmp_op) = comparison {
        require_simd!(ret_ty, "return");

        let out_len = ret_ty.simd_size(tcx);
        require!(in_len == out_len,
                 "expected return type with length {} (same as input type `{}`), \
                  found `{}` with length {}",
                 in_len, in_ty,
                 ret_ty, out_len);
        require!(llret_ty.element_type().kind() == llvm::Integer,
                 "expected return type with integer elements, found `{}` with non-integer `{}`",
                 ret_ty,
                 ret_ty.simd_type(tcx));

        return compare_simd_types(bcx,
                                  llargs[0],
                                  llargs[1],
                                  in_elem,
                                  llret_ty,
                                  cmp_op,
                                  call_debug_location)
    }
    if name.starts_with("simd_shuffle") {
        let n: usize = match name["simd_shuffle".len()..].parse() {
            Ok(n) => n,
            Err(_) => tcx.sess.span_bug(call_info.span,
                                        "bad `simd_shuffle` instruction only caught in trans?")
        };

        require_simd!(ret_ty, "return");

        let out_len = ret_ty.simd_size(tcx);
        require!(out_len == n,
                 "expected return type of length {}, found `{}` with length {}",
                 n, ret_ty, out_len);
        require!(in_elem == ret_ty.simd_type(tcx),
                 "expected return element type `{}` (element of input `{}`), \
                  found `{}` with element type `{}`",
                 in_elem, in_ty,
                 ret_ty, ret_ty.simd_type(tcx));

        let total_len = in_len as u64 * 2;

        let vector = match args {
            Some(args) => &args[2],
            None => bcx.sess().span_bug(call_info.span,
                                        "intrinsic call with unexpected argument shape"),
        };
        let vector = match consts::const_expr(
            bcx.ccx(),
            vector,
            tcx.mk_substs(substs),
            None,
            consts::TrueConst::Yes, // this should probably help simd error reporting
        ) {
            Ok((vector, _)) => vector,
            Err(err) => bcx.sess().span_fatal(call_info.span, &err.description()),
        };

        let indices: Option<Vec<_>> = (0..n)
            .map(|i| {
                let arg_idx = i;
                let val = const_get_elt(bcx.ccx(), vector, &[i as libc::c_uint]);
                let c = const_to_opt_uint(val);
                match c {
                    None => {
                        emit_error!("shuffle index #{} is not a constant", arg_idx);
                        None
                    }
                    Some(idx) if idx >= total_len => {
                        emit_error!("shuffle index #{} is out of bounds (limit {})",
                                    arg_idx, total_len);
                        None
                    }
                    Some(idx) => Some(C_i32(bcx.ccx(), idx as i32)),
                }
            })
            .collect();
        let indices = match indices {
            Some(i) => i,
            None => return C_null(llret_ty)
        };

        return ShuffleVector(bcx, llargs[0], llargs[1], C_vector(&indices))
    }
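
    // For example, `simd_shuffle4(a, b, [0, 2, 4, 6])` parses `n == 4` out of
    // the intrinsic name, const-evaluates the index array, checks every index
    // against `total_len == 8` (both inputs concatenated), and lowers to one
    // `shufflevector` with the constant mask <0, 2, 4, 6>.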
1437 if name == "simd_insert" {
1438 require!(in_elem == arg_tys[2],
1439 "expected inserted type `{}` (element of input `{}`), found `{}`",
1440 in_elem, in_ty, arg_tys[2]);
1441 return InsertElement(bcx, llargs[0], llargs[2], llargs[1])
1443 if name == "simd_extract" {
1444 require!(ret_ty == in_elem,
1445 "expected return type `{}` (element of input `{}`), found `{}`",
1446 in_elem, in_ty, ret_ty);
1447 return ExtractElement(bcx, llargs[0], llargs[1])
1450 if name == "simd_cast" {
1451 require_simd!(ret_ty, "return");
1452 let out_len = ret_ty.simd_size(tcx);
1453 require!(in_len == out_len,
1454 "expected return type with length {} (same as input type `{}`), \
1455 found `{}` with length {}",
1458 // casting cares about nominal type, not just structural type
1459 let out_elem = ret_ty.simd_type(tcx);
1461 if in_elem == out_elem { return llargs[0]; }
1463 enum Style { Float, Int(/* is signed? */ bool), Unsupported }
1465 let (in_style, in_width) = match in_elem.sty {
1466 // vectors of pointer-sized integers should've been
1467 // disallowed before here, so this unwrap is safe.
1468 ty::TyInt(i) => (Style::Int(true), i.bit_width().unwrap()),
1469 ty::TyUint(u) => (Style::Int(false), u.bit_width().unwrap()),
1470 ty::TyFloat(f) => (Style::Float, f.bit_width()),
1471 _ => (Style::Unsupported, 0)
1473 let (out_style, out_width) = match out_elem.sty {
1474 ty::TyInt(i) => (Style::Int(true), i.bit_width().unwrap()),
1475 ty::TyUint(u) => (Style::Int(false), u.bit_width().unwrap()),
1476 ty::TyFloat(f) => (Style::Float, f.bit_width()),
1477 _ => (Style::Unsupported, 0)
1480 match (in_style, out_style) {
1481 (Style::Int(in_is_signed), Style::Int(_)) => {
1482 return match in_width.cmp(&out_width) {
1483 Ordering::Greater => Trunc(bcx, llargs[0], llret_ty),
1484 Ordering::Equal => llargs[0],
1485 Ordering::Less => if in_is_signed {
1486 SExt(bcx, llargs[0], llret_ty)
1488 ZExt(bcx, llargs[0], llret_ty)
1492 (Style::Int(in_is_signed), Style::Float) => {
1493 return if in_is_signed {
1494 SIToFP(bcx, llargs[0], llret_ty)
1496 UIToFP(bcx, llargs[0], llret_ty)
1499 (Style::Float, Style::Int(out_is_signed)) => {
1500 return if out_is_signed {
1501 FPToSI(bcx, llargs[0], llret_ty)
1503 FPToUI(bcx, llargs[0], llret_ty)
1506 (Style::Float, Style::Float) => {
1507 return match in_width.cmp(&out_width) {
1508 Ordering::Greater => FPTrunc(bcx, llargs[0], llret_ty),
1509 Ordering::Equal => llargs[0],
1510 Ordering::Less => FPExt(bcx, llargs[0], llret_ty)
1513 _ => {/* Unsupported. Fallthrough. */}
1516 "unsupported cast from `{}` with element `{}` to `{}` with element `{}`",
    macro_rules! arith {
        ($($name: ident: $($($p: ident),* => $call: expr),*;)*) => {
            $(
                if name == stringify!($name) {
                    match in_elem.sty {
                        $(
                            $(ty::$p(_))|* => {
                                return $call(bcx, llargs[0], llargs[1], call_debug_location)
                            }
                            )*
                        _ => {},
                    }
                    require!(false,
                             "unsupported operation on `{}` with element `{}`",
                             in_ty,
                             in_elem)
                })*
        }
    }
    arith! {
        simd_add: TyUint, TyInt => Add, TyFloat => FAdd;
        simd_sub: TyUint, TyInt => Sub, TyFloat => FSub;
        simd_mul: TyUint, TyInt => Mul, TyFloat => FMul;
        simd_div: TyFloat => FDiv;
        simd_shl: TyUint, TyInt => Shl;
        simd_shr: TyUint => LShr, TyInt => AShr;
        simd_and: TyUint, TyInt => And;
        simd_or: TyUint, TyInt => Or;
        simd_xor: TyUint, TyInt => Xor;
    }
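
    // As an illustration, the `simd_add` row above expands to roughly:
    //
    //     if name == "simd_add" {
    //         match in_elem.sty {
    //             ty::TyUint(_) | ty::TyInt(_) =>
    //                 return Add(bcx, llargs[0], llargs[1], call_debug_location),
    //             ty::TyFloat(_) =>
    //                 return FAdd(bcx, llargs[0], llargs[1], call_debug_location),
    //             _ => {}
    //         }
    //         require!(false, "unsupported operation on `{}` with element `{}`",
    //                  in_ty, in_elem)
    //     }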
    bcx.sess().span_bug(call_info.span, "unknown SIMD intrinsic");
}
// Returns the width of an int TypeVariant, and whether it's signed or not.
// Returns None if the type is not an integer.
fn int_type_width_signed<'tcx>(sty: &ty::TypeVariants<'tcx>, ccx: &CrateContext)
        -> Option<(u64, bool)> {
    use rustc::middle::ty::{TyInt, TyUint};
    match *sty {
        TyInt(t) => Some((match t {
            ast::TyIs => {
                match &ccx.tcx().sess.target.target.target_pointer_width[..] {
                    "32" => 32,
                    "64" => 64,
                    tws => panic!("Unsupported target word size for isize: {}", tws),
                }
            },
            ast::TyI8 => 8,
            ast::TyI16 => 16,
            ast::TyI32 => 32,
            ast::TyI64 => 64,
        }, true)),
        TyUint(t) => Some((match t {
            ast::TyUs => {
                match &ccx.tcx().sess.target.target.target_pointer_width[..] {
                    "32" => 32,
                    "64" => 64,
                    tws => panic!("Unsupported target word size for usize: {}", tws),
                }
            },
            ast::TyU8 => 8,
            ast::TyU16 => 16,
            ast::TyU32 => 32,
            ast::TyU64 => 64,
        }, false)),
        _ => None,
    }
}