// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.

#![allow(non_upper_case_globals)]

use arena::TypedArena;
use intrinsics::{self, Intrinsic};
use libc;
use llvm;
use llvm::{ValueRef, TypeKind};
use rustc::ty::subst;
use rustc::ty::subst::FnSpace;
use abi::{Abi, FnType};
use adt;
use attributes;
use base::*;
use build::*;
use callee::{self, Callee};
use cleanup;
use cleanup::CleanupMethods;
use common::*;
use consts;
use datum::*;
use debuginfo::DebugLoc;
use declare;
use expr;
use glue;
use type_of;
use machine;
use type_::Type;
use rustc::ty::{self, Ty};
use Disr;
use rustc::ty::subst::Substs;
use rustc::hir;
use syntax::ast;
use syntax::ptr::P;
use syntax::parse::token;

use rustc::session::Session;
use rustc_const_eval::fatal_const_eval_err;
use syntax_pos::{Span, DUMMY_SP};

use std::cmp::Ordering;

fn get_simple_intrinsic(ccx: &CrateContext, name: &str) -> Option<ValueRef> {
    let llvm_name = match name {
        "sqrtf32" => "llvm.sqrt.f32",
        "sqrtf64" => "llvm.sqrt.f64",
        "powif32" => "llvm.powi.f32",
        "powif64" => "llvm.powi.f64",
        "sinf32" => "llvm.sin.f32",
        "sinf64" => "llvm.sin.f64",
        "cosf32" => "llvm.cos.f32",
        "cosf64" => "llvm.cos.f64",
        "powf32" => "llvm.pow.f32",
        "powf64" => "llvm.pow.f64",
        "expf32" => "llvm.exp.f32",
        "expf64" => "llvm.exp.f64",
        "exp2f32" => "llvm.exp2.f32",
        "exp2f64" => "llvm.exp2.f64",
        "logf32" => "llvm.log.f32",
        "logf64" => "llvm.log.f64",
        "log10f32" => "llvm.log10.f32",
        "log10f64" => "llvm.log10.f64",
        "log2f32" => "llvm.log2.f32",
        "log2f64" => "llvm.log2.f64",
        "fmaf32" => "llvm.fma.f32",
        "fmaf64" => "llvm.fma.f64",
        "fabsf32" => "llvm.fabs.f32",
        "fabsf64" => "llvm.fabs.f64",
        "copysignf32" => "llvm.copysign.f32",
        "copysignf64" => "llvm.copysign.f64",
        "floorf32" => "llvm.floor.f32",
        "floorf64" => "llvm.floor.f64",
        "ceilf32" => "llvm.ceil.f32",
        "ceilf64" => "llvm.ceil.f64",
        "truncf32" => "llvm.trunc.f32",
        "truncf64" => "llvm.trunc.f64",
        "rintf32" => "llvm.rint.f32",
        "rintf64" => "llvm.rint.f64",
        "nearbyintf32" => "llvm.nearbyint.f32",
        "nearbyintf64" => "llvm.nearbyint.f64",
        "roundf32" => "llvm.round.f32",
        "roundf64" => "llvm.round.f64",
        "assume" => "llvm.assume",
        _ => return None
    };
    Some(ccx.get_intrinsic(&llvm_name))
}
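
// Note: `get_simple_intrinsic(ccx, "sqrtf64")`, for instance, resolves to the
// cached `llvm.sqrt.f64` declaration, so every entry in the table above can be
// lowered with a single `Call` and needs no further special-casing.
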
/// Remember to add all intrinsics here, in librustc_typeck/check/mod.rs,
/// and in libcore/intrinsics.rs; if you need access to any llvm intrinsics,
/// add them to librustc_trans/trans/context.rs.
pub fn trans_intrinsic_call<'a, 'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>,
                                            callee_ty: Ty<'tcx>,
                                            fn_ty: &FnType,
                                            args: callee::CallArgs<'a, 'tcx>,
                                            dest: expr::Dest,
                                            call_debug_location: DebugLoc)
                                            -> Result<'blk, 'tcx> {
    let fcx = bcx.fcx;
    let ccx = fcx.ccx;
    let tcx = bcx.tcx();

    let _icx = push_ctxt("trans_intrinsic_call");

    let (def_id, substs, sig) = match callee_ty.sty {
        ty::TyFnDef(def_id, substs, fty) => {
            let sig = tcx.erase_late_bound_regions(&fty.sig);
            (def_id, substs, tcx.normalize_associated_type(&sig))
        }
        _ => bug!("expected fn item type, found {}", callee_ty)
    };
    let arg_tys = sig.inputs;
    let ret_ty = sig.output;
    let name = tcx.item_name(def_id).as_str();

    let span = match call_debug_location {
        DebugLoc::At(_, span) | DebugLoc::ScopeAt(_, span) => span,
        DebugLoc::None => {
            span_bug!(fcx.span.unwrap_or(DUMMY_SP),
                      "intrinsic `{}` called with missing span", name);
        }
    };

    let cleanup_scope = fcx.push_custom_cleanup_scope();

    // For `transmute` we can just trans the input expr directly into dest
    if name == "transmute" {
        let llret_ty = type_of::type_of(ccx, ret_ty.unwrap());
        match args {
            callee::ArgExprs(arg_exprs) => {
                assert_eq!(arg_exprs.len(), 1);

                let (in_type, out_type) = (*substs.types.get(FnSpace, 0),
                                           *substs.types.get(FnSpace, 1));
                let llintype = type_of::type_of(ccx, in_type);
                let llouttype = type_of::type_of(ccx, out_type);

                let in_type_size = machine::llbitsize_of_real(ccx, llintype);
                let out_type_size = machine::llbitsize_of_real(ccx, llouttype);

                if let ty::TyFnDef(def_id, substs, _) = in_type.sty {
                    if out_type_size != 0 {
                        // FIXME #19925 Remove this hack after a release cycle.
                        let _ = unpack_datum!(bcx, expr::trans(bcx, &arg_exprs[0]));
                        let llfn = Callee::def(ccx, def_id, substs).reify(ccx).val;
                        let llfnty = val_ty(llfn);
                        let llresult = match dest {
                            expr::SaveIn(d) => d,
                            expr::Ignore => alloc_ty(bcx, out_type, "ret")
                        };
                        Store(bcx, llfn, PointerCast(bcx, llresult, llfnty.ptr_to()));
                        if dest == expr::Ignore {
                            bcx = glue::drop_ty(bcx, llresult, out_type,
                                                call_debug_location);
                        }
                        fcx.scopes.borrow_mut().last_mut().unwrap().drop_non_lifetime_clean();
                        fcx.pop_and_trans_custom_cleanup_scope(bcx, cleanup_scope);
                        return Result::new(bcx, llresult);
                    }
                }

                // This should be caught by the intrinsicck pass
                assert_eq!(in_type_size, out_type_size);

                let nonpointer_nonaggregate = |llkind: TypeKind| -> bool {
                    use llvm::TypeKind::*;
                    match llkind {
                        Half | Float | Double | X86_FP80 | FP128 |
                        PPC_FP128 | Integer | Vector | X86_MMX => true,
                        _ => false
                    }
                };

                // An approximation to which types can be directly cast via
                // LLVM's bitcast. This doesn't cover pointer -> pointer casts,
                // but does, importantly, cover SIMD types.
                let in_kind = llintype.kind();
                let ret_kind = llret_ty.kind();
                let bitcast_compatible =
                    (nonpointer_nonaggregate(in_kind) && nonpointer_nonaggregate(ret_kind)) || {
                        in_kind == TypeKind::Pointer && ret_kind == TypeKind::Pointer
                    };

                let dest = if bitcast_compatible {
                    // if we're here, the type is scalar-like (a primitive, a
                    // SIMD type or a pointer), and so can be handled as a
                    // by-value ValueRef and can also be directly bitcast to the
                    // target type. Doing this special case makes conversions
                    // like `u32x4` -> `u64x2` much nicer for LLVM and so more
                    // efficient (these are done efficiently implicitly in C
                    // with the `__m128i` type and so this means Rust doesn't
                    // lose out there).
                    let expr = &arg_exprs[0];
                    let datum = unpack_datum!(bcx, expr::trans(bcx, expr));
                    let datum = unpack_datum!(bcx, datum.to_rvalue_datum(bcx, "transmute_temp"));
                    let val = if datum.kind.is_by_ref() {
                        load_ty(bcx, datum.val, datum.ty)
                    } else {
                        from_immediate(bcx, datum.val)
                    };

                    let cast_val = BitCast(bcx, val, llret_ty);

                    match dest {
                        expr::SaveIn(d) => {
                            // this often occurs in a sequence like `Store(val,
                            // d); val2 = Load(d)`, so disappears easily.
                            Store(bcx, cast_val, d);
                        }
                        expr::Ignore => {}
                    }
                    dest
                } else {
                    // The types are too complicated to do with a by-value
                    // bitcast, so pointer cast instead. We need to cast the
                    // dest so the types work out.
                    let dest = match dest {
                        expr::SaveIn(d) => expr::SaveIn(PointerCast(bcx, d, llintype.ptr_to())),
                        expr::Ignore => expr::Ignore
                    };
                    bcx = expr::trans_into(bcx, &arg_exprs[0], dest);
                    dest
                };

                fcx.scopes.borrow_mut().last_mut().unwrap().drop_non_lifetime_clean();
                fcx.pop_and_trans_custom_cleanup_scope(bcx, cleanup_scope);

                return match dest {
                    expr::SaveIn(d) => Result::new(bcx, d),
                    expr::Ignore => Result::new(bcx, C_undef(llret_ty.ptr_to()))
                };
            }

            _ => {
                bug!("expected expr as argument for transmute");
            }
        }
    }

    // For `move_val_init` we can evaluate the destination address
    // (the first argument) and then trans the source value (the
    // second argument) directly into the resulting destination
    // address.
    if name == "move_val_init" {
        if let callee::ArgExprs(ref exprs) = args {
            let (dest_expr, source_expr) = if exprs.len() != 2 {
                bug!("expected two exprs as arguments for `move_val_init` intrinsic");
            } else {
                (&exprs[0], &exprs[1])
            };

            // evaluate destination address
            let dest_datum = unpack_datum!(bcx, expr::trans(bcx, dest_expr));
            let dest_datum = unpack_datum!(
                bcx, dest_datum.to_rvalue_datum(bcx, "arg"));
            let dest_datum = unpack_datum!(
                bcx, dest_datum.to_appropriate_datum(bcx));

            // `expr::trans_into(bcx, expr, dest)` is equiv to
            //
            //    `trans(bcx, expr).store_to_dest(dest)`,
            //
            // which for `dest == expr::SaveIn(addr)`, is equivalent to:
            //
            //    `trans(bcx, expr).store_to(bcx, addr)`.
            let lldest = expr::Dest::SaveIn(dest_datum.val);
            bcx = expr::trans_into(bcx, source_expr, lldest);

            let llresult = C_nil(ccx);
            fcx.pop_and_trans_custom_cleanup_scope(bcx, cleanup_scope);

            return Result::new(bcx, llresult);
        } else {
            bug!("expected two exprs as arguments for `move_val_init` intrinsic");
        }
    }

    // save the actual AST arguments for later (some places need to do
    // const-evaluation on them)
    let expr_arguments = match args {
        callee::ArgExprs(args) => Some(args),
        _ => None,
    };

    // Push the arguments.
    let mut llargs = Vec::new();
    bcx = callee::trans_args(bcx,
                             Abi::RustIntrinsic,
                             fn_ty,
                             &mut callee::Intrinsic,
                             args,
                             &mut llargs,
                             cleanup::CustomScope(cleanup_scope));

    fcx.scopes.borrow_mut().last_mut().unwrap().drop_non_lifetime_clean();

    // These are the only intrinsic functions that diverge.
    if name == "abort" {
        let llfn = ccx.get_intrinsic(&("llvm.trap"));
        Call(bcx, llfn, &[], call_debug_location);
        fcx.pop_and_trans_custom_cleanup_scope(bcx, cleanup_scope);
        Unreachable(bcx);
        return Result::new(bcx, C_undef(Type::nil(ccx).ptr_to()));
    } else if &name[..] == "unreachable" {
        fcx.pop_and_trans_custom_cleanup_scope(bcx, cleanup_scope);
        Unreachable(bcx);
        return Result::new(bcx, C_nil(ccx));
    }
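
    // For example, a call to `intrinsics::abort()` lowers to
    // `call void @llvm.trap()` followed by `unreachable`, so neither
    // diverging intrinsic ever materializes an ordinary return value.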

    let ret_ty = match ret_ty {
        ty::FnConverging(ret_ty) => ret_ty,
        ty::FnDiverging => bug!()
    };

    let llret_ty = type_of::type_of(ccx, ret_ty);

    // Get location to store the result. If the user does
    // not care about the result, just make a stack slot
    let llresult = match dest {
        expr::SaveIn(d) => d,
        expr::Ignore => {
            if !type_is_zero_size(ccx, ret_ty) {
                let llresult = alloc_ty(bcx, ret_ty, "intrinsic_result");
                call_lifetime_start(bcx, llresult);
                llresult
            } else {
                C_undef(llret_ty.ptr_to())
            }
        }
    };

    let simple = get_simple_intrinsic(ccx, &name);
    let llval = match (simple, &name[..]) {
        (Some(llfn), _) => {
            Call(bcx, llfn, &llargs, call_debug_location)
        }
        (_, "try") => {
            bcx = try_intrinsic(bcx, llargs[0], llargs[1], llargs[2], llresult,
                                call_debug_location);
            C_nil(ccx)
        }
        (_, "breakpoint") => {
            let llfn = ccx.get_intrinsic(&("llvm.debugtrap"));
            Call(bcx, llfn, &[], call_debug_location)
        }
        (_, "size_of") => {
            let tp_ty = *substs.types.get(FnSpace, 0);
            let lltp_ty = type_of::type_of(ccx, tp_ty);
            C_uint(ccx, machine::llsize_of_alloc(ccx, lltp_ty))
        }
        (_, "size_of_val") => {
            let tp_ty = *substs.types.get(FnSpace, 0);
            if !type_is_sized(tcx, tp_ty) {
                let (llsize, _) =
                    glue::size_and_align_of_dst(&bcx.build(), tp_ty, llargs[1]);
                llsize
            } else {
                let lltp_ty = type_of::type_of(ccx, tp_ty);
                C_uint(ccx, machine::llsize_of_alloc(ccx, lltp_ty))
            }
        }
        (_, "min_align_of") => {
            let tp_ty = *substs.types.get(FnSpace, 0);
            C_uint(ccx, type_of::align_of(ccx, tp_ty))
        }
        (_, "min_align_of_val") => {
            let tp_ty = *substs.types.get(FnSpace, 0);
            if !type_is_sized(tcx, tp_ty) {
                let (_, llalign) =
                    glue::size_and_align_of_dst(&bcx.build(), tp_ty, llargs[1]);
                llalign
            } else {
                C_uint(ccx, type_of::align_of(ccx, tp_ty))
            }
        }
        (_, "pref_align_of") => {
            let tp_ty = *substs.types.get(FnSpace, 0);
            let lltp_ty = type_of::type_of(ccx, tp_ty);
            C_uint(ccx, machine::llalign_of_pref(ccx, lltp_ty))
        }
        (_, "drop_in_place") => {
            let tp_ty = *substs.types.get(FnSpace, 0);
            let ptr = if type_is_sized(tcx, tp_ty) {
                llargs[0]
            } else {
                let scratch = rvalue_scratch_datum(bcx, tp_ty, "tmp");
                Store(bcx, llargs[0], expr::get_dataptr(bcx, scratch.val));
                Store(bcx, llargs[1], expr::get_meta(bcx, scratch.val));
                fcx.schedule_lifetime_end(cleanup::CustomScope(cleanup_scope), scratch.val);
                scratch.val
            };
            glue::drop_ty(bcx, ptr, tp_ty, call_debug_location);
            C_nil(ccx)
        }
        (_, "type_name") => {
            let tp_ty = *substs.types.get(FnSpace, 0);
            let ty_name = token::intern_and_get_ident(&tp_ty.to_string());
            C_str_slice(ccx, ty_name)
        }
        (_, "type_id") => {
            let hash = ccx.tcx().hash_crate_independent(*substs.types.get(FnSpace, 0),
                                                        &ccx.link_meta().crate_hash);
            C_u64(ccx, hash)
        }
        (_, "init_dropped") => {
            let tp_ty = *substs.types.get(FnSpace, 0);
            if !type_is_zero_size(ccx, tp_ty) {
                drop_done_fill_mem(bcx, llresult, tp_ty);
            }
            C_nil(ccx)
        }
        (_, "init") => {
            let tp_ty = *substs.types.get(FnSpace, 0);
            if !type_is_zero_size(ccx, tp_ty) {
                // Just zero out the stack slot. (See comment on base::memzero for explanation)
                init_zero_mem(bcx, llresult, tp_ty);
            }
            C_nil(ccx)
        }
        // Effectively no-ops
        (_, "uninit") | (_, "forget") => {
            C_nil(ccx)
        }
        (_, "needs_drop") => {
            let tp_ty = *substs.types.get(FnSpace, 0);

            C_bool(ccx, bcx.fcx.type_needs_drop(tp_ty))
        }
        (_, "offset") => {
            let ptr = llargs[0];
            let offset = llargs[1];
            InBoundsGEP(bcx, ptr, &[offset])
        }
        (_, "arith_offset") => {
            let ptr = llargs[0];
            let offset = llargs[1];
            GEP(bcx, ptr, &[offset])
        }
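
        // The distinction matters: `offset` emits an inbounds GEP, so the
        // result must stay within the original allocated object, while
        // `arith_offset` emits a plain GEP with wrapping semantics and no
        // such requirement.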
        (_, "copy_nonoverlapping") => {
            copy_intrinsic(bcx,
                           false,
                           false,
                           *substs.types.get(FnSpace, 0),
                           llargs[1],
                           llargs[0],
                           llargs[2],
                           call_debug_location)
        }
        (_, "copy") => {
            copy_intrinsic(bcx,
                           true,
                           false,
                           *substs.types.get(FnSpace, 0),
                           llargs[1],
                           llargs[0],
                           llargs[2],
                           call_debug_location)
        }
        (_, "write_bytes") => {
            memset_intrinsic(bcx,
                             false,
                             *substs.types.get(FnSpace, 0),
                             llargs[0],
                             llargs[1],
                             llargs[2],
                             call_debug_location)
        }

        (_, "volatile_copy_nonoverlapping_memory") => {
            copy_intrinsic(bcx,
                           false,
                           true,
                           *substs.types.get(FnSpace, 0),
                           llargs[0],
                           llargs[1],
                           llargs[2],
                           call_debug_location)
        }
        (_, "volatile_copy_memory") => {
            copy_intrinsic(bcx,
                           true,
                           true,
                           *substs.types.get(FnSpace, 0),
                           llargs[0],
                           llargs[1],
                           llargs[2],
                           call_debug_location)
        }
        (_, "volatile_set_memory") => {
            memset_intrinsic(bcx,
                             true,
                             *substs.types.get(FnSpace, 0),
                             llargs[0],
                             llargs[1],
                             llargs[2],
                             call_debug_location)
        }
        (_, "volatile_load") => {
            let tp_ty = *substs.types.get(FnSpace, 0);
            let mut ptr = llargs[0];
            if let Some(ty) = fn_ty.ret.cast {
                ptr = PointerCast(bcx, ptr, ty.ptr_to());
            }
            let load = VolatileLoad(bcx, ptr);
            unsafe {
                llvm::LLVMSetAlignment(load, type_of::align_of(ccx, tp_ty));
            }
            to_immediate(bcx, load, tp_ty)
        }
        (_, "volatile_store") => {
            let tp_ty = *substs.types.get(FnSpace, 0);
            if type_is_fat_ptr(bcx.tcx(), tp_ty) {
                VolatileStore(bcx, llargs[1], expr::get_dataptr(bcx, llargs[0]));
                VolatileStore(bcx, llargs[2], expr::get_meta(bcx, llargs[0]));
            } else {
                let val = if fn_ty.args[1].is_indirect() {
                    Load(bcx, llargs[1])
                } else {
                    from_immediate(bcx, llargs[1])
                };
                let ptr = PointerCast(bcx, llargs[0], val_ty(val).ptr_to());
                let store = VolatileStore(bcx, val, ptr);
                unsafe {
                    llvm::LLVMSetAlignment(store, type_of::align_of(ccx, tp_ty));
                }
            }
            C_nil(ccx)
        }
        (_, "ctlz") | (_, "cttz") | (_, "ctpop") | (_, "bswap") |
        (_, "add_with_overflow") | (_, "sub_with_overflow") | (_, "mul_with_overflow") |
        (_, "overflowing_add") | (_, "overflowing_sub") | (_, "overflowing_mul") |
        (_, "unchecked_div") | (_, "unchecked_rem") => {
            let sty = &arg_tys[0].sty;
            match int_type_width_signed(sty, ccx) {
                Some((width, signed)) =>
                    match &*name {
                        "ctlz" => count_zeros_intrinsic(bcx, &format!("llvm.ctlz.i{}", width),
                                                        llargs[0], call_debug_location),
                        "cttz" => count_zeros_intrinsic(bcx, &format!("llvm.cttz.i{}", width),
                                                        llargs[0], call_debug_location),
                        "ctpop" => Call(bcx, ccx.get_intrinsic(&format!("llvm.ctpop.i{}", width)),
                                        &llargs, call_debug_location),
                        "bswap" => {
                            if width == 8 {
                                llargs[0] // byte swap a u8/i8 is just a no-op
                            } else {
                                Call(bcx, ccx.get_intrinsic(&format!("llvm.bswap.i{}", width)),
                                     &llargs, call_debug_location)
                            }
                        }
                        "add_with_overflow" | "sub_with_overflow" | "mul_with_overflow" => {
                            let intrinsic = format!("llvm.{}{}.with.overflow.i{}",
                                                    if signed { 's' } else { 'u' },
                                                    &name[..3], width);
                            with_overflow_intrinsic(bcx, &intrinsic, llargs[0], llargs[1], llresult,
                                                    call_debug_location)
                        },
                        "overflowing_add" => Add(bcx, llargs[0], llargs[1], call_debug_location),
                        "overflowing_sub" => Sub(bcx, llargs[0], llargs[1], call_debug_location),
                        "overflowing_mul" => Mul(bcx, llargs[0], llargs[1], call_debug_location),
                        "unchecked_div" =>
                            if signed {
                                SDiv(bcx, llargs[0], llargs[1], call_debug_location)
                            } else {
                                UDiv(bcx, llargs[0], llargs[1], call_debug_location)
                            },
                        "unchecked_rem" =>
                            if signed {
                                SRem(bcx, llargs[0], llargs[1], call_debug_location)
                            } else {
                                URem(bcx, llargs[0], llargs[1], call_debug_location)
                            },
                        _ => bug!(),
                    },
                None => {
                    span_invalid_monomorphization_error(
                        tcx.sess, span,
                        &format!("invalid monomorphization of `{}` intrinsic: \
                                  expected basic integer type, found `{}`", name, sty));
                    C_nil(ccx)
                }
            }
        }
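
        // For instance, `add_with_overflow` on `i32` builds the name
        // "llvm.sadd.with.overflow.i32" (signedness picks the `s`/`u` prefix,
        // `&name[..3]` supplies the operation); the intrinsic returns an
        // `{ i32, i1 }` pair that `with_overflow_intrinsic` below unpacks.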
        (_, "fadd_fast") | (_, "fsub_fast") | (_, "fmul_fast") | (_, "fdiv_fast") |
        (_, "frem_fast") => {
            let sty = &arg_tys[0].sty;
            match float_type_width(sty) {
                Some(_width) =>
                    match &*name {
                        "fadd_fast" => FAddFast(bcx, llargs[0], llargs[1], call_debug_location),
                        "fsub_fast" => FSubFast(bcx, llargs[0], llargs[1], call_debug_location),
                        "fmul_fast" => FMulFast(bcx, llargs[0], llargs[1], call_debug_location),
                        "fdiv_fast" => FDivFast(bcx, llargs[0], llargs[1], call_debug_location),
                        "frem_fast" => FRemFast(bcx, llargs[0], llargs[1], call_debug_location),
                        _ => bug!(),
                    },
                None => {
                    span_invalid_monomorphization_error(
                        tcx.sess, span,
                        &format!("invalid monomorphization of `{}` intrinsic: \
                                  expected basic float type, found `{}`", name, sty));
                    C_nil(ccx)
                }
            }
        }
        (_, "discriminant_value") => {
            let val_ty = substs.types.get(FnSpace, 0);
            match val_ty.sty {
                ty::TyEnum(..) => {
                    let repr = adt::represent_type(ccx, *val_ty);
                    adt::trans_get_discr(bcx, &repr, llargs[0],
                                         Some(llret_ty), true)
                }
                _ => C_null(llret_ty)
            }
        }
        (_, name) if name.starts_with("simd_") => {
            generic_simd_intrinsic(bcx, name,
                                   substs,
                                   callee_ty,
                                   expr_arguments,
                                   &llargs,
                                   ret_ty, llret_ty,
                                   call_debug_location,
                                   span)
        }
        // This requires that atomic intrinsics follow a specific naming pattern:
        // "atomic_<operation>[_<ordering>]", and no ordering means SeqCst
        (_, name) if name.starts_with("atomic_") => {
            let split: Vec<&str> = name.split('_').collect();

            let is_cxchg = split[1] == "cxchg" || split[1] == "cxchgweak";
            let (order, failorder) = match split.len() {
                2 => (llvm::SequentiallyConsistent, llvm::SequentiallyConsistent),
                3 => match split[2] {
                    "unordered" => (llvm::Unordered, llvm::Unordered),
                    "relaxed" => (llvm::Monotonic, llvm::Monotonic),
                    "acq" => (llvm::Acquire, llvm::Acquire),
                    "rel" => (llvm::Release, llvm::Monotonic),
                    "acqrel" => (llvm::AcquireRelease, llvm::Acquire),
                    "failrelaxed" if is_cxchg =>
                        (llvm::SequentiallyConsistent, llvm::Monotonic),
                    "failacq" if is_cxchg =>
                        (llvm::SequentiallyConsistent, llvm::Acquire),
                    _ => ccx.sess().fatal("unknown ordering in atomic intrinsic")
                },
                4 => match (split[2], split[3]) {
                    ("acq", "failrelaxed") if is_cxchg =>
                        (llvm::Acquire, llvm::Monotonic),
                    ("acqrel", "failrelaxed") if is_cxchg =>
                        (llvm::AcquireRelease, llvm::Monotonic),
                    _ => ccx.sess().fatal("unknown ordering in atomic intrinsic")
                },
                _ => ccx.sess().fatal("Atomic intrinsic not in correct format"),
            };
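
            // E.g. a bare `atomic_xadd` (two segments) is SeqCst for both
            // orderings, while `atomic_cxchg_acqrel` maps to
            // (AcquireRelease, Acquire): cmpxchg failure orderings may not
            // carry a release component.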

            match split[1] {
                "cxchg" | "cxchgweak" => {
                    let sty = &substs.types.get(FnSpace, 0).sty;
                    if int_type_width_signed(sty, ccx).is_some() {
                        let weak = if split[1] == "cxchgweak" { llvm::True } else { llvm::False };
                        let val = AtomicCmpXchg(bcx, llargs[0], llargs[1], llargs[2],
                                                order, failorder, weak);
                        let result = ExtractValue(bcx, val, 0);
                        let success = ZExt(bcx, ExtractValue(bcx, val, 1), Type::bool(bcx.ccx()));
                        Store(bcx, result, StructGEP(bcx, llresult, 0));
                        Store(bcx, success, StructGEP(bcx, llresult, 1));
                    } else {
                        span_invalid_monomorphization_error(
                            tcx.sess, span,
                            &format!("invalid monomorphization of `{}` intrinsic: \
                                      expected basic integer type, found `{}`", name, sty));
                    }
                    C_nil(ccx)
                }

                "load" => {
                    let sty = &substs.types.get(FnSpace, 0).sty;
                    if int_type_width_signed(sty, ccx).is_some() {
                        AtomicLoad(bcx, llargs[0], order)
                    } else {
                        span_invalid_monomorphization_error(
                            tcx.sess, span,
                            &format!("invalid monomorphization of `{}` intrinsic: \
                                      expected basic integer type, found `{}`", name, sty));
                        C_nil(ccx)
                    }
                }

                "store" => {
                    let sty = &substs.types.get(FnSpace, 0).sty;
                    if int_type_width_signed(sty, ccx).is_some() {
                        AtomicStore(bcx, llargs[1], llargs[0], order);
                    } else {
                        span_invalid_monomorphization_error(
                            tcx.sess, span,
                            &format!("invalid monomorphization of `{}` intrinsic: \
                                      expected basic integer type, found `{}`", name, sty));
                    }
                    C_nil(ccx)
                }

                "fence" => {
                    AtomicFence(bcx, order, llvm::CrossThread);
                    C_nil(ccx)
                }

                "singlethreadfence" => {
                    AtomicFence(bcx, order, llvm::SingleThread);
                    C_nil(ccx)
                }

                // These are all AtomicRMW ops
                op => {
                    let atom_op = match op {
                        "xchg" => llvm::AtomicXchg,
                        "xadd" => llvm::AtomicAdd,
                        "xsub" => llvm::AtomicSub,
                        "and"  => llvm::AtomicAnd,
                        "nand" => llvm::AtomicNand,
                        "or"   => llvm::AtomicOr,
                        "xor"  => llvm::AtomicXor,
                        "max"  => llvm::AtomicMax,
                        "min"  => llvm::AtomicMin,
                        "umax" => llvm::AtomicUMax,
                        "umin" => llvm::AtomicUMin,
                        _ => ccx.sess().fatal("unknown atomic operation")
                    };

                    let sty = &substs.types.get(FnSpace, 0).sty;
                    if int_type_width_signed(sty, ccx).is_some() {
                        AtomicRMW(bcx, atom_op, llargs[0], llargs[1], order)
                    } else {
                        span_invalid_monomorphization_error(
                            tcx.sess, span,
                            &format!("invalid monomorphization of `{}` intrinsic: \
                                      expected basic integer type, found `{}`", name, sty));
                        C_nil(ccx)
                    }
                }
            }
        }
        (_, _) => {
            let intr = match Intrinsic::find(&name) {
                Some(intr) => intr,
                None => bug!("unknown intrinsic '{}'", name),
            };
            fn one<T>(x: Vec<T>) -> T {
                assert_eq!(x.len(), 1);
                x.into_iter().next().unwrap()
            }
            fn ty_to_type(ccx: &CrateContext, t: &intrinsics::Type,
                          any_changes_needed: &mut bool) -> Vec<Type> {
                use intrinsics::Type::*;
                match *t {
                    Void => vec![Type::void(ccx)],
                    Integer(_signed, width, llvm_width) => {
                        *any_changes_needed |= width != llvm_width;
                        vec![Type::ix(ccx, llvm_width as u64)]
                    }
                    Float(x) => {
                        match x {
                            32 => vec![Type::f32(ccx)],
                            64 => vec![Type::f64(ccx)],
                            _ => bug!()
                        }
                    }
                    Pointer(ref t, ref llvm_elem, _const) => {
                        *any_changes_needed |= llvm_elem.is_some();

                        let t = llvm_elem.as_ref().unwrap_or(t);
                        let elem = one(ty_to_type(ccx, t,
                                                  any_changes_needed));
                        vec![elem.ptr_to()]
                    }
                    Vector(ref t, ref llvm_elem, length) => {
                        *any_changes_needed |= llvm_elem.is_some();

                        let t = llvm_elem.as_ref().unwrap_or(t);
                        let elem = one(ty_to_type(ccx, t,
                                                  any_changes_needed));
                        vec![Type::vector(&elem,
                                          length as u64)]
                    }
                    Aggregate(false, ref contents) => {
                        let elems = contents.iter()
                                            .map(|t| one(ty_to_type(ccx, t, any_changes_needed)))
                                            .collect::<Vec<_>>();
                        vec![Type::struct_(ccx, &elems, false)]
                    }
                    Aggregate(true, ref contents) => {
                        *any_changes_needed = true;
                        contents.iter()
                                .flat_map(|t| ty_to_type(ccx, t, any_changes_needed))
                                .collect()
                    }
                }
            }

            // This allows an argument list like `foo, (bar, baz),
            // qux` to be converted into `foo, bar, baz, qux`, integer
            // arguments to be truncated as needed and pointers to be
            // cast.
            fn modify_as_needed<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
                                            t: &intrinsics::Type,
                                            arg_type: Ty<'tcx>,
                                            llarg: ValueRef)
                                            -> Vec<ValueRef>
            {
                match *t {
                    intrinsics::Type::Aggregate(true, ref contents) => {
                        // We found a tuple that needs squishing! So
                        // run over the tuple and load each field.
                        //
                        // This assumes the type is "simple", i.e. no
                        // destructors, and the contents are SIMD
                        // etc.
                        assert!(!bcx.fcx.type_needs_drop(arg_type));

                        let repr = adt::represent_type(bcx.ccx(), arg_type);
                        let repr_ptr = &repr;
                        let arg = adt::MaybeSizedValue::sized(llarg);
                        (0..contents.len())
                            .map(|i| {
                                Load(bcx, adt::trans_field_ptr(bcx, repr_ptr, arg, Disr(0), i))
                            })
                            .collect()
                    }
                    intrinsics::Type::Pointer(_, Some(ref llvm_elem), _) => {
                        let llvm_elem = one(ty_to_type(bcx.ccx(), llvm_elem, &mut false));
                        vec![PointerCast(bcx, llarg,
                                         llvm_elem.ptr_to())]
                    }
                    intrinsics::Type::Vector(_, Some(ref llvm_elem), length) => {
                        let llvm_elem = one(ty_to_type(bcx.ccx(), llvm_elem, &mut false));
                        vec![BitCast(bcx, llarg,
                                     Type::vector(&llvm_elem, length as u64))]
                    }
                    intrinsics::Type::Integer(_, width, llvm_width) if width != llvm_width => {
                        // the LLVM intrinsic uses a smaller integer
                        // size than the C intrinsic's signature, so
                        // we have to trim it down here.
                        vec![Trunc(bcx, llarg, Type::ix(bcx.ccx(), llvm_width as u64))]
                    }
                    _ => vec![llarg],
                }
            }

            let mut any_changes_needed = false;
            let inputs = intr.inputs.iter()
                                    .flat_map(|t| ty_to_type(ccx, t, &mut any_changes_needed))
                                    .collect::<Vec<_>>();

            let mut out_changes = false;
            let outputs = one(ty_to_type(ccx, &intr.output, &mut out_changes));
            // outputting a flattened aggregate is nonsense
            assert!(!out_changes);

            let llargs = if !any_changes_needed {
                // no aggregates to flatten, so no change needed
                llargs
            } else {
                // there are some aggregates that need to be flattened
                // in the LLVM call, so we need to run over the types
                // again to find them and extract the arguments
                intr.inputs.iter()
                           .zip(&llargs)
                           .zip(&arg_tys)
                           .flat_map(|((t, llarg), ty)| modify_as_needed(bcx, t, ty, *llarg))
                           .collect()
            };

            assert_eq!(inputs.len(), llargs.len());

            let val = match intr.definition {
                intrinsics::IntrinsicDef::Named(name) => {
                    let f = declare::declare_cfn(ccx,
                                                 name,
                                                 Type::func(&inputs, &outputs));
                    Call(bcx, f, &llargs, call_debug_location)
                }
            };

            match *intr.output {
                intrinsics::Type::Aggregate(flatten, ref elems) => {
                    // the output is a tuple so we need to munge it properly
                    assert!(!flatten);

                    for i in 0..elems.len() {
                        let val = ExtractValue(bcx, val, i);
                        Store(bcx, val, StructGEP(bcx, llresult, i));
                    }
                    C_nil(ccx)
                }
                _ => val,
            }
        }
    };

    if val_ty(llval) != Type::void(ccx) &&
       machine::llsize_of_alloc(ccx, val_ty(llval)) != 0 {
        if let Some(ty) = fn_ty.ret.cast {
            let ptr = PointerCast(bcx, llresult, ty.ptr_to());
            let store = Store(bcx, llval, ptr);
            unsafe {
                llvm::LLVMSetAlignment(store, type_of::align_of(ccx, ret_ty));
            }
        } else {
            store_ty(bcx, llval, llresult, ret_ty);
        }
    }

    // If we made a temporary stack slot, let's clean it up
    match dest {
        expr::Ignore => {
            bcx = glue::drop_ty(bcx, llresult, ret_ty, call_debug_location);
            call_lifetime_end(bcx, llresult);
        }
        expr::SaveIn(_) => {}
    }

    fcx.pop_and_trans_custom_cleanup_scope(bcx, cleanup_scope);

    Result::new(bcx, llresult)
}

fn copy_intrinsic<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
                              allow_overlap: bool,
                              volatile: bool,
                              tp_ty: Ty<'tcx>,
                              dst: ValueRef,
                              src: ValueRef,
                              count: ValueRef,
                              call_debug_location: DebugLoc)
                              -> ValueRef {
    let ccx = bcx.ccx();
    let lltp_ty = type_of::type_of(ccx, tp_ty);
    let align = C_i32(ccx, type_of::align_of(ccx, tp_ty) as i32);
    let size = machine::llsize_of(ccx, lltp_ty);
    let int_size = machine::llbitsize_of_real(ccx, ccx.int_type());

    let operation = if allow_overlap {
        "memmove"
    } else {
        "memcpy"
    };

    let name = format!("llvm.{}.p0i8.p0i8.i{}", operation, int_size);

    let dst_ptr = PointerCast(bcx, dst, Type::i8p(ccx));
    let src_ptr = PointerCast(bcx, src, Type::i8p(ccx));
    let llfn = ccx.get_intrinsic(&name);

    Call(bcx,
         llfn,
         &[dst_ptr,
           src_ptr,
           Mul(bcx, size, count, DebugLoc::None),
           align,
           C_bool(ccx, volatile)],
         call_debug_location)
}
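
// E.g. `copy::<u64>(src, dst, count)` becomes a call to
// `llvm.memmove.p0i8.p0i8.i64` on a 64-bit target (memmove because `copy`
// permits overlap), with the byte length `size * count` from the `Mul` above.
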
fn memset_intrinsic<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
                                volatile: bool,
                                tp_ty: Ty<'tcx>,
                                dst: ValueRef,
                                val: ValueRef,
                                count: ValueRef,
                                call_debug_location: DebugLoc)
                                -> ValueRef {
    let ccx = bcx.ccx();
    let lltp_ty = type_of::type_of(ccx, tp_ty);
    let align = C_i32(ccx, type_of::align_of(ccx, tp_ty) as i32);
    let size = machine::llsize_of(ccx, lltp_ty);
    let int_size = machine::llbitsize_of_real(ccx, ccx.int_type());

    let name = format!("llvm.memset.p0i8.i{}", int_size);

    let dst_ptr = PointerCast(bcx, dst, Type::i8p(ccx));
    let llfn = ccx.get_intrinsic(&name);

    Call(bcx,
         llfn,
         &[dst_ptr,
           val,
           Mul(bcx, size, count, DebugLoc::None),
           align,
           C_bool(ccx, volatile)],
         call_debug_location)
}

fn count_zeros_intrinsic(bcx: Block,
                         name: &str,
                         val: ValueRef,
                         call_debug_location: DebugLoc)
                         -> ValueRef {
    let y = C_bool(bcx.ccx(), false);
    let llfn = bcx.ccx().get_intrinsic(&name);
    Call(bcx, llfn, &[val, y], call_debug_location)
}
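
// The constant `false` is LLVM's `is_zero_undef` flag: passing false makes
// `llvm.ctlz`/`llvm.cttz` return the integer width for a zero input rather
// than an undefined value, which is the behavior `ctlz`/`cttz` document.
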
fn with_overflow_intrinsic<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
                                       name: &str,
                                       a: ValueRef,
                                       b: ValueRef,
                                       out: ValueRef,
                                       call_debug_location: DebugLoc)
                                       -> ValueRef {
    let llfn = bcx.ccx().get_intrinsic(&name);

    // Convert `i1` to a `bool`, and write it to the out parameter
    let val = Call(bcx, llfn, &[a, b], call_debug_location);
    let result = ExtractValue(bcx, val, 0);
    let overflow = ZExt(bcx, ExtractValue(bcx, val, 1), Type::bool(bcx.ccx()));
    Store(bcx, result, StructGEP(bcx, out, 0));
    Store(bcx, overflow, StructGEP(bcx, out, 1));

    C_nil(bcx.ccx())
}

fn try_intrinsic<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
                             func: ValueRef,
                             data: ValueRef,
                             local_ptr: ValueRef,
                             dest: ValueRef,
                             dloc: DebugLoc) -> Block<'blk, 'tcx> {
    if bcx.sess().no_landing_pads() {
        Call(bcx, func, &[data], dloc);
        Store(bcx, C_null(Type::i8p(bcx.ccx())), dest);
        bcx
    } else if wants_msvc_seh(bcx.sess()) {
        trans_msvc_try(bcx, func, data, local_ptr, dest, dloc)
    } else {
        trans_gnu_try(bcx, func, data, local_ptr, dest, dloc)
    }
}

// MSVC's definition of the `rust_try` function.
//
// This implementation uses the new exception handling instructions in LLVM
// which have support in LLVM for SEH on MSVC targets. Although these
// instructions are meant to work for all targets, at the time of this
// writing LLVM does not recommend using them, as the older instructions
// are still better optimized.
fn trans_msvc_try<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
                              func: ValueRef,
                              data: ValueRef,
                              local_ptr: ValueRef,
                              dest: ValueRef,
                              dloc: DebugLoc) -> Block<'blk, 'tcx> {
    let llfn = get_rust_try_fn(bcx.fcx, &mut |bcx| {
        let ccx = bcx.ccx();
        let dloc = DebugLoc::None;

        SetPersonalityFn(bcx, bcx.fcx.eh_personality());

        let normal = bcx.fcx.new_temp_block("normal");
        let catchswitch = bcx.fcx.new_temp_block("catchswitch");
        let catchpad = bcx.fcx.new_temp_block("catchpad");
        let caught = bcx.fcx.new_temp_block("caught");

        let func = llvm::get_param(bcx.fcx.llfn, 0);
        let data = llvm::get_param(bcx.fcx.llfn, 1);
        let local_ptr = llvm::get_param(bcx.fcx.llfn, 2);

        // We're generating an IR snippet that looks like:
        //
        //   declare i32 @rust_try(%func, %data, %ptr) {
        //      %slot = alloca i64*
        //      invoke %func(%data) to label %normal unwind label %catchswitch
        //
        //   normal:
        //      ret i32 0
        //
        //   catchswitch:
        //      %cs = catchswitch within none [%catchpad] unwind to caller
        //
        //   catchpad:
        //      %tok = catchpad within %cs [%type_descriptor, 0, %slot]
        //      %ptr[0] = %slot[0]
        //      %ptr[1] = %slot[1]
        //      catchret from %tok to label %caught
        //
        //   caught:
        //      ret i32 1
        //   }
        //
        // This structure follows the basic usage of throw/try/catch in LLVM.
        // For example, compile this C++ snippet to see what LLVM generates:
        //
        //      #include <stdint.h>
        //
        //      int bar(void (*foo)(void), uint64_t *ret) {
        //          try {
        //              foo();
        //              return 0;
        //          } catch(uint64_t a[2]) {
        //              ret[0] = a[0];
        //              ret[1] = a[1];
        //              return 1;
        //          }
        //      }
        //
        // More information can be found in libstd's seh.rs implementation.
        let i64p = Type::i64(ccx).ptr_to();
        let slot = Alloca(bcx, i64p, "slot");
        Invoke(bcx, func, &[data], normal.llbb, catchswitch.llbb, dloc);

        Ret(normal, C_i32(ccx, 0), dloc);

        let cs = CatchSwitch(catchswitch, None, None, 1);
        AddHandler(catchswitch, cs, catchpad.llbb);

        let tcx = ccx.tcx();
        let tydesc = match tcx.lang_items.msvc_try_filter() {
            Some(did) => ::consts::get_static(ccx, did).to_llref(),
            None => bug!("msvc_try_filter not defined"),
        };
        let tok = CatchPad(catchpad, cs, &[tydesc, C_i32(ccx, 0), slot]);
        let addr = Load(catchpad, slot);
        let arg1 = Load(catchpad, addr);
        let val1 = C_i32(ccx, 1);
        let arg2 = Load(catchpad, InBoundsGEP(catchpad, addr, &[val1]));
        let local_ptr = BitCast(catchpad, local_ptr, i64p);
        Store(catchpad, arg1, local_ptr);
        Store(catchpad, arg2, InBoundsGEP(catchpad, local_ptr, &[val1]));
        CatchRet(catchpad, tok, caught.llbb);

        Ret(caught, C_i32(ccx, 1), dloc);
    });

    // Note that no invoke is used here because by definition this function
    // can't panic (that's what it's catching).
    let ret = Call(bcx, llfn, &[func, data, local_ptr], dloc);
    Store(bcx, ret, dest);
    bcx
}

// Definition of the standard "try" function for Rust using the GNU-like model
// of exceptions (e.g. the normal semantics of LLVM's landingpad and invoke
// instructions).
//
// This translation is a little surprising because we always call a shim
// function instead of inlining the call to `invoke` manually here. This is done
// because in LLVM we're only allowed to have one personality per function
// definition. The call to the `try` intrinsic is being inlined into the
// function calling it, and that function may already have other personality
// functions in play. By calling a shim we're guaranteed that our shim will have
// the right personality function.
fn trans_gnu_try<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
                             func: ValueRef,
                             data: ValueRef,
                             local_ptr: ValueRef,
                             dest: ValueRef,
                             dloc: DebugLoc) -> Block<'blk, 'tcx> {
    let llfn = get_rust_try_fn(bcx.fcx, &mut |bcx| {
        let ccx = bcx.ccx();
        let tcx = ccx.tcx();
        let dloc = DebugLoc::None;

        // Translates the shims described above:
        //
        //   bcx:
        //      invoke %func(%args...) normal %normal unwind %catch
        //
        //   normal:
        //      ret 0
        //
        //   catch:
        //      (ptr, _) = landingpad
        //      store ptr, %local_ptr
        //      ret 1
        //
        // Note that the `local_ptr` data passed into the `try` intrinsic is
        // expected to be `*mut *mut u8` for this to actually work, but that's
        // managed by the standard library.

        attributes::emit_uwtable(bcx.fcx.llfn, true);
        let target = &bcx.sess().target.target;
        let catch_pers = if target.arch == "arm" && target.target_os != "ios" {
            // Only ARM still uses a separate catch personality (for now)
            match tcx.lang_items.eh_personality_catch() {
                Some(did) => {
                    Callee::def(ccx, did, tcx.mk_substs(Substs::empty())).reify(ccx).val
                }
                None => bug!("eh_personality_catch not defined"),
            }
        } else {
            bcx.fcx.eh_personality()
        };

        let then = bcx.fcx.new_temp_block("then");
        let catch = bcx.fcx.new_temp_block("catch");

        let func = llvm::get_param(bcx.fcx.llfn, 0);
        let data = llvm::get_param(bcx.fcx.llfn, 1);
        let local_ptr = llvm::get_param(bcx.fcx.llfn, 2);
        Invoke(bcx, func, &[data], then.llbb, catch.llbb, dloc);
        Ret(then, C_i32(ccx, 0), dloc);

        // Type indicator for the exception being thrown.
        //
        // The first value in this tuple is a pointer to the exception object
        // being thrown. The second value is a "selector" indicating which of
        // the landing pad clauses the exception's type had been matched to.
        // rust_try ignores the selector.
        let lpad_ty = Type::struct_(ccx, &[Type::i8p(ccx), Type::i32(ccx)],
                                    false);
        let vals = LandingPad(catch, lpad_ty, catch_pers, 1);
        AddClause(catch, vals, C_null(Type::i8p(ccx)));
        let ptr = ExtractValue(catch, vals, 0);
        Store(catch, ptr, BitCast(catch, local_ptr, Type::i8p(ccx).ptr_to()));
        Ret(catch, C_i32(ccx, 1), dloc);
    });

    // Note that no invoke is used here because by definition this function
    // can't panic (that's what it's catching).
    let ret = Call(bcx, llfn, &[func, data, local_ptr], dloc);
    Store(bcx, ret, dest);
    bcx
}
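
// The null `i8*` added via `AddClause` above is a catch-all clause: any
// exception reaching the landing pad matches it, and only the exception
// object pointer is stored; the selector value is deliberately ignored.
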
// Helper function to give a Block to a closure to translate a shim function.
// This is currently primarily used for the `try` intrinsic functions above.
fn gen_fn<'a, 'tcx>(fcx: &FunctionContext<'a, 'tcx>,
                    name: &str,
                    inputs: Vec<Ty<'tcx>>,
                    output: ty::FnOutput<'tcx>,
                    trans: &mut for<'b> FnMut(Block<'b, 'tcx>))
                    -> ValueRef {
    let ccx = fcx.ccx;
    let sig = ty::FnSig {
        inputs: inputs,
        output: output,
        variadic: false,
    };
    let fn_ty = FnType::new(ccx, Abi::Rust, &sig, &[]);

    let rust_fn_ty = ccx.tcx().mk_fn_ptr(ccx.tcx().mk_bare_fn(ty::BareFnTy {
        unsafety: hir::Unsafety::Unsafe,
        abi: Abi::Rust,
        sig: ty::Binder(sig)
    }));
    let llfn = declare::define_internal_fn(ccx, name, rust_fn_ty);
    let (fcx, block_arena);
    block_arena = TypedArena::new();
    fcx = FunctionContext::new(ccx, llfn, fn_ty, None, &block_arena);
    let bcx = fcx.init(true, None);
    trans(bcx);
    fcx.cleanup();
    llfn
}

// Helper function used to get a handle to the `__rust_try` function used to
// catch exceptions.
//
// This function is only generated once and is then cached.
fn get_rust_try_fn<'a, 'tcx>(fcx: &FunctionContext<'a, 'tcx>,
                             trans: &mut for<'b> FnMut(Block<'b, 'tcx>))
                             -> ValueRef {
    let ccx = fcx.ccx;
    if let Some(llfn) = ccx.rust_try_fn().get() {
        return llfn;
    }

    // Define the type up front for the signature of the rust_try function.
    let tcx = ccx.tcx();
    let i8p = tcx.mk_mut_ptr(tcx.types.i8);
    let fn_ty = tcx.mk_fn_ptr(tcx.mk_bare_fn(ty::BareFnTy {
        unsafety: hir::Unsafety::Unsafe,
        abi: Abi::Rust,
        sig: ty::Binder(ty::FnSig {
            inputs: vec![i8p],
            output: ty::FnOutput::FnConverging(tcx.mk_nil()),
            variadic: false,
        }),
    }));
    let output = ty::FnOutput::FnConverging(tcx.types.i32);
    let rust_try = gen_fn(fcx, "__rust_try", vec![fn_ty, i8p, i8p], output, trans);
    ccx.rust_try_fn().set(Some(rust_try));
    rust_try
}

fn span_invalid_monomorphization_error(a: &Session, b: Span, c: &str) {
    span_err!(a, b, E0511, "{}", c);
}

fn generic_simd_intrinsic<'blk, 'tcx, 'a>
    (bcx: Block<'blk, 'tcx>,
     name: &str,
     substs: &'tcx subst::Substs<'tcx>,
     callee_ty: Ty<'tcx>,
     args: Option<&[P<hir::Expr>]>,
     llargs: &[ValueRef],
     ret_ty: Ty<'tcx>,
     llret_ty: Type,
     call_debug_location: DebugLoc,
     span: Span) -> ValueRef
{
    // macros for error handling:
    macro_rules! emit_error {
        ($msg: tt) => {
            emit_error!($msg, )
        };
        ($msg: tt, $($fmt: tt)*) => {
            span_invalid_monomorphization_error(
                bcx.sess(), span,
                &format!(concat!("invalid monomorphization of `{}` intrinsic: ",
                                 $msg),
                         name, $($fmt)*));
        }
    }
    macro_rules! require {
        ($cond: expr, $($fmt: tt)*) => {
            if !$cond {
                emit_error!($($fmt)*);
                return C_nil(bcx.ccx())
            }
        }
    }
    macro_rules! require_simd {
        ($ty: expr, $position: expr) => {
            require!($ty.is_simd(), "expected SIMD {} type, found non-SIMD `{}`", $position, $ty)
        }
    }

    let tcx = bcx.tcx();
    let sig = tcx.erase_late_bound_regions(callee_ty.fn_sig());
    let sig = tcx.normalize_associated_type(&sig);
    let arg_tys = sig.inputs;

    // every intrinsic takes a SIMD vector as its first argument
    require_simd!(arg_tys[0], "input");
    let in_ty = arg_tys[0];
    let in_elem = arg_tys[0].simd_type(tcx);
    let in_len = arg_tys[0].simd_size(tcx);

    let comparison = match name {
        "simd_eq" => Some(hir::BiEq),
        "simd_ne" => Some(hir::BiNe),
        "simd_lt" => Some(hir::BiLt),
        "simd_le" => Some(hir::BiLe),
        "simd_gt" => Some(hir::BiGt),
        "simd_ge" => Some(hir::BiGe),
        _ => None
    };

    if let Some(cmp_op) = comparison {
        require_simd!(ret_ty, "return");

        let out_len = ret_ty.simd_size(tcx);
        require!(in_len == out_len,
                 "expected return type with length {} (same as input type `{}`), \
                  found `{}` with length {}",
                 in_len, in_ty,
                 ret_ty, out_len);
        require!(llret_ty.element_type().kind() == llvm::Integer,
                 "expected return type with integer elements, found `{}` with non-integer `{}`",
                 ret_ty,
                 ret_ty.simd_type(tcx));

        return compare_simd_types(bcx,
                                  llargs[0],
                                  llargs[1],
                                  in_elem,
                                  llret_ty,
                                  cmp_op,
                                  call_debug_location)
    }

    if name.starts_with("simd_shuffle") {
        let n: usize = match name["simd_shuffle".len()..].parse() {
            Ok(n) => n,
            Err(_) => span_bug!(span,
                                "bad `simd_shuffle` instruction only caught in trans?")
        };

        require_simd!(ret_ty, "return");

        let out_len = ret_ty.simd_size(tcx);
        require!(out_len == n,
                 "expected return type of length {}, found `{}` with length {}",
                 n, ret_ty, out_len);
        require!(in_elem == ret_ty.simd_type(tcx),
                 "expected return element type `{}` (element of input `{}`), \
                  found `{}` with element type `{}`",
                 in_elem, in_ty,
                 ret_ty, ret_ty.simd_type(tcx));

        let total_len = in_len as u64 * 2;

        let vector = match args {
            Some(args) => {
                match consts::const_expr(bcx.ccx(), &args[2], substs, None,
                                         // this should probably help simd error reporting
                                         consts::TrueConst::Yes) {
                    Ok((vector, _)) => vector,
                    Err(err) => {
                        fatal_const_eval_err(bcx.tcx(), err.as_inner(), span,
                                             "shuffle indices");
                    }
                }
            }
            None => llargs[2]
        };

        let indices: Option<Vec<_>> = (0..n)
            .map(|i| {
                let arg_idx = i;
                let val = const_get_elt(vector, &[i as libc::c_uint]);
                match const_to_opt_uint(val) {
                    None => {
                        emit_error!("shuffle index #{} is not a constant", arg_idx);
                        None
                    }
                    Some(idx) if idx >= total_len => {
                        emit_error!("shuffle index #{} is out of bounds (limit {})",
                                    arg_idx, total_len);
                        None
                    }
                    Some(idx) => Some(C_i32(bcx.ccx(), idx as i32)),
                }
            })
            .collect();
        let indices = match indices {
            Some(i) => i,
            None => return C_null(llret_ty)
        };

        return ShuffleVector(bcx, llargs[0], llargs[1], C_vector(&indices))
    }

    if name == "simd_insert" {
        require!(in_elem == arg_tys[2],
                 "expected inserted type `{}` (element of input `{}`), found `{}`",
                 in_elem, in_ty, arg_tys[2]);
        return InsertElement(bcx, llargs[0], llargs[2], llargs[1])
    }
    if name == "simd_extract" {
        require!(ret_ty == in_elem,
                 "expected return type `{}` (element of input `{}`), found `{}`",
                 in_elem, in_ty, ret_ty);
        return ExtractElement(bcx, llargs[0], llargs[1])
    }
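
    // Note the operand order above: the Rust signature is
    // `simd_insert(v, index, elem)` while LLVM's insertelement takes
    // (vector, elem, index), hence `llargs[0]`, `llargs[2]`, `llargs[1]`.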

    if name == "simd_cast" {
        require_simd!(ret_ty, "return");
        let out_len = ret_ty.simd_size(tcx);
        require!(in_len == out_len,
                 "expected return type with length {} (same as input type `{}`), \
                  found `{}` with length {}",
                 in_len, in_ty,
                 ret_ty, out_len);
        // casting cares about nominal type, not just structural type
        let out_elem = ret_ty.simd_type(tcx);

        if in_elem == out_elem { return llargs[0]; }

        enum Style { Float, Int(/* is signed? */ bool), Unsupported }

        let (in_style, in_width) = match in_elem.sty {
            // vectors of pointer-sized integers should've been
            // disallowed before here, so this unwrap is safe.
            ty::TyInt(i) => (Style::Int(true), i.bit_width().unwrap()),
            ty::TyUint(u) => (Style::Int(false), u.bit_width().unwrap()),
            ty::TyFloat(f) => (Style::Float, f.bit_width()),
            _ => (Style::Unsupported, 0)
        };
        let (out_style, out_width) = match out_elem.sty {
            ty::TyInt(i) => (Style::Int(true), i.bit_width().unwrap()),
            ty::TyUint(u) => (Style::Int(false), u.bit_width().unwrap()),
            ty::TyFloat(f) => (Style::Float, f.bit_width()),
            _ => (Style::Unsupported, 0)
        };

        match (in_style, out_style) {
            (Style::Int(in_is_signed), Style::Int(_)) => {
                return match in_width.cmp(&out_width) {
                    Ordering::Greater => Trunc(bcx, llargs[0], llret_ty),
                    Ordering::Equal => llargs[0],
                    Ordering::Less => if in_is_signed {
                        SExt(bcx, llargs[0], llret_ty)
                    } else {
                        ZExt(bcx, llargs[0], llret_ty)
                    }
                }
            }
            (Style::Int(in_is_signed), Style::Float) => {
                return if in_is_signed {
                    SIToFP(bcx, llargs[0], llret_ty)
                } else {
                    UIToFP(bcx, llargs[0], llret_ty)
                }
            }
            (Style::Float, Style::Int(out_is_signed)) => {
                return if out_is_signed {
                    FPToSI(bcx, llargs[0], llret_ty)
                } else {
                    FPToUI(bcx, llargs[0], llret_ty)
                }
            }
            (Style::Float, Style::Float) => {
                return match in_width.cmp(&out_width) {
                    Ordering::Greater => FPTrunc(bcx, llargs[0], llret_ty),
                    Ordering::Equal => llargs[0],
                    Ordering::Less => FPExt(bcx, llargs[0], llret_ty)
                }
            }
            _ => {/* Unsupported. Fallthrough. */}
        }
        require!(false,
                 "unsupported cast from `{}` with element `{}` to `{}` with element `{}`",
                 in_ty, in_elem,
                 ret_ty, out_elem);
    }
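
    // So, e.g., a `simd_cast` from f32x4 to i32x4 takes the
    // (Style::Float, Style::Int(true)) arm and emits a single `fptosi`;
    // within a style, only the width comparison picks between
    // truncation and extension.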

    macro_rules! arith {
        ($($name: ident: $($($p: ident),* => $call: expr),*;)*) => {
            $(
                if name == stringify!($name) {
                    match in_elem.sty {
                        $(
                            $(ty::$p(_))|* => {
                                return $call(bcx, llargs[0], llargs[1], call_debug_location)
                            }
                        )*
                        _ => {},
                    }
                    require!(false,
                             "unsupported operation on `{}` with element `{}`",
                             in_ty,
                             in_elem)
                })*
        }
    }
    arith! {
        simd_add: TyUint, TyInt => Add, TyFloat => FAdd;
        simd_sub: TyUint, TyInt => Sub, TyFloat => FSub;
        simd_mul: TyUint, TyInt => Mul, TyFloat => FMul;
        simd_div: TyFloat => FDiv;
        simd_shl: TyUint, TyInt => Shl;
        simd_shr: TyUint => LShr, TyInt => AShr;
        simd_and: TyUint, TyInt => And;
        simd_or: TyUint, TyInt => Or;
        simd_xor: TyUint, TyInt => Xor;
    }
    span_bug!(span, "unknown SIMD intrinsic");
}

// Returns the width of an integer TypeVariant, and whether it's signed.
// Returns None if the type is not an integer.
fn int_type_width_signed<'tcx>(sty: &ty::TypeVariants<'tcx>, ccx: &CrateContext)
        -> Option<(u64, bool)> {
    use rustc::ty::{TyInt, TyUint};
    match *sty {
        TyInt(t) => Some((match t {
            ast::IntTy::Is => {
                match &ccx.tcx().sess.target.target.target_pointer_width[..] {
                    "16" => 16,
                    "32" => 32,
                    "64" => 64,
                    tws => bug!("Unsupported target word size for isize: {}", tws),
                }
            },
            ast::IntTy::I8 => 8,
            ast::IntTy::I16 => 16,
            ast::IntTy::I32 => 32,
            ast::IntTy::I64 => 64,
        }, true)),
        TyUint(t) => Some((match t {
            ast::UintTy::Us => {
                match &ccx.tcx().sess.target.target.target_pointer_width[..] {
                    "16" => 16,
                    "32" => 32,
                    "64" => 64,
                    tws => bug!("Unsupported target word size for usize: {}", tws),
                }
            },
            ast::UintTy::U8 => 8,
            ast::UintTy::U16 => 16,
            ast::UintTy::U32 => 32,
            ast::UintTy::U64 => 64,
        }, false)),
        _ => None,
    }
}
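
// E.g. `isize` on an x86_64 target yields Some((64, true)), which callers
// above use to pick concrete LLVM intrinsic names such as `llvm.ctpop.i64`.
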
// Returns the width of a float TypeVariant.
// Returns None if the type is not a float.
fn float_type_width<'tcx>(sty: &ty::TypeVariants<'tcx>)
        -> Option<u64> {
    use rustc::ty::TyFloat;
    match *sty {
        TyFloat(t) => Some(match t {
            ast::FloatTy::F32 => 32,
            ast::FloatTy::F64 => 64,
        }),
        _ => None,
    }
}