// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.

#![allow(non_upper_case_globals)]

use arena::TypedArena;
use intrinsics::{self, Intrinsic};
use libc;
use llvm;
use llvm::{ValueRef, TypeKind};
use rustc::ty::subst;
use rustc::ty::subst::FnSpace;
use abi::{Abi, FnType};
use adt;
use attributes;
use base::*;
use build::*;
use callee::{self, Callee};
use cleanup;
use cleanup::CleanupMethods;
use common::*;
use consts;
use datum::*;
use debuginfo::DebugLoc;
use declare;
use expr;
use glue;
use type_of;
use machine;
use type_::Type;
use rustc::ty::{self, Ty};
use Disr;
use rustc::ty::subst::Substs;
use rustc::hir;
use syntax::ast;
use syntax::ptr::P;
use syntax::parse::token;

use rustc::session::Session;
use syntax_pos::{Span, DUMMY_SP};

use std::cmp::Ordering;

fn get_simple_intrinsic(ccx: &CrateContext, name: &str) -> Option<ValueRef> {
    let llvm_name = match name {
        "sqrtf32" => "llvm.sqrt.f32",
        "sqrtf64" => "llvm.sqrt.f64",
        "powif32" => "llvm.powi.f32",
        "powif64" => "llvm.powi.f64",
        "sinf32" => "llvm.sin.f32",
        "sinf64" => "llvm.sin.f64",
        "cosf32" => "llvm.cos.f32",
        "cosf64" => "llvm.cos.f64",
        "powf32" => "llvm.pow.f32",
        "powf64" => "llvm.pow.f64",
        "expf32" => "llvm.exp.f32",
        "expf64" => "llvm.exp.f64",
        "exp2f32" => "llvm.exp2.f32",
        "exp2f64" => "llvm.exp2.f64",
        "logf32" => "llvm.log.f32",
        "logf64" => "llvm.log.f64",
        "log10f32" => "llvm.log10.f32",
        "log10f64" => "llvm.log10.f64",
        "log2f32" => "llvm.log2.f32",
        "log2f64" => "llvm.log2.f64",
        "fmaf32" => "llvm.fma.f32",
        "fmaf64" => "llvm.fma.f64",
        "fabsf32" => "llvm.fabs.f32",
        "fabsf64" => "llvm.fabs.f64",
        "copysignf32" => "llvm.copysign.f32",
        "copysignf64" => "llvm.copysign.f64",
        "floorf32" => "llvm.floor.f32",
        "floorf64" => "llvm.floor.f64",
        "ceilf32" => "llvm.ceil.f32",
        "ceilf64" => "llvm.ceil.f64",
        "truncf32" => "llvm.trunc.f32",
        "truncf64" => "llvm.trunc.f64",
        "rintf32" => "llvm.rint.f32",
        "rintf64" => "llvm.rint.f64",
        "nearbyintf32" => "llvm.nearbyint.f32",
        "nearbyintf64" => "llvm.nearbyint.f64",
        "roundf32" => "llvm.round.f32",
        "roundf64" => "llvm.round.f64",
        "assume" => "llvm.assume",
        _ => return None
    };
    Some(ccx.get_intrinsic(&llvm_name))
}
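
// For example, `2.0f32.sqrt()` bottoms out in the `sqrtf32` intrinsic, which
// the table above maps to `llvm.sqrt.f32`, so the generated IR contains a
// call like `%r = call float @llvm.sqrt.f32(float 2.0)`.
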
/// Remember to add all intrinsics here, in librustc_typeck/check/mod.rs,
/// and in libcore/intrinsics.rs; if you need access to any llvm intrinsics,
/// add them to librustc_trans/trans/context.rs
pub fn trans_intrinsic_call<'a, 'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>,
                                            callee_ty: Ty<'tcx>,
                                            fn_ty: &FnType,
                                            args: callee::CallArgs<'a, 'tcx>,
                                            dest: expr::Dest,
                                            call_debug_location: DebugLoc)
                                            -> Result<'blk, 'tcx> {
    let fcx = bcx.fcx;
    let ccx = fcx.ccx;
    let tcx = bcx.tcx();

    let _icx = push_ctxt("trans_intrinsic_call");

    let (def_id, substs, sig) = match callee_ty.sty {
        ty::TyFnDef(def_id, substs, fty) => {
            let sig = tcx.erase_late_bound_regions(&fty.sig);
            (def_id, substs, tcx.normalize_associated_type(&sig))
        }
        _ => bug!("expected fn item type, found {}", callee_ty)
    };
    let arg_tys = sig.inputs;
    let ret_ty = sig.output;
    let name = tcx.item_name(def_id).as_str();

    let span = match call_debug_location {
        DebugLoc::At(_, span) | DebugLoc::ScopeAt(_, span) => span,
        DebugLoc::None => {
            span_bug!(fcx.span.unwrap_or(DUMMY_SP),
                      "intrinsic `{}` called with missing span", name);
        }
    };

    let cleanup_scope = fcx.push_custom_cleanup_scope();

    // For `transmute` we can just trans the input expr directly into dest
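    // (For example, `mem::transmute::<f32, u32>(1.0)` takes the by-value
    // bitcast path below, while a transmute between two equal-sized aggregate
    // types falls back to a pointer cast of the destination slot.)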
    if name == "transmute" {
        let llret_ty = type_of::type_of(ccx, ret_ty.unwrap());
        match args {
            callee::ArgExprs(arg_exprs) => {
                assert_eq!(arg_exprs.len(), 1);

                let (in_type, out_type) = (*substs.types.get(FnSpace, 0),
                                           *substs.types.get(FnSpace, 1));
                let llintype = type_of::type_of(ccx, in_type);
                let llouttype = type_of::type_of(ccx, out_type);

                let in_type_size = machine::llbitsize_of_real(ccx, llintype);
                let out_type_size = machine::llbitsize_of_real(ccx, llouttype);
                if let ty::TyFnDef(def_id, substs, _) = in_type.sty {
                    if out_type_size != 0 {
                        // FIXME #19925 Remove this hack after a release cycle.
                        let _ = unpack_datum!(bcx, expr::trans(bcx, &arg_exprs[0]));
                        let llfn = Callee::def(ccx, def_id, substs).reify(ccx).val;
                        let llfnty = val_ty(llfn);
                        let llresult = match dest {
                            expr::SaveIn(d) => d,
                            expr::Ignore => alloc_ty(bcx, out_type, "ret")
                        };
                        Store(bcx, llfn, PointerCast(bcx, llresult, llfnty.ptr_to()));
                        if dest == expr::Ignore {
                            bcx = glue::drop_ty(bcx, llresult, out_type,
                                                call_debug_location);
                        }
                        fcx.scopes.borrow_mut().last_mut().unwrap().drop_non_lifetime_clean();
                        fcx.pop_and_trans_custom_cleanup_scope(bcx, cleanup_scope);
                        return Result::new(bcx, llresult);
                    }
                }

                // This should be caught by the intrinsicck pass
                assert_eq!(in_type_size, out_type_size);

                let nonpointer_nonaggregate = |llkind: TypeKind| -> bool {
                    use llvm::TypeKind::*;
                    match llkind {
                        Half | Float | Double | X86_FP80 | FP128 |
                            PPC_FP128 | Integer | Vector | X86_MMX => true,
                        _ => false
                    }
                };

                // An approximation to which types can be directly cast via
                // LLVM's bitcast. This doesn't cover pointer -> pointer casts,
                // but does, importantly, cover SIMD types.
                let in_kind = llintype.kind();
                let ret_kind = llret_ty.kind();
                let bitcast_compatible =
                    (nonpointer_nonaggregate(in_kind) && nonpointer_nonaggregate(ret_kind)) || {
                        in_kind == TypeKind::Pointer && ret_kind == TypeKind::Pointer
                    };

                let dest = if bitcast_compatible {
                    // if we're here, the type is scalar-like (a primitive, a
                    // SIMD type or a pointer), and so can be handled as a
                    // by-value ValueRef and can also be directly bitcast to the
                    // target type. Doing this special case makes conversions
                    // like `u32x4` -> `u64x2` much nicer for LLVM and so more
                    // efficient (these are done efficiently implicitly in C
                    // with the `__m128i` type and so this means Rust doesn't
                    // lose out there).
                    let expr = &arg_exprs[0];
                    let datum = unpack_datum!(bcx, expr::trans(bcx, expr));
                    let datum = unpack_datum!(bcx, datum.to_rvalue_datum(bcx, "transmute_temp"));
                    let val = if datum.kind.is_by_ref() {
                        load_ty(bcx, datum.val, datum.ty)
                    } else {
                        from_immediate(bcx, datum.val)
                    };

                    let cast_val = BitCast(bcx, val, llret_ty);

                    match dest {
                        expr::SaveIn(d) => {
                            // this often occurs in a sequence like `Store(val,
                            // d); val2 = Load(d)`, so disappears easily.
                            Store(bcx, cast_val, d);
                        }
                        expr::Ignore => {}
                    }
                    dest
                } else {
                    // The types are too complicated to do with a by-value
                    // bitcast, so pointer cast instead. We need to cast the
                    // dest so the types work out.
                    let dest = match dest {
                        expr::SaveIn(d) => expr::SaveIn(PointerCast(bcx, d, llintype.ptr_to())),
                        expr::Ignore => expr::Ignore
                    };
                    bcx = expr::trans_into(bcx, &arg_exprs[0], dest);
                    dest
                };

                fcx.scopes.borrow_mut().last_mut().unwrap().drop_non_lifetime_clean();
                fcx.pop_and_trans_custom_cleanup_scope(bcx, cleanup_scope);

                return match dest {
                    expr::SaveIn(d) => Result::new(bcx, d),
                    expr::Ignore => Result::new(bcx, C_undef(llret_ty.ptr_to()))
                };
            }

            _ => {
                bug!("expected expr as argument for transmute");
            }
        }
    }

    // For `move_val_init` we can evaluate the destination address
    // (the first argument) and then trans the source value (the
    // second argument) directly into the resulting destination
    // address.
    if name == "move_val_init" {
        if let callee::ArgExprs(ref exprs) = args {
            let (dest_expr, source_expr) = if exprs.len() != 2 {
                bug!("expected two exprs as arguments for `move_val_init` intrinsic");
            } else {
                (&exprs[0], &exprs[1])
            };

            // evaluate destination address
            let dest_datum = unpack_datum!(bcx, expr::trans(bcx, dest_expr));
            let dest_datum = unpack_datum!(
                bcx, dest_datum.to_rvalue_datum(bcx, "arg"));
            let dest_datum = unpack_datum!(
                bcx, dest_datum.to_appropriate_datum(bcx));

            // `expr::trans_into(bcx, expr, dest)` is equiv to
            //
            //    `trans(bcx, expr).store_to_dest(dest)`,
            //
            // which for `dest == expr::SaveIn(addr)`, is equivalent to:
            //
            //    `trans(bcx, expr).store_to(bcx, addr)`.
            let lldest = expr::Dest::SaveIn(dest_datum.val);
            bcx = expr::trans_into(bcx, source_expr, lldest);

            let llresult = C_nil(ccx);
            fcx.pop_and_trans_custom_cleanup_scope(bcx, cleanup_scope);

            return Result::new(bcx, llresult);
        } else {
            bug!("expected two exprs as arguments for `move_val_init` intrinsic");
        }
    }

    // save the actual AST arguments for later (some places need to do
    // const-evaluation on them)
    let expr_arguments = match args {
        callee::ArgExprs(args) => Some(args),
        _ => None
    };

295 let mut llargs = Vec::new();
296 bcx = callee::trans_args(bcx,
299 &mut callee::Intrinsic,
302 cleanup::CustomScope(cleanup_scope));
    fcx.scopes.borrow_mut().last_mut().unwrap().drop_non_lifetime_clean();

    // These are the only intrinsic functions that diverge.
    if name == "abort" {
        let llfn = ccx.get_intrinsic(&("llvm.trap"));
        Call(bcx, llfn, &[], call_debug_location);
        fcx.pop_and_trans_custom_cleanup_scope(bcx, cleanup_scope);
        Unreachable(bcx);
        return Result::new(bcx, C_undef(Type::nil(ccx).ptr_to()));
    } else if &name[..] == "unreachable" {
        fcx.pop_and_trans_custom_cleanup_scope(bcx, cleanup_scope);
        Unreachable(bcx);
        return Result::new(bcx, C_nil(ccx));
    }

    let ret_ty = match ret_ty {
        ty::FnConverging(ret_ty) => ret_ty,
        ty::FnDiverging => bug!()
    };

    let llret_ty = type_of::type_of(ccx, ret_ty);

    // Get location to store the result. If the user does
    // not care about the result, just make a stack slot
    let llresult = match dest {
        expr::SaveIn(d) => d,
        expr::Ignore => {
            if !type_is_zero_size(ccx, ret_ty) {
                let llresult = alloc_ty(bcx, ret_ty, "intrinsic_result");
                call_lifetime_start(bcx, llresult);
                llresult
            } else {
                C_undef(llret_ty.ptr_to())
            }
        }
    };

    let simple = get_simple_intrinsic(ccx, &name);
    let llval = match (simple, &name[..]) {
        (Some(llfn), _) => {
            Call(bcx, llfn, &llargs, call_debug_location)
        }
        (_, "try") => {
            bcx = try_intrinsic(bcx, llargs[0], llargs[1], llargs[2], llresult,
                                call_debug_location);
            C_nil(ccx)
        }
        (_, "breakpoint") => {
            let llfn = ccx.get_intrinsic(&("llvm.debugtrap"));
            Call(bcx, llfn, &[], call_debug_location)
        }
        (_, "size_of") => {
            let tp_ty = *substs.types.get(FnSpace, 0);
            let lltp_ty = type_of::type_of(ccx, tp_ty);
            C_uint(ccx, machine::llsize_of_alloc(ccx, lltp_ty))
        }
        (_, "size_of_val") => {
            let tp_ty = *substs.types.get(FnSpace, 0);
            if !type_is_sized(tcx, tp_ty) {
                let (llsize, _) =
                    glue::size_and_align_of_dst(&bcx.build(), tp_ty, llargs[1]);
                llsize
            } else {
                let lltp_ty = type_of::type_of(ccx, tp_ty);
                C_uint(ccx, machine::llsize_of_alloc(ccx, lltp_ty))
            }
        }
371 (_, "min_align_of") => {
372 let tp_ty = *substs.types.get(FnSpace, 0);
373 C_uint(ccx, type_of::align_of(ccx, tp_ty))
375 (_, "min_align_of_val") => {
376 let tp_ty = *substs.types.get(FnSpace, 0);
377 if !type_is_sized(tcx, tp_ty) {
379 glue::size_and_align_of_dst(&bcx.build(), tp_ty, llargs[1]);
382 C_uint(ccx, type_of::align_of(ccx, tp_ty))
385 (_, "pref_align_of") => {
386 let tp_ty = *substs.types.get(FnSpace, 0);
387 let lltp_ty = type_of::type_of(ccx, tp_ty);
388 C_uint(ccx, machine::llalign_of_pref(ccx, lltp_ty))
390 (_, "drop_in_place") => {
391 let tp_ty = *substs.types.get(FnSpace, 0);
392 let ptr = if type_is_sized(tcx, tp_ty) {
395 let scratch = rvalue_scratch_datum(bcx, tp_ty, "tmp");
396 Store(bcx, llargs[0], expr::get_dataptr(bcx, scratch.val));
397 Store(bcx, llargs[1], expr::get_meta(bcx, scratch.val));
398 fcx.schedule_lifetime_end(cleanup::CustomScope(cleanup_scope), scratch.val);
401 glue::drop_ty(bcx, ptr, tp_ty, call_debug_location);
404 (_, "type_name") => {
405 let tp_ty = *substs.types.get(FnSpace, 0);
406 let ty_name = token::intern_and_get_ident(&tp_ty.to_string());
407 C_str_slice(ccx, ty_name)
410 let hash = ccx.tcx().hash_crate_independent(*substs.types.get(FnSpace, 0),
411 &ccx.link_meta().crate_hash);
414 (_, "init_dropped") => {
415 let tp_ty = *substs.types.get(FnSpace, 0);
416 if !type_is_zero_size(ccx, tp_ty) {
417 drop_done_fill_mem(bcx, llresult, tp_ty);
422 let tp_ty = *substs.types.get(FnSpace, 0);
423 if !type_is_zero_size(ccx, tp_ty) {
424 // Just zero out the stack slot. (See comment on base::memzero for explanation)
425 init_zero_mem(bcx, llresult, tp_ty);
        // Effectively no-ops
        (_, "uninit") | (_, "forget") => {
            C_nil(ccx)
        }
        (_, "needs_drop") => {
            let tp_ty = *substs.types.get(FnSpace, 0);

            C_bool(ccx, bcx.fcx.type_needs_drop(tp_ty))
        }
        (_, "offset") => {
            let ptr = llargs[0];
            let offset = llargs[1];
            InBoundsGEP(bcx, ptr, &[offset])
        }
        (_, "arith_offset") => {
            let ptr = llargs[0];
            let offset = llargs[1];
            GEP(bcx, ptr, &[offset])
        }
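        // Note: `offset` lowers to `getelementptr inbounds`, so out-of-bounds
        // offsets are undefined behavior, while `arith_offset` lowers to a
        // plain `getelementptr` with two's complement wrapping.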
449 (_, "copy_nonoverlapping") => {
453 *substs.types.get(FnSpace, 0),
463 *substs.types.get(FnSpace, 0),
469 (_, "write_bytes") => {
470 memset_intrinsic(bcx,
472 *substs.types.get(FnSpace, 0),
479 (_, "volatile_copy_nonoverlapping_memory") => {
483 *substs.types.get(FnSpace, 0),
489 (_, "volatile_copy_memory") => {
493 *substs.types.get(FnSpace, 0),
499 (_, "volatile_set_memory") => {
500 memset_intrinsic(bcx,
502 *substs.types.get(FnSpace, 0),
508 (_, "volatile_load") => {
509 let tp_ty = *substs.types.get(FnSpace, 0);
510 let mut ptr = llargs[0];
511 if let Some(ty) = fn_ty.ret.cast {
512 ptr = PointerCast(bcx, ptr, ty.ptr_to());
514 let load = VolatileLoad(bcx, ptr);
516 llvm::LLVMSetAlignment(load, type_of::align_of(ccx, tp_ty));
518 to_immediate(bcx, load, tp_ty)
520 (_, "volatile_store") => {
521 let tp_ty = *substs.types.get(FnSpace, 0);
522 if type_is_fat_ptr(bcx.tcx(), tp_ty) {
523 VolatileStore(bcx, llargs[1], expr::get_dataptr(bcx, llargs[0]));
524 VolatileStore(bcx, llargs[2], expr::get_meta(bcx, llargs[0]));
526 let val = if fn_ty.args[1].is_indirect() {
529 from_immediate(bcx, llargs[1])
531 let ptr = PointerCast(bcx, llargs[0], val_ty(val).ptr_to());
532 let store = VolatileStore(bcx, val, ptr);
534 llvm::LLVMSetAlignment(store, type_of::align_of(ccx, tp_ty));
540 (_, "ctlz") | (_, "cttz") | (_, "ctpop") | (_, "bswap") |
541 (_, "add_with_overflow") | (_, "sub_with_overflow") | (_, "mul_with_overflow") |
542 (_, "overflowing_add") | (_, "overflowing_sub") | (_, "overflowing_mul") |
543 (_, "unchecked_div") | (_, "unchecked_rem") => {
544 let sty = &arg_tys[0].sty;
545 match int_type_width_signed(sty, ccx) {
546 Some((width, signed)) =>
548 "ctlz" => count_zeros_intrinsic(bcx, &format!("llvm.ctlz.i{}", width),
549 llargs[0], call_debug_location),
550 "cttz" => count_zeros_intrinsic(bcx, &format!("llvm.cttz.i{}", width),
551 llargs[0], call_debug_location),
552 "ctpop" => Call(bcx, ccx.get_intrinsic(&format!("llvm.ctpop.i{}", width)),
553 &llargs, call_debug_location),
556 llargs[0] // byte swap a u8/i8 is just a no-op
558 Call(bcx, ccx.get_intrinsic(&format!("llvm.bswap.i{}", width)),
559 &llargs, call_debug_location)
562 "add_with_overflow" | "sub_with_overflow" | "mul_with_overflow" => {
563 let intrinsic = format!("llvm.{}{}.with.overflow.i{}",
564 if signed { 's' } else { 'u' },
566 with_overflow_intrinsic(bcx, &intrinsic, llargs[0], llargs[1], llresult,
569 "overflowing_add" => Add(bcx, llargs[0], llargs[1], call_debug_location),
570 "overflowing_sub" => Sub(bcx, llargs[0], llargs[1], call_debug_location),
571 "overflowing_mul" => Mul(bcx, llargs[0], llargs[1], call_debug_location),
574 SDiv(bcx, llargs[0], llargs[1], call_debug_location)
576 UDiv(bcx, llargs[0], llargs[1], call_debug_location)
580 SRem(bcx, llargs[0], llargs[1], call_debug_location)
582 URem(bcx, llargs[0], llargs[1], call_debug_location)
587 span_invalid_monomorphization_error(
589 &format!("invalid monomorphization of `{}` intrinsic: \
590 expected basic integer type, found `{}`", name, sty));
596 (_, "fadd_fast") | (_, "fsub_fast") | (_, "fmul_fast") | (_, "fdiv_fast") |
597 (_, "frem_fast") => {
598 let sty = &arg_tys[0].sty;
599 match float_type_width(sty) {
602 "fadd_fast" => FAddFast(bcx, llargs[0], llargs[1], call_debug_location),
603 "fsub_fast" => FSubFast(bcx, llargs[0], llargs[1], call_debug_location),
604 "fmul_fast" => FMulFast(bcx, llargs[0], llargs[1], call_debug_location),
605 "fdiv_fast" => FDivFast(bcx, llargs[0], llargs[1], call_debug_location),
606 "frem_fast" => FRemFast(bcx, llargs[0], llargs[1], call_debug_location),
610 span_invalid_monomorphization_error(
612 &format!("invalid monomorphization of `{}` intrinsic: \
613 expected basic float type, found `{}`", name, sty));
620 (_, "discriminant_value") => {
621 let val_ty = substs.types.get(FnSpace, 0);
624 let repr = adt::represent_type(ccx, *val_ty);
625 adt::trans_get_discr(bcx, &repr, llargs[0],
626 Some(llret_ty), true)
628 _ => C_null(llret_ty)
        (_, name) if name.starts_with("simd_") => {
            generic_simd_intrinsic(bcx, name,
                                   substs,
                                   callee_ty,
                                   expr_arguments,
                                   &llargs,
                                   ret_ty, llret_ty,
                                   call_debug_location,
                                   span)
        }

        // This requires that atomic intrinsics follow a specific naming pattern:
        // "atomic_<operation>[_<ordering>]", and no ordering means SeqCst
        (_, name) if name.starts_with("atomic_") => {
            let split: Vec<&str> = name.split('_').collect();

            let is_cxchg = split[1] == "cxchg" || split[1] == "cxchgweak";
            let (order, failorder) = match split.len() {
                2 => (llvm::SequentiallyConsistent, llvm::SequentiallyConsistent),
                3 => match split[2] {
                    "unordered" => (llvm::Unordered, llvm::Unordered),
                    "relaxed" => (llvm::Monotonic, llvm::Monotonic),
                    "acq" => (llvm::Acquire, llvm::Acquire),
                    "rel" => (llvm::Release, llvm::Monotonic),
                    "acqrel" => (llvm::AcquireRelease, llvm::Acquire),
                    "failrelaxed" if is_cxchg =>
                        (llvm::SequentiallyConsistent, llvm::Monotonic),
                    "failacq" if is_cxchg =>
                        (llvm::SequentiallyConsistent, llvm::Acquire),
                    _ => ccx.sess().fatal("unknown ordering in atomic intrinsic")
                },
                4 => match (split[2], split[3]) {
                    ("acq", "failrelaxed") if is_cxchg =>
                        (llvm::Acquire, llvm::Monotonic),
                    ("acqrel", "failrelaxed") if is_cxchg =>
                        (llvm::AcquireRelease, llvm::Monotonic),
                    _ => ccx.sess().fatal("unknown ordering in atomic intrinsic")
                },
                _ => ccx.sess().fatal("Atomic intrinsic not in correct format"),
            };

            match split[1] {
672 "cxchg" | "cxchgweak" => {
673 let sty = &substs.types.get(FnSpace, 0).sty;
674 if int_type_width_signed(sty, ccx).is_some() {
675 let weak = if split[1] == "cxchgweak" { llvm::True } else { llvm::False };
676 let val = AtomicCmpXchg(bcx, llargs[0], llargs[1], llargs[2],
677 order, failorder, weak);
678 let result = ExtractValue(bcx, val, 0);
679 let success = ZExt(bcx, ExtractValue(bcx, val, 1), Type::bool(bcx.ccx()));
680 Store(bcx, result, StructGEP(bcx, llresult, 0));
681 Store(bcx, success, StructGEP(bcx, llresult, 1));
683 span_invalid_monomorphization_error(
685 &format!("invalid monomorphization of `{}` intrinsic: \
686 expected basic integer type, found `{}`", name, sty));
692 let sty = &substs.types.get(FnSpace, 0).sty;
693 if int_type_width_signed(sty, ccx).is_some() {
694 AtomicLoad(bcx, llargs[0], order)
696 span_invalid_monomorphization_error(
698 &format!("invalid monomorphization of `{}` intrinsic: \
699 expected basic integer type, found `{}`", name, sty));
705 let sty = &substs.types.get(FnSpace, 0).sty;
706 if int_type_width_signed(sty, ccx).is_some() {
707 AtomicStore(bcx, llargs[1], llargs[0], order);
709 span_invalid_monomorphization_error(
711 &format!("invalid monomorphization of `{}` intrinsic: \
712 expected basic integer type, found `{}`", name, sty));
718 AtomicFence(bcx, order, llvm::CrossThread);
722 "singlethreadfence" => {
723 AtomicFence(bcx, order, llvm::SingleThread);
                // These are all AtomicRMW ops
                op => {
                    let atom_op = match op {
                        "xchg" => llvm::AtomicXchg,
                        "xadd" => llvm::AtomicAdd,
                        "xsub" => llvm::AtomicSub,
                        "and" => llvm::AtomicAnd,
                        "nand" => llvm::AtomicNand,
                        "or" => llvm::AtomicOr,
                        "xor" => llvm::AtomicXor,
                        "max" => llvm::AtomicMax,
                        "min" => llvm::AtomicMin,
                        "umax" => llvm::AtomicUMax,
                        "umin" => llvm::AtomicUMin,
                        _ => ccx.sess().fatal("unknown atomic operation")
                    };

                    let sty = &substs.types.get(FnSpace, 0).sty;
                    if int_type_width_signed(sty, ccx).is_some() {
                        AtomicRMW(bcx, atom_op, llargs[0], llargs[1], order)
                    } else {
                        span_invalid_monomorphization_error(
                            tcx.sess, span,
                            &format!("invalid monomorphization of `{}` intrinsic: \
                                      expected basic integer type, found `{}`", name, sty));
                        C_nil(ccx)
                    }
                }
            }
        }

        (_, _) => {
            let intr = match Intrinsic::find(&name) {
                Some(intr) => intr,
                None => bug!("unknown intrinsic '{}'", name),
            };
            fn one<T>(x: Vec<T>) -> T {
                assert_eq!(x.len(), 1);
                x.into_iter().next().unwrap()
            }
            fn ty_to_type(ccx: &CrateContext, t: &intrinsics::Type,
                          any_changes_needed: &mut bool) -> Vec<Type> {
                use intrinsics::Type::*;
                match *t {
                    Void => vec![Type::void(ccx)],
                    Integer(_signed, width, llvm_width) => {
                        *any_changes_needed |= width != llvm_width;
                        vec![Type::ix(ccx, llvm_width as u64)]
                    }
                    Float(x) => {
                        match x {
                            32 => vec![Type::f32(ccx)],
                            64 => vec![Type::f64(ccx)],
                            _ => bug!()
                        }
                    }
                    Pointer(ref t, ref llvm_elem, _const) => {
                        *any_changes_needed |= llvm_elem.is_some();

                        let t = llvm_elem.as_ref().unwrap_or(t);
                        let elem = one(ty_to_type(ccx, t,
                                                  any_changes_needed));
                        vec![elem.ptr_to()]
                    }
                    Vector(ref t, ref llvm_elem, length) => {
                        *any_changes_needed |= llvm_elem.is_some();

                        let t = llvm_elem.as_ref().unwrap_or(t);
                        let elem = one(ty_to_type(ccx, t,
                                                  any_changes_needed));
                        vec![Type::vector(&elem,
                                          length as u64)]
                    }
                    Aggregate(false, ref contents) => {
                        let elems = contents.iter()
                                            .map(|t| one(ty_to_type(ccx, t, any_changes_needed)))
                                            .collect::<Vec<_>>();
                        vec![Type::struct_(ccx, &elems, false)]
                    }
                    Aggregate(true, ref contents) => {
                        *any_changes_needed = true;
                        contents.iter()
                                .flat_map(|t| ty_to_type(ccx, t, any_changes_needed))
                                .collect()
                    }
                }
            }

            // This allows an argument list like `foo, (bar, baz),
            // qux` to be converted into `foo, bar, baz, qux`, integer
            // arguments to be truncated as needed and pointers to be
            // cast.
            fn modify_as_needed<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
                                            t: &intrinsics::Type,
                                            arg_type: Ty<'tcx>,
                                            llarg: ValueRef)
                                            -> Vec<ValueRef>
            {
                match *t {
                    intrinsics::Type::Aggregate(true, ref contents) => {
                        // We found a tuple that needs squishing! So
                        // run over the tuple and load each field.
                        //
                        // This assumes the type is "simple", i.e. no
                        // destructors, and the contents are SIMD
                        // etc.
                        assert!(!bcx.fcx.type_needs_drop(arg_type));

                        let repr = adt::represent_type(bcx.ccx(), arg_type);
                        let repr_ptr = &repr;
                        let arg = adt::MaybeSizedValue::sized(llarg);
                        (0..contents.len())
                            .map(|i| {
                                Load(bcx, adt::trans_field_ptr(bcx, repr_ptr, arg, Disr(0), i))
                            })
                            .collect()
                    }
                    intrinsics::Type::Pointer(_, Some(ref llvm_elem), _) => {
                        let llvm_elem = one(ty_to_type(bcx.ccx(), llvm_elem, &mut false));
                        vec![PointerCast(bcx, llarg,
                                         llvm_elem.ptr_to())]
                    }
                    intrinsics::Type::Vector(_, Some(ref llvm_elem), length) => {
                        let llvm_elem = one(ty_to_type(bcx.ccx(), llvm_elem, &mut false));
                        vec![BitCast(bcx, llarg,
                                     Type::vector(&llvm_elem, length as u64))]
                    }
                    intrinsics::Type::Integer(_, width, llvm_width) if width != llvm_width => {
                        // the LLVM intrinsic uses a smaller integer
                        // size than the C intrinsic's signature, so
                        // we have to trim it down here.
                        vec![Trunc(bcx, llarg, Type::ix(bcx.ccx(), llvm_width as u64))]
                    }
                    _ => vec![llarg],
                }
            }

            let mut any_changes_needed = false;
            let inputs = intr.inputs.iter()
                                    .flat_map(|t| ty_to_type(ccx, t, &mut any_changes_needed))
                                    .collect::<Vec<_>>();

            let mut out_changes = false;
            let outputs = one(ty_to_type(ccx, &intr.output, &mut out_changes));
            // outputting a flattened aggregate is nonsense
            assert!(!out_changes);

            let llargs = if !any_changes_needed {
                // no aggregates to flatten, so no change needed
                llargs.clone()
            } else {
                // there are some aggregates that need to be flattened
                // in the LLVM call, so we need to run over the types
                // again to find them and extract the arguments
                intr.inputs.iter()
                           .zip(&llargs)
                           .zip(&arg_tys)
                           .flat_map(|((t, llarg), ty)| modify_as_needed(bcx, t, ty, *llarg))
                           .collect()
            };
            assert_eq!(inputs.len(), llargs.len());

            let val = match intr.definition {
                intrinsics::IntrinsicDef::Named(name) => {
                    let f = declare::declare_cfn(ccx,
                                                 name,
                                                 Type::func(&inputs, &outputs));
                    Call(bcx, f, &llargs, call_debug_location)
                }
            };

            match intr.output {
                intrinsics::Type::Aggregate(flatten, ref elems) => {
                    // the output is a tuple so we need to munge it properly
                    assert!(!flatten);

                    for i in 0..elems.len() {
                        let val = ExtractValue(bcx, val, i);
                        Store(bcx, val, StructGEP(bcx, llresult, i));
                    }
                    C_nil(ccx)
                }
                _ => val,
            }
        }
    };

    if val_ty(llval) != Type::void(ccx) &&
       machine::llsize_of_alloc(ccx, val_ty(llval)) != 0 {
        if let Some(ty) = fn_ty.ret.cast {
            let ptr = PointerCast(bcx, llresult, ty.ptr_to());
            let store = Store(bcx, llval, ptr);
            unsafe {
                llvm::LLVMSetAlignment(store, type_of::align_of(ccx, ret_ty));
            }
        } else {
            store_ty(bcx, llval, llresult, ret_ty);
        }
    }

    // If we made a temporary stack slot, let's clean it up
    match dest {
        expr::Ignore => {
            bcx = glue::drop_ty(bcx, llresult, ret_ty, call_debug_location);
            call_lifetime_end(bcx, llresult);
        }
        expr::SaveIn(_) => {}
    }

    fcx.pop_and_trans_custom_cleanup_scope(bcx, cleanup_scope);

    Result::new(bcx, llresult)
}

fn copy_intrinsic<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
                              allow_overlap: bool,
                              volatile: bool,
                              tp_ty: Ty<'tcx>,
                              dst: ValueRef,
                              src: ValueRef,
                              count: ValueRef,
                              call_debug_location: DebugLoc)
                              -> ValueRef {
    let ccx = bcx.ccx();
    let lltp_ty = type_of::type_of(ccx, tp_ty);
    let align = C_i32(ccx, type_of::align_of(ccx, tp_ty) as i32);
    let size = machine::llsize_of(ccx, lltp_ty);
    let int_size = machine::llbitsize_of_real(ccx, ccx.int_type());

    let operation = if allow_overlap {
        "memmove"
    } else {
        "memcpy"
    };

    let name = format!("llvm.{}.p0i8.p0i8.i{}", operation, int_size);
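    // e.g. "llvm.memcpy.p0i8.p0i8.i64" (or the "memmove" variant when overlap
    // is allowed) on a 64-bit target, since `int_size` is the bit width of the
    // target's usize.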

    let dst_ptr = PointerCast(bcx, dst, Type::i8p(ccx));
    let src_ptr = PointerCast(bcx, src, Type::i8p(ccx));
    let llfn = ccx.get_intrinsic(&name);

    Call(bcx,
         llfn,
         &[dst_ptr,
           src_ptr,
           Mul(bcx, size, count, DebugLoc::None),
           align,
           C_bool(ccx, volatile)],
         call_debug_location)
}

fn memset_intrinsic<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
                                volatile: bool,
                                tp_ty: Ty<'tcx>,
                                dst: ValueRef,
                                val: ValueRef,
                                count: ValueRef,
                                call_debug_location: DebugLoc)
                                -> ValueRef {
    let ccx = bcx.ccx();
    let lltp_ty = type_of::type_of(ccx, tp_ty);
    let align = C_i32(ccx, type_of::align_of(ccx, tp_ty) as i32);
    let size = machine::llsize_of(ccx, lltp_ty);
    let int_size = machine::llbitsize_of_real(ccx, ccx.int_type());

    let name = format!("llvm.memset.p0i8.i{}", int_size);
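    // e.g. "llvm.memset.p0i8.i64" on a 64-bit target.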

    let dst_ptr = PointerCast(bcx, dst, Type::i8p(ccx));
    let llfn = ccx.get_intrinsic(&name);

    Call(bcx,
         llfn,
         &[dst_ptr,
           val,
           Mul(bcx, size, count, DebugLoc::None),
           align,
           C_bool(ccx, volatile)],
         call_debug_location)
}

fn count_zeros_intrinsic(bcx: Block,
                         name: &str,
                         val: ValueRef,
                         call_debug_location: DebugLoc)
                         -> ValueRef {
    let y = C_bool(bcx.ccx(), false);
    let llfn = bcx.ccx().get_intrinsic(&name);
    Call(bcx, llfn, &[val, y], call_debug_location)
}

fn with_overflow_intrinsic<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
                                       name: &str,
                                       a: ValueRef,
                                       b: ValueRef,
                                       out: ValueRef,
                                       call_debug_location: DebugLoc)
                                       -> ValueRef {
    let llfn = bcx.ccx().get_intrinsic(&name);

    // Convert `i1` to a `bool`, and write it to the out parameter
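    // (Illustration: `add_with_overflow` on u8 with 255 + 1 calls
    // `llvm.uadd.with.overflow.i8`, which returns { i8 0, i1 true }; that is
    // stored to `out` as the pair (0u8, true).)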
    let val = Call(bcx, llfn, &[a, b], call_debug_location);
    let result = ExtractValue(bcx, val, 0);
    let overflow = ZExt(bcx, ExtractValue(bcx, val, 1), Type::bool(bcx.ccx()));
    Store(bcx, result, StructGEP(bcx, out, 0));
    Store(bcx, overflow, StructGEP(bcx, out, 1));

    C_nil(bcx.ccx())
}

fn try_intrinsic<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
                             func: ValueRef,
                             data: ValueRef,
                             local_ptr: ValueRef,
                             dest: ValueRef,
                             dloc: DebugLoc) -> Block<'blk, 'tcx> {
    if bcx.sess().no_landing_pads() {
        Call(bcx, func, &[data], dloc);
        Store(bcx, C_null(Type::i8p(bcx.ccx())), dest);
        bcx
    } else if wants_msvc_seh(bcx.sess()) {
        trans_msvc_try(bcx, func, data, local_ptr, dest, dloc)
    } else {
        trans_gnu_try(bcx, func, data, local_ptr, dest, dloc)
    }
}

// MSVC's definition of the `rust_try` function.
//
// This implementation uses the new exception handling instructions in LLVM
// which have support in LLVM for SEH on MSVC targets. Although these
// instructions are meant to work for all targets, as of this writing LLVM
// does not recommend using them, as the old-style instructions are still
// better optimized.
fn trans_msvc_try<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
                              func: ValueRef,
                              data: ValueRef,
                              local_ptr: ValueRef,
                              dest: ValueRef,
                              dloc: DebugLoc) -> Block<'blk, 'tcx> {
    let llfn = get_rust_try_fn(bcx.fcx, &mut |bcx| {
        let ccx = bcx.ccx();
        let dloc = DebugLoc::None;

        SetPersonalityFn(bcx, bcx.fcx.eh_personality());

        let normal = bcx.fcx.new_temp_block("normal");
        let catchswitch = bcx.fcx.new_temp_block("catchswitch");
        let catchpad = bcx.fcx.new_temp_block("catchpad");
        let caught = bcx.fcx.new_temp_block("caught");

        let func = llvm::get_param(bcx.fcx.llfn, 0);
        let data = llvm::get_param(bcx.fcx.llfn, 1);
        let local_ptr = llvm::get_param(bcx.fcx.llfn, 2);

        // We're generating an IR snippet that looks like:
        //
        //   declare i32 @rust_try(%func, %data, %ptr) {
        //      %slot = alloca i64*
        //      invoke %func(%data) to label %normal unwind label %catchswitch
        //
        //   normal:
        //      ret i32 0
        //
        //   catchswitch:
        //      %cs = catchswitch within none [%catchpad] unwind to caller
        //
        //   catchpad:
        //      %tok = catchpad within %cs [%type_descriptor, 0, %slot]
        //      %ptr[0] = %slot[0]
        //      %ptr[1] = %slot[1]
        //      catchret from %tok to label %caught
        //
        //   caught:
        //      ret i32 1
        //   }
        //
        // This structure follows the basic usage of throw/try/catch in LLVM.
        // For example, compile this C++ snippet to see what LLVM generates:
        //
        //      #include <stdint.h>
        //
        //      int bar(void (*foo)(void), uint64_t *ret) {
        //          try {
        //              foo();
        //              return 0;
        //          } catch(uint64_t a[2]) {
        //              ret[0] = a[0];
        //              ret[1] = a[1];
        //              return 1;
        //          }
        //      }
        //
        // More information can be found in libstd's seh.rs implementation.
        let i64p = Type::i64(ccx).ptr_to();
        let slot = Alloca(bcx, i64p, "slot");
        Invoke(bcx, func, &[data], normal.llbb, catchswitch.llbb, dloc);

        Ret(normal, C_i32(ccx, 0), dloc);

        let cs = CatchSwitch(catchswitch, None, None, 1);
        AddHandler(catchswitch, cs, catchpad.llbb);

        let tcx = ccx.tcx();
        let tydesc = match tcx.lang_items.msvc_try_filter() {
            Some(did) => ::consts::get_static(ccx, did).to_llref(),
            None => bug!("msvc_try_filter not defined"),
        };
        let tok = CatchPad(catchpad, cs, &[tydesc, C_i32(ccx, 0), slot]);
        let addr = Load(catchpad, slot);
        let arg1 = Load(catchpad, addr);
        let val1 = C_i32(ccx, 1);
        let arg2 = Load(catchpad, InBoundsGEP(catchpad, addr, &[val1]));
        let local_ptr = BitCast(catchpad, local_ptr, i64p);
        Store(catchpad, arg1, local_ptr);
        Store(catchpad, arg2, InBoundsGEP(catchpad, local_ptr, &[val1]));
        CatchRet(catchpad, tok, caught.llbb);

        Ret(caught, C_i32(ccx, 1), dloc);
    });

    // Note that no invoke is used here because by definition this function
    // can't panic (that's what it's catching).
    let ret = Call(bcx, llfn, &[func, data, local_ptr], dloc);
    Store(bcx, ret, dest);
    return bcx;
}

// Definition of the standard "try" function for Rust using the GNU-like model
// of exceptions (e.g. the normal semantics of LLVM's landingpad and invoke
// instructions).
//
// This translation is a little surprising because we always call a shim
// function instead of inlining the call to `invoke` manually here. This is done
// because in LLVM we're only allowed to have one personality per function
// definition. The call to the `try` intrinsic is being inlined into the
// function calling it, and that function may already have other personality
// functions in play. By calling a shim we're guaranteed that our shim will have
// the right personality function.
fn trans_gnu_try<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
                             func: ValueRef,
                             data: ValueRef,
                             local_ptr: ValueRef,
                             dest: ValueRef,
                             dloc: DebugLoc) -> Block<'blk, 'tcx> {
    let llfn = get_rust_try_fn(bcx.fcx, &mut |bcx| {
        let ccx = bcx.ccx();
        let tcx = ccx.tcx();
        let dloc = DebugLoc::None;

        // Translates the shims described above:
        //
        //   bcx:
        //      invoke %func(%args...) normal %normal unwind %catch
        //
        //   normal:
        //      ret 0
        //
        //   catch:
        //      (ptr, _) = landingpad
        //      store ptr, %local_ptr
        //      ret 1
        //
        // Note that the `local_ptr` data passed into the `try` intrinsic is
        // expected to be `*mut *mut u8` for this to actually work, but that's
        // managed by the standard library.

        attributes::emit_uwtable(bcx.fcx.llfn, true);
        let catch_pers = match tcx.lang_items.eh_personality_catch() {
            Some(did) => {
                Callee::def(ccx, did, tcx.mk_substs(Substs::empty())).reify(ccx).val
            }
            None => bug!("eh_personality_catch not defined"),
        };

        let then = bcx.fcx.new_temp_block("then");
        let catch = bcx.fcx.new_temp_block("catch");

        let func = llvm::get_param(bcx.fcx.llfn, 0);
        let data = llvm::get_param(bcx.fcx.llfn, 1);
        let local_ptr = llvm::get_param(bcx.fcx.llfn, 2);
        Invoke(bcx, func, &[data], then.llbb, catch.llbb, dloc);
        Ret(then, C_i32(ccx, 0), dloc);

        // Type indicator for the exception being thrown.
        //
        // The first value in this tuple is a pointer to the exception object
        // being thrown. The second value is a "selector" indicating which of
        // the landing pad clauses the exception's type had been matched to.
        // rust_try ignores the selector.
        let lpad_ty = Type::struct_(ccx, &[Type::i8p(ccx), Type::i32(ccx)],
                                    false);
        let vals = LandingPad(catch, lpad_ty, catch_pers, 1);
        AddClause(catch, vals, C_null(Type::i8p(ccx)));
        let ptr = ExtractValue(catch, vals, 0);
        Store(catch, ptr, BitCast(catch, local_ptr, Type::i8p(ccx).ptr_to()));
        Ret(catch, C_i32(ccx, 1), dloc);
    });

    // Note that no invoke is used here because by definition this function
    // can't panic (that's what it's catching).
    let ret = Call(bcx, llfn, &[func, data, local_ptr], dloc);
    Store(bcx, ret, dest);
    return bcx;
}

// Helper function to give a Block to a closure to translate a shim function.
// This is currently primarily used for the `try` intrinsic functions above.
fn gen_fn<'a, 'tcx>(fcx: &FunctionContext<'a, 'tcx>,
                    name: &str,
                    inputs: Vec<Ty<'tcx>>,
                    output: ty::FnOutput<'tcx>,
                    trans: &mut for<'b> FnMut(Block<'b, 'tcx>))
                    -> ValueRef {
    let ccx = fcx.ccx;
    let sig = ty::FnSig {
        inputs: inputs,
        output: output,
        variadic: false,
    };
    let fn_ty = FnType::new(ccx, Abi::Rust, &sig, &[]);

    let rust_fn_ty = ccx.tcx().mk_fn_ptr(ccx.tcx().mk_bare_fn(ty::BareFnTy {
        unsafety: hir::Unsafety::Unsafe,
        abi: Abi::Rust,
        sig: ty::Binder(sig)
    }));
    let llfn = declare::define_internal_fn(ccx, name, rust_fn_ty);
    let (fcx, block_arena);
    block_arena = TypedArena::new();
    fcx = FunctionContext::new(ccx, llfn, fn_ty, None, &block_arena);
    let bcx = fcx.init(true, None);
    trans(bcx);
    fcx.cleanup();
    llfn
}

// Helper function used to get a handle to the `__rust_try` function used to
// catch exceptions.
//
// This function is only generated once and is then cached.
fn get_rust_try_fn<'a, 'tcx>(fcx: &FunctionContext<'a, 'tcx>,
                             trans: &mut for<'b> FnMut(Block<'b, 'tcx>))
                             -> ValueRef {
    let ccx = fcx.ccx;
    if let Some(llfn) = ccx.rust_try_fn().get() {
        return llfn;
    }

    // Define the type up front for the signature of the rust_try function.
    let tcx = ccx.tcx();
    let i8p = tcx.mk_mut_ptr(tcx.types.i8);
    let fn_ty = tcx.mk_fn_ptr(tcx.mk_bare_fn(ty::BareFnTy {
        unsafety: hir::Unsafety::Unsafe,
        abi: Abi::Rust,
        sig: ty::Binder(ty::FnSig {
            inputs: vec![i8p],
            output: ty::FnOutput::FnConverging(tcx.mk_nil()),
            variadic: false,
        }),
    }));
    let output = ty::FnOutput::FnConverging(tcx.types.i32);
    let rust_try = gen_fn(fcx, "__rust_try", vec![fn_ty, i8p, i8p], output, trans);
    ccx.rust_try_fn().set(Some(rust_try));
    return rust_try;
}

fn span_invalid_monomorphization_error(a: &Session, b: Span, c: &str) {
    span_err!(a, b, E0511, "{}", c);
}

fn generic_simd_intrinsic<'blk, 'tcx, 'a>
    (bcx: Block<'blk, 'tcx>,
     name: &str,
     substs: &'tcx subst::Substs<'tcx>,
     callee_ty: Ty<'tcx>,
     args: Option<&[P<hir::Expr>]>,
     llargs: &[ValueRef],
     ret_ty: Ty<'tcx>,
     llret_ty: Type,
     call_debug_location: DebugLoc,
     span: Span) -> ValueRef
{
    // macros for error handling:
    macro_rules! emit_error {
        ($msg: tt) => {
            emit_error!($msg, )
        };
        ($msg: tt, $($fmt: tt)*) => {
            span_invalid_monomorphization_error(
                bcx.sess(), span,
                &format!(concat!("invalid monomorphization of `{}` intrinsic: ",
                                 $msg),
                         name, $($fmt)*));
        }
    }
    macro_rules! require {
        ($cond: expr, $($fmt: tt)*) => {
            if !$cond {
                emit_error!($($fmt)*);
                return C_nil(bcx.ccx())
            }
        }
    }
    macro_rules! require_simd {
        ($ty: expr, $position: expr) => {
            require!($ty.is_simd(), "expected SIMD {} type, found non-SIMD `{}`", $position, $ty)
        }
    }

    let tcx = bcx.tcx();
    let sig = tcx.erase_late_bound_regions(callee_ty.fn_sig());
    let sig = tcx.normalize_associated_type(&sig);
    let arg_tys = sig.inputs;

    // every intrinsic takes a SIMD vector as its first argument
    require_simd!(arg_tys[0], "input");
    let in_ty = arg_tys[0];
    let in_elem = arg_tys[0].simd_type(tcx);
    let in_len = arg_tys[0].simd_size(tcx);

    let comparison = match name {
        "simd_eq" => Some(hir::BiEq),
        "simd_ne" => Some(hir::BiNe),
        "simd_lt" => Some(hir::BiLt),
        "simd_le" => Some(hir::BiLe),
        "simd_gt" => Some(hir::BiGt),
        "simd_ge" => Some(hir::BiGe),
        _ => None
    };

    if let Some(cmp_op) = comparison {
        require_simd!(ret_ty, "return");

        let out_len = ret_ty.simd_size(tcx);
        require!(in_len == out_len,
                 "expected return type with length {} (same as input type `{}`), \
                  found `{}` with length {}",
                 in_len, in_ty,
                 ret_ty, out_len);
        require!(llret_ty.element_type().kind() == llvm::Integer,
                 "expected return type with integer elements, found `{}` with non-integer `{}`",
                 ret_ty,
                 ret_ty.simd_type(tcx));

        return compare_simd_types(bcx,
                                  llargs[0],
                                  llargs[1],
                                  in_elem,
                                  llret_ty,
                                  cmp_op,
                                  call_debug_location)
    }

    if name.starts_with("simd_shuffle") {
        let n: usize = match name["simd_shuffle".len()..].parse() {
            Ok(n) => n,
            Err(_) => span_bug!(span,
                                "bad `simd_shuffle` instruction only caught in trans?")
        };
        require_simd!(ret_ty, "return");

        let out_len = ret_ty.simd_size(tcx);
        require!(out_len == n,
                 "expected return type of length {}, found `{}` with length {}",
                 n, ret_ty, out_len);
        require!(in_elem == ret_ty.simd_type(tcx),
                 "expected return element type `{}` (element of input `{}`), \
                  found `{}` with element type `{}`",
                 in_elem, in_ty,
                 ret_ty, ret_ty.simd_type(tcx));

        let total_len = in_len as u64 * 2;

        let vector = match args {
            Some(args) => {
                match consts::const_expr(bcx.ccx(), &args[2], substs, None,
                                         // this should probably help simd error reporting
                                         consts::TrueConst::Yes) {
                    Ok((vector, _)) => vector,
                    Err(err) => bcx.sess().span_fatal(span, &err.description()),
                }
            }
            None => llargs[2]
        };

        let indices: Option<Vec<_>> = (0..n)
            .map(|i| {
                let arg_idx = i;
                let val = const_get_elt(vector, &[i as libc::c_uint]);
                match const_to_opt_uint(val) {
                    None => {
                        emit_error!("shuffle index #{} is not a constant", arg_idx);
                        None
                    }
                    Some(idx) if idx >= total_len => {
                        emit_error!("shuffle index #{} is out of bounds (limit {})",
                                    arg_idx, total_len);
                        None
                    }
                    Some(idx) => Some(C_i32(bcx.ccx(), idx as i32)),
                }
            })
            .collect();
        let indices = match indices {
            Some(i) => i,
            None => return C_null(llret_ty)
        };

        return ShuffleVector(bcx, llargs[0], llargs[1], C_vector(&indices))
    }

    if name == "simd_insert" {
        require!(in_elem == arg_tys[2],
                 "expected inserted type `{}` (element of input `{}`), found `{}`",
                 in_elem, in_ty, arg_tys[2]);
        return InsertElement(bcx, llargs[0], llargs[2], llargs[1])
    }
    if name == "simd_extract" {
        require!(ret_ty == in_elem,
                 "expected return type `{}` (element of input `{}`), found `{}`",
                 in_elem, in_ty, ret_ty);
        return ExtractElement(bcx, llargs[0], llargs[1])
    }

    if name == "simd_cast" {
        require_simd!(ret_ty, "return");
        let out_len = ret_ty.simd_size(tcx);
        require!(in_len == out_len,
                 "expected return type with length {} (same as input type `{}`), \
                  found `{}` with length {}",
                 in_len, in_ty,
                 ret_ty, out_len);
        // casting cares about nominal type, not just structural type
        let out_elem = ret_ty.simd_type(tcx);

        if in_elem == out_elem { return llargs[0]; }

        enum Style { Float, Int(/* is signed? */ bool), Unsupported }

        let (in_style, in_width) = match in_elem.sty {
            // vectors of pointer-sized integers should've been
            // disallowed before here, so this unwrap is safe.
            ty::TyInt(i) => (Style::Int(true), i.bit_width().unwrap()),
            ty::TyUint(u) => (Style::Int(false), u.bit_width().unwrap()),
            ty::TyFloat(f) => (Style::Float, f.bit_width()),
            _ => (Style::Unsupported, 0)
        };
        let (out_style, out_width) = match out_elem.sty {
            ty::TyInt(i) => (Style::Int(true), i.bit_width().unwrap()),
            ty::TyUint(u) => (Style::Int(false), u.bit_width().unwrap()),
            ty::TyFloat(f) => (Style::Float, f.bit_width()),
            _ => (Style::Unsupported, 0)
        };

        match (in_style, out_style) {
            (Style::Int(in_is_signed), Style::Int(_)) => {
                return match in_width.cmp(&out_width) {
                    Ordering::Greater => Trunc(bcx, llargs[0], llret_ty),
                    Ordering::Equal => llargs[0],
                    Ordering::Less => if in_is_signed {
                        SExt(bcx, llargs[0], llret_ty)
                    } else {
                        ZExt(bcx, llargs[0], llret_ty)
                    }
                }
            }
            (Style::Int(in_is_signed), Style::Float) => {
                return if in_is_signed {
                    SIToFP(bcx, llargs[0], llret_ty)
                } else {
                    UIToFP(bcx, llargs[0], llret_ty)
                }
            }
            (Style::Float, Style::Int(out_is_signed)) => {
                return if out_is_signed {
                    FPToSI(bcx, llargs[0], llret_ty)
                } else {
                    FPToUI(bcx, llargs[0], llret_ty)
                }
            }
            (Style::Float, Style::Float) => {
                return match in_width.cmp(&out_width) {
                    Ordering::Greater => FPTrunc(bcx, llargs[0], llret_ty),
                    Ordering::Equal => llargs[0],
                    Ordering::Less => FPExt(bcx, llargs[0], llret_ty)
                }
            }
            _ => {/* Unsupported. Fallthrough. */}
        }
        require!(false,
                 "unsupported cast from `{}` with element `{}` to `{}` with element `{}`",
                 in_ty, in_elem,
                 ret_ty, out_elem);
    }

    macro_rules! arith {
        ($($name: ident: $($($p: ident),* => $call: expr),*;)*) => {
            $(
                if name == stringify!($name) {
                    match in_elem.sty {
                        $(
                            $(ty::$p(_))|* => {
                                return $call(bcx, llargs[0], llargs[1], call_debug_location)
                            }
                            )*
                        _ => {},
                    }
                    require!(false,
                             "unsupported operation on `{}` with element `{}`",
                             in_ty,
                             in_elem)
                })*
        }
    }
    arith! {
        simd_add: TyUint, TyInt => Add, TyFloat => FAdd;
        simd_sub: TyUint, TyInt => Sub, TyFloat => FSub;
        simd_mul: TyUint, TyInt => Mul, TyFloat => FMul;
        simd_div: TyFloat => FDiv;
        simd_shl: TyUint, TyInt => Shl;
        simd_shr: TyUint => LShr, TyInt => AShr;
        simd_and: TyUint, TyInt => And;
        simd_or: TyUint, TyInt => Or;
        simd_xor: TyUint, TyInt => Xor;
    }
    span_bug!(span, "unknown SIMD intrinsic");
}

// Returns the width of an int TypeVariant, and if it's signed or not
// Returns None if the type is not an integer
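// e.g. `i32` yields `Some((32, true))`, `u64` yields `Some((64, false))` and
// `f32` yields `None`.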
fn int_type_width_signed<'tcx>(sty: &ty::TypeVariants<'tcx>, ccx: &CrateContext)
        -> Option<(u64, bool)> {
    use rustc::ty::{TyInt, TyUint};
    match *sty {
        TyInt(t) => Some((match t {
            ast::IntTy::Is => {
                match &ccx.tcx().sess.target.target.target_pointer_width[..] {
                    "32" => 32,
                    "64" => 64,
                    tws => bug!("Unsupported target word size for isize: {}", tws),
                }
            },
            ast::IntTy::I8 => 8,
            ast::IntTy::I16 => 16,
            ast::IntTy::I32 => 32,
            ast::IntTy::I64 => 64,
        }, true)),
        TyUint(t) => Some((match t {
            ast::UintTy::Us => {
                match &ccx.tcx().sess.target.target.target_pointer_width[..] {
                    "32" => 32,
                    "64" => 64,
                    tws => bug!("Unsupported target word size for usize: {}", tws),
                }
            },
            ast::UintTy::U8 => 8,
            ast::UintTy::U16 => 16,
            ast::UintTy::U32 => 32,
            ast::UintTy::U64 => 64,
        }, false)),
        _ => None,
    }
}

// Returns the width of a float TypeVariant
// Returns None if the type is not a float
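// e.g. `f64` yields `Some(64)`, while `i32` yields `None`.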
fn float_type_width<'tcx>(sty: &ty::TypeVariants<'tcx>)
        -> Option<u64> {
    use rustc::ty::TyFloat;
    match *sty {
        TyFloat(t) => Some(match t {
            ast::FloatTy::F32 => 32,
            ast::FloatTy::F64 => 64,
        }),
        _ => None,
    }
}