// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.

#![allow(non_upper_case_globals)]

use arena::TypedArena;
use intrinsics::{self, Intrinsic};
use libc;
use llvm;
use llvm::{ValueRef, TypeKind};
use rustc::ty::subst::Substs;
use abi::{Abi, FnType};
use adt;
use base::*;
use build::*;
use callee::{self, Callee};
use cleanup;
use cleanup::CleanupMethods;
use common::*;
use consts;
use datum::*;
use debuginfo::DebugLoc;
use declare;
use expr;
use glue;
use type_of;
use machine;
use type_::Type;
use rustc::ty::{self, Ty};
use Disr;
use rustc::hir;
use syntax::ast;
use syntax::ptr::P;
use syntax::parse::token;

use rustc::session::Session;
use rustc_const_eval::fatal_const_eval_err;
use syntax_pos::{Span, DUMMY_SP};

use std::cmp::Ordering;

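/// Maps a Rust intrinsic name to the LLVM intrinsic it lowers to directly,
/// for the "simple" cases where the two share a signature. Returns `None`
/// for intrinsics that need custom translation in `trans_intrinsic_call`.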
fn get_simple_intrinsic(ccx: &CrateContext, name: &str) -> Option<ValueRef> {
    let llvm_name = match name {
        "sqrtf32" => "llvm.sqrt.f32",
        "sqrtf64" => "llvm.sqrt.f64",
        "powif32" => "llvm.powi.f32",
        "powif64" => "llvm.powi.f64",
        "sinf32" => "llvm.sin.f32",
        "sinf64" => "llvm.sin.f64",
        "cosf32" => "llvm.cos.f32",
        "cosf64" => "llvm.cos.f64",
        "powf32" => "llvm.pow.f32",
        "powf64" => "llvm.pow.f64",
        "expf32" => "llvm.exp.f32",
        "expf64" => "llvm.exp.f64",
        "exp2f32" => "llvm.exp2.f32",
        "exp2f64" => "llvm.exp2.f64",
        "logf32" => "llvm.log.f32",
        "logf64" => "llvm.log.f64",
        "log10f32" => "llvm.log10.f32",
        "log10f64" => "llvm.log10.f64",
        "log2f32" => "llvm.log2.f32",
        "log2f64" => "llvm.log2.f64",
        "fmaf32" => "llvm.fma.f32",
        "fmaf64" => "llvm.fma.f64",
        "fabsf32" => "llvm.fabs.f32",
        "fabsf64" => "llvm.fabs.f64",
        "copysignf32" => "llvm.copysign.f32",
        "copysignf64" => "llvm.copysign.f64",
        "floorf32" => "llvm.floor.f32",
        "floorf64" => "llvm.floor.f64",
        "ceilf32" => "llvm.ceil.f32",
        "ceilf64" => "llvm.ceil.f64",
        "truncf32" => "llvm.trunc.f32",
        "truncf64" => "llvm.trunc.f64",
        "rintf32" => "llvm.rint.f32",
        "rintf64" => "llvm.rint.f64",
        "nearbyintf32" => "llvm.nearbyint.f32",
        "nearbyintf64" => "llvm.nearbyint.f64",
        "roundf32" => "llvm.round.f32",
        "roundf64" => "llvm.round.f64",
        "assume" => "llvm.assume",
        _ => return None
    };
    Some(ccx.get_intrinsic(&llvm_name))
}

/// Remember to add all intrinsics here, in librustc_typeck/check/mod.rs,
/// and in libcore/intrinsics.rs; if you need access to any LLVM intrinsics,
/// add them to librustc_trans/trans/context.rs.
pub fn trans_intrinsic_call<'a, 'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>,
                                            callee_ty: Ty<'tcx>,
                                            fn_ty: &FnType,
                                            args: callee::CallArgs<'a, 'tcx>,
                                            dest: expr::Dest,
                                            call_debug_location: DebugLoc)
                                            -> Result<'blk, 'tcx> {
    let fcx = bcx.fcx;
    let ccx = fcx.ccx;
    let tcx = bcx.tcx();

    let _icx = push_ctxt("trans_intrinsic_call");

    let (def_id, substs, sig) = match callee_ty.sty {
        ty::TyFnDef(def_id, substs, fty) => {
            let sig = tcx.erase_late_bound_regions(&fty.sig);
            (def_id, substs, tcx.normalize_associated_type(&sig))
        }
        _ => bug!("expected fn item type, found {}", callee_ty)
    };
    let arg_tys = sig.inputs;
    let ret_ty = sig.output;
    let name = tcx.item_name(def_id).as_str();

    let span = match call_debug_location {
        DebugLoc::At(_, span) | DebugLoc::ScopeAt(_, span) => span,
        DebugLoc::None => {
            span_bug!(fcx.span.unwrap_or(DUMMY_SP),
                      "intrinsic `{}` called with missing span", name);
        }
    };

    let cleanup_scope = fcx.push_custom_cleanup_scope();

    // For `transmute` we can just trans the input expr directly into dest
    if name == "transmute" {
        let llret_ty = type_of::type_of(ccx, ret_ty);
        match args {
            callee::ArgExprs(arg_exprs) => {
                assert_eq!(arg_exprs.len(), 1);

                let (in_type, out_type) = (substs.types[0],
                                           substs.types[1]);
                let llintype = type_of::type_of(ccx, in_type);
                let llouttype = type_of::type_of(ccx, out_type);

                let in_type_size = machine::llbitsize_of_real(ccx, llintype);
                let out_type_size = machine::llbitsize_of_real(ccx, llouttype);

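                // Transmuting a fn item type (which is zero-sized) to a
                // non-zero-sized type works by reifying the item to an
                // actual function pointer first.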
                if let ty::TyFnDef(def_id, substs, _) = in_type.sty {
                    if out_type_size != 0 {
                        // FIXME #19925 Remove this hack after a release cycle.
                        let _ = unpack_datum!(bcx, expr::trans(bcx, &arg_exprs[0]));
                        let llfn = Callee::def(ccx, def_id, substs).reify(ccx).val;
                        let llfnty = val_ty(llfn);
                        let llresult = match dest {
                            expr::SaveIn(d) => d,
                            expr::Ignore => alloc_ty(bcx, out_type, "ret")
                        };
                        Store(bcx, llfn, PointerCast(bcx, llresult, llfnty.ptr_to()));
                        if dest == expr::Ignore {
                            bcx = glue::drop_ty(bcx, llresult, out_type,
                                                call_debug_location);
                        }
                        fcx.scopes.borrow_mut().last_mut().unwrap().drop_non_lifetime_clean();
                        fcx.pop_and_trans_custom_cleanup_scope(bcx, cleanup_scope);
                        return Result::new(bcx, llresult);
                    }
                }

                // This should be caught by the intrinsicck pass
                assert_eq!(in_type_size, out_type_size);

                let nonpointer_nonaggregate = |llkind: TypeKind| -> bool {
                    use llvm::TypeKind::*;
                    match llkind {
                        Half | Float | Double | X86_FP80 | FP128 |
                            PPC_FP128 | Integer | Vector | X86_MMX => true,
                        _ => false
                    }
                };

                // An approximation to which types can be directly cast via
                // LLVM's bitcast.  This doesn't cover pointer -> pointer casts,
                // but does, importantly, cover SIMD types.
                let in_kind = llintype.kind();
                let ret_kind = llret_ty.kind();
                let bitcast_compatible =
                    (nonpointer_nonaggregate(in_kind) && nonpointer_nonaggregate(ret_kind)) || {
                        in_kind == TypeKind::Pointer && ret_kind == TypeKind::Pointer
                    };

                let dest = if bitcast_compatible {
                    // if we're here, the type is scalar-like (a primitive, a
                    // SIMD type or a pointer), and so can be handled as a
                    // by-value ValueRef and can also be directly bitcast to the
                    // target type.  Doing this special case makes conversions
                    // like `u32x4` -> `u64x2` much nicer for LLVM and so more
                    // efficient (these are done efficiently implicitly in C
                    // with the `__m128i` type and so this means Rust doesn't
                    // lose out there).
                    let expr = &arg_exprs[0];
                    let datum = unpack_datum!(bcx, expr::trans(bcx, expr));
                    let datum = unpack_datum!(bcx, datum.to_rvalue_datum(bcx, "transmute_temp"));
                    let val = if datum.kind.is_by_ref() {
                        load_ty(bcx, datum.val, datum.ty)
                    } else {
                        from_immediate(bcx, datum.val)
                    };

                    let cast_val = BitCast(bcx, val, llret_ty);

                    match dest {
                        expr::SaveIn(d) => {
                            // this often occurs in a sequence like `Store(val,
                            // d); val2 = Load(d)`, so disappears easily.
                            Store(bcx, cast_val, d);
                        }
                        expr::Ignore => {}
                    }
                    dest
                } else {
                    // The types are too complicated to do with a by-value
                    // bitcast, so pointer cast instead. We need to cast the
                    // dest so the types work out.
                    let dest = match dest {
                        expr::SaveIn(d) => expr::SaveIn(PointerCast(bcx, d, llintype.ptr_to())),
                        expr::Ignore => expr::Ignore
                    };
                    bcx = expr::trans_into(bcx, &arg_exprs[0], dest);
                    dest
                };

                fcx.scopes.borrow_mut().last_mut().unwrap().drop_non_lifetime_clean();
                fcx.pop_and_trans_custom_cleanup_scope(bcx, cleanup_scope);

                return match dest {
                    expr::SaveIn(d) => Result::new(bcx, d),
                    expr::Ignore => Result::new(bcx, C_undef(llret_ty.ptr_to()))
                };

            }

            _ => {
                bug!("expected expr as argument for transmute");
            }
        }
    }

    // For `move_val_init` we can evaluate the destination address
    // (the first argument) and then trans the source value (the
    // second argument) directly into the resulting destination
    // address.
    if name == "move_val_init" {
        if let callee::ArgExprs(ref exprs) = args {
            let (dest_expr, source_expr) = if exprs.len() != 2 {
                bug!("expected two exprs as arguments for `move_val_init` intrinsic");
            } else {
                (&exprs[0], &exprs[1])
            };

            // evaluate destination address
            let dest_datum = unpack_datum!(bcx, expr::trans(bcx, dest_expr));
            let dest_datum = unpack_datum!(
                bcx, dest_datum.to_rvalue_datum(bcx, "arg"));
            let dest_datum = unpack_datum!(
                bcx, dest_datum.to_appropriate_datum(bcx));

            // `expr::trans_into(bcx, expr, dest)` is equiv to
            //
            //    `trans(bcx, expr).store_to_dest(dest)`,
            //
            // which for `dest == expr::SaveIn(addr)`, is equivalent to:
            //
            //    `trans(bcx, expr).store_to(bcx, addr)`.
            let lldest = expr::Dest::SaveIn(dest_datum.val);
            bcx = expr::trans_into(bcx, source_expr, lldest);

            let llresult = C_nil(ccx);
            fcx.pop_and_trans_custom_cleanup_scope(bcx, cleanup_scope);

            return Result::new(bcx, llresult);
        } else {
            bug!("expected two exprs as arguments for `move_val_init` intrinsic");
        }
    }

    // save the actual AST arguments for later (some places need to do
    // const-evaluation on them)
    let expr_arguments = match args {
        callee::ArgExprs(args) => Some(args),
        _ => None,
    };

    // Push the arguments.
    let mut llargs = Vec::new();
    bcx = callee::trans_args(bcx,
                             Abi::RustIntrinsic,
                             fn_ty,
                             &mut callee::Intrinsic,
                             args,
                             &mut llargs,
                             cleanup::CustomScope(cleanup_scope));

    fcx.scopes.borrow_mut().last_mut().unwrap().drop_non_lifetime_clean();

    // These are the only intrinsic functions that diverge.
    if name == "abort" {
        let llfn = ccx.get_intrinsic(&("llvm.trap"));
        Call(bcx, llfn, &[], call_debug_location);
        fcx.pop_and_trans_custom_cleanup_scope(bcx, cleanup_scope);
        Unreachable(bcx);
        return Result::new(bcx, C_undef(Type::nil(ccx).ptr_to()));
    } else if &name[..] == "unreachable" {
        fcx.pop_and_trans_custom_cleanup_scope(bcx, cleanup_scope);
        Unreachable(bcx);
        return Result::new(bcx, C_nil(ccx));
    }

    let llret_ty = type_of::type_of(ccx, ret_ty);

    // Get location to store the result. If the user does
    // not care about the result, just make a stack slot
    let llresult = match dest {
        expr::SaveIn(d) => d,
        expr::Ignore => {
            if !type_is_zero_size(ccx, ret_ty) {
                let llresult = alloc_ty(bcx, ret_ty, "intrinsic_result");
                call_lifetime_start(bcx, llresult);
                llresult
            } else {
                C_undef(llret_ty.ptr_to())
            }
        }
    };

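    // Dispatch: intrinsics with a direct LLVM equivalent become a plain call;
    // everything else is matched on by name below.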
    let simple = get_simple_intrinsic(ccx, &name);
    let llval = match (simple, &name[..]) {
        (Some(llfn), _) => {
            Call(bcx, llfn, &llargs, call_debug_location)
        }
        (_, "try") => {
            bcx = try_intrinsic(bcx, llargs[0], llargs[1], llargs[2], llresult,
                                call_debug_location);
            C_nil(ccx)
        }
        (_, "breakpoint") => {
            let llfn = ccx.get_intrinsic(&("llvm.debugtrap"));
            Call(bcx, llfn, &[], call_debug_location)
        }
        (_, "size_of") => {
            let tp_ty = substs.types[0];
            let lltp_ty = type_of::type_of(ccx, tp_ty);
            C_uint(ccx, machine::llsize_of_alloc(ccx, lltp_ty))
        }
        (_, "size_of_val") => {
            let tp_ty = substs.types[0];
            if !type_is_sized(tcx, tp_ty) {
                let (llsize, _) =
                    glue::size_and_align_of_dst(&bcx.build(), tp_ty, llargs[1]);
                llsize
            } else {
                let lltp_ty = type_of::type_of(ccx, tp_ty);
                C_uint(ccx, machine::llsize_of_alloc(ccx, lltp_ty))
            }
        }
        (_, "min_align_of") => {
            let tp_ty = substs.types[0];
            C_uint(ccx, type_of::align_of(ccx, tp_ty))
        }
        (_, "min_align_of_val") => {
            let tp_ty = substs.types[0];
            if !type_is_sized(tcx, tp_ty) {
                let (_, llalign) =
                    glue::size_and_align_of_dst(&bcx.build(), tp_ty, llargs[1]);
                llalign
            } else {
                C_uint(ccx, type_of::align_of(ccx, tp_ty))
            }
        }
        (_, "pref_align_of") => {
            let tp_ty = substs.types[0];
            let lltp_ty = type_of::type_of(ccx, tp_ty);
            C_uint(ccx, machine::llalign_of_pref(ccx, lltp_ty))
        }
        (_, "drop_in_place") => {
            let tp_ty = substs.types[0];
            let ptr = if type_is_sized(tcx, tp_ty) {
                llargs[0]
            } else {
                let scratch = rvalue_scratch_datum(bcx, tp_ty, "tmp");
                Store(bcx, llargs[0], expr::get_dataptr(bcx, scratch.val));
                Store(bcx, llargs[1], expr::get_meta(bcx, scratch.val));
                fcx.schedule_lifetime_end(cleanup::CustomScope(cleanup_scope), scratch.val);
                scratch.val
            };
            glue::drop_ty(bcx, ptr, tp_ty, call_debug_location);
            C_nil(ccx)
        }
        (_, "type_name") => {
            let tp_ty = substs.types[0];
            let ty_name = token::intern_and_get_ident(&tp_ty.to_string());
            C_str_slice(ccx, ty_name)
        }
        (_, "type_id") => {
            C_u64(ccx, ccx.tcx().type_id_hash(substs.types[0]))
        }
        (_, "init_dropped") => {
            let tp_ty = substs.types[0];
            if !type_is_zero_size(ccx, tp_ty) {
                drop_done_fill_mem(bcx, llresult, tp_ty);
            }
            C_nil(ccx)
        }
        (_, "init") => {
            let tp_ty = substs.types[0];
            if !type_is_zero_size(ccx, tp_ty) {
                // Just zero out the stack slot. (See comment on base::memzero for explanation)
                init_zero_mem(bcx, llresult, tp_ty);
            }
            C_nil(ccx)
        }
        // Effectively no-ops
        (_, "uninit") | (_, "forget") => {
            C_nil(ccx)
        }
        (_, "needs_drop") => {
            let tp_ty = substs.types[0];

            C_bool(ccx, bcx.fcx.type_needs_drop(tp_ty))
        }
        (_, "offset") => {
            let ptr = llargs[0];
            let offset = llargs[1];
            InBoundsGEP(bcx, ptr, &[offset])
        }
        (_, "arith_offset") => {
            let ptr = llargs[0];
            let offset = llargs[1];
            GEP(bcx, ptr, &[offset])
        }

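        // Note the argument order: the Rust `copy*` intrinsics take
        // (src, dst, count), so `llargs[1]` is the destination and
        // `llargs[0]` the source when calling `copy_intrinsic(.., dst, src, ..)`.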
        (_, "copy_nonoverlapping") => {
            copy_intrinsic(bcx,
                           false,
                           false,
                           substs.types[0],
                           llargs[1],
                           llargs[0],
                           llargs[2],
                           call_debug_location)
        }
        (_, "copy") => {
            copy_intrinsic(bcx,
                           true,
                           false,
                           substs.types[0],
                           llargs[1],
                           llargs[0],
                           llargs[2],
                           call_debug_location)
        }
        (_, "write_bytes") => {
            memset_intrinsic(bcx,
                             false,
                             substs.types[0],
                             llargs[0],
                             llargs[1],
                             llargs[2],
                             call_debug_location)
        }

        (_, "volatile_copy_nonoverlapping_memory") => {
            copy_intrinsic(bcx,
                           false,
                           true,
                           substs.types[0],
                           llargs[0],
                           llargs[1],
                           llargs[2],
                           call_debug_location)
        }
        (_, "volatile_copy_memory") => {
            copy_intrinsic(bcx,
                           true,
                           true,
                           substs.types[0],
                           llargs[0],
                           llargs[1],
                           llargs[2],
                           call_debug_location)
        }
        (_, "volatile_set_memory") => {
            memset_intrinsic(bcx,
                             true,
                             substs.types[0],
                             llargs[0],
                             llargs[1],
                             llargs[2],
                             call_debug_location)
        }
        (_, "volatile_load") => {
            let tp_ty = substs.types[0];
            let mut ptr = llargs[0];
            if let Some(ty) = fn_ty.ret.cast {
                ptr = PointerCast(bcx, ptr, ty.ptr_to());
            }
            let load = VolatileLoad(bcx, ptr);
            unsafe {
                llvm::LLVMSetAlignment(load, type_of::align_of(ccx, tp_ty));
            }
            to_immediate(bcx, load, tp_ty)
        },
        (_, "volatile_store") => {
            let tp_ty = substs.types[0];
            if type_is_fat_ptr(bcx.tcx(), tp_ty) {
                VolatileStore(bcx, llargs[1], expr::get_dataptr(bcx, llargs[0]));
                VolatileStore(bcx, llargs[2], expr::get_meta(bcx, llargs[0]));
            } else {
                let val = if fn_ty.args[1].is_indirect() {
                    Load(bcx, llargs[1])
                } else {
                    from_immediate(bcx, llargs[1])
                };
                let ptr = PointerCast(bcx, llargs[0], val_ty(val).ptr_to());
                let store = VolatileStore(bcx, val, ptr);
                unsafe {
                    llvm::LLVMSetAlignment(store, type_of::align_of(ccx, tp_ty));
                }
            }
            C_nil(ccx)
        },

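        // Integer intrinsics are dispatched on the bit width (and signedness)
        // of the monomorphized argument type; any non-integer type is an
        // invalid monomorphization.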
        (_, "ctlz") | (_, "cttz") | (_, "ctpop") | (_, "bswap") |
        (_, "add_with_overflow") | (_, "sub_with_overflow") | (_, "mul_with_overflow") |
        (_, "overflowing_add") | (_, "overflowing_sub") | (_, "overflowing_mul") |
        (_, "unchecked_div") | (_, "unchecked_rem") => {
            let sty = &arg_tys[0].sty;
            match int_type_width_signed(sty, ccx) {
                Some((width, signed)) =>
                    match &*name {
                        "ctlz" => count_zeros_intrinsic(bcx, &format!("llvm.ctlz.i{}", width),
                                                        llargs[0], call_debug_location),
                        "cttz" => count_zeros_intrinsic(bcx, &format!("llvm.cttz.i{}", width),
                                                        llargs[0], call_debug_location),
                        "ctpop" => Call(bcx, ccx.get_intrinsic(&format!("llvm.ctpop.i{}", width)),
                                        &llargs, call_debug_location),
                        "bswap" => {
                            if width == 8 {
                                llargs[0] // byte swap a u8/i8 is just a no-op
                            } else {
                                Call(bcx, ccx.get_intrinsic(&format!("llvm.bswap.i{}", width)),
                                        &llargs, call_debug_location)
                            }
                        }
                        "add_with_overflow" | "sub_with_overflow" | "mul_with_overflow" => {
                            let intrinsic = format!("llvm.{}{}.with.overflow.i{}",
                                                    if signed { 's' } else { 'u' },
                                                    &name[..3], width);
                            with_overflow_intrinsic(bcx, &intrinsic, llargs[0], llargs[1], llresult,
                                                    call_debug_location)
                        },
                        "overflowing_add" => Add(bcx, llargs[0], llargs[1], call_debug_location),
                        "overflowing_sub" => Sub(bcx, llargs[0], llargs[1], call_debug_location),
                        "overflowing_mul" => Mul(bcx, llargs[0], llargs[1], call_debug_location),
                        "unchecked_div" =>
                            if signed {
                                SDiv(bcx, llargs[0], llargs[1], call_debug_location)
                            } else {
                                UDiv(bcx, llargs[0], llargs[1], call_debug_location)
                            },
                        "unchecked_rem" =>
                            if signed {
                                SRem(bcx, llargs[0], llargs[1], call_debug_location)
                            } else {
                                URem(bcx, llargs[0], llargs[1], call_debug_location)
                            },
                        _ => bug!(),
                    },
                None => {
                    span_invalid_monomorphization_error(
                        tcx.sess, span,
                        &format!("invalid monomorphization of `{}` intrinsic: \
                                  expected basic integer type, found `{}`", name, sty));
                    C_nil(ccx)
                }
            }

        },
        (_, "fadd_fast") | (_, "fsub_fast") | (_, "fmul_fast") | (_, "fdiv_fast") |
        (_, "frem_fast") => {
            let sty = &arg_tys[0].sty;
            match float_type_width(sty) {
                Some(_width) =>
                    match &*name {
                        "fadd_fast" => FAddFast(bcx, llargs[0], llargs[1], call_debug_location),
                        "fsub_fast" => FSubFast(bcx, llargs[0], llargs[1], call_debug_location),
                        "fmul_fast" => FMulFast(bcx, llargs[0], llargs[1], call_debug_location),
                        "fdiv_fast" => FDivFast(bcx, llargs[0], llargs[1], call_debug_location),
                        "frem_fast" => FRemFast(bcx, llargs[0], llargs[1], call_debug_location),
                        _ => bug!(),
                    },
                None => {
                    span_invalid_monomorphization_error(
                        tcx.sess, span,
                        &format!("invalid monomorphization of `{}` intrinsic: \
                                  expected basic float type, found `{}`", name, sty));
                    C_nil(ccx)
                }
            }

        },

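        // `discriminant_value` is only meaningful for enums; for any other
        // type it returns zero.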
        (_, "discriminant_value") => {
            let val_ty = substs.types[0];
            match val_ty.sty {
                ty::TyEnum(..) => {
                    let repr = adt::represent_type(ccx, val_ty);
                    adt::trans_get_discr(bcx, &repr, llargs[0],
                                         Some(llret_ty), true)
                }
                _ => C_null(llret_ty)
            }
        }
        (_, name) if name.starts_with("simd_") => {
            generic_simd_intrinsic(bcx, name,
                                   substs,
                                   callee_ty,
                                   expr_arguments,
                                   &llargs,
                                   ret_ty, llret_ty,
                                   call_debug_location,
                                   span)
        }
        // This requires that atomic intrinsics follow a specific naming pattern:
        // "atomic_<operation>[_<ordering>]", and no ordering means SeqCst
        (_, name) if name.starts_with("atomic_") => {
            use llvm::AtomicOrdering::*;

            let split: Vec<&str> = name.split('_').collect();

            let is_cxchg = split[1] == "cxchg" || split[1] == "cxchgweak";
            let (order, failorder) = match split.len() {
                2 => (SequentiallyConsistent, SequentiallyConsistent),
                3 => match split[2] {
                    "unordered" => (Unordered, Unordered),
                    "relaxed" => (Monotonic, Monotonic),
                    "acq"     => (Acquire, Acquire),
                    "rel"     => (Release, Monotonic),
                    "acqrel"  => (AcquireRelease, Acquire),
                    "failrelaxed" if is_cxchg =>
                        (SequentiallyConsistent, Monotonic),
                    "failacq" if is_cxchg =>
                        (SequentiallyConsistent, Acquire),
                    _ => ccx.sess().fatal("unknown ordering in atomic intrinsic")
                },
                4 => match (split[2], split[3]) {
                    ("acq", "failrelaxed") if is_cxchg =>
                        (Acquire, Monotonic),
                    ("acqrel", "failrelaxed") if is_cxchg =>
                        (AcquireRelease, Monotonic),
                    _ => ccx.sess().fatal("unknown ordering in atomic intrinsic")
                },
                _ => ccx.sess().fatal("Atomic intrinsic not in correct format"),
            };

            match split[1] {
                "cxchg" | "cxchgweak" => {
                    let sty = &substs.types[0].sty;
                    if int_type_width_signed(sty, ccx).is_some() {
                        let weak = if split[1] == "cxchgweak" { llvm::True } else { llvm::False };
                        let val = AtomicCmpXchg(bcx, llargs[0], llargs[1], llargs[2],
                                                order, failorder, weak);
                        let result = ExtractValue(bcx, val, 0);
                        let success = ZExt(bcx, ExtractValue(bcx, val, 1), Type::bool(bcx.ccx()));
                        Store(bcx, result, StructGEP(bcx, llresult, 0));
                        Store(bcx, success, StructGEP(bcx, llresult, 1));
                    } else {
                        span_invalid_monomorphization_error(
                            tcx.sess, span,
                            &format!("invalid monomorphization of `{}` intrinsic: \
                                      expected basic integer type, found `{}`", name, sty));
                    }
                    C_nil(ccx)
                }

                "load" => {
                    let sty = &substs.types[0].sty;
                    if int_type_width_signed(sty, ccx).is_some() {
                        AtomicLoad(bcx, llargs[0], order)
                    } else {
                        span_invalid_monomorphization_error(
                            tcx.sess, span,
                            &format!("invalid monomorphization of `{}` intrinsic: \
                                      expected basic integer type, found `{}`", name, sty));
                        C_nil(ccx)
                    }
                }

                "store" => {
                    let sty = &substs.types[0].sty;
                    if int_type_width_signed(sty, ccx).is_some() {
                        AtomicStore(bcx, llargs[1], llargs[0], order);
                    } else {
                        span_invalid_monomorphization_error(
                            tcx.sess, span,
                            &format!("invalid monomorphization of `{}` intrinsic: \
                                      expected basic integer type, found `{}`", name, sty));
                    }
                    C_nil(ccx)
                }

                "fence" => {
                    AtomicFence(bcx, order, llvm::SynchronizationScope::CrossThread);
                    C_nil(ccx)
                }

                "singlethreadfence" => {
                    AtomicFence(bcx, order, llvm::SynchronizationScope::SingleThread);
                    C_nil(ccx)
                }

                // These are all AtomicRMW ops
                op => {
                    let atom_op = match op {
                        "xchg"  => llvm::AtomicXchg,
                        "xadd"  => llvm::AtomicAdd,
                        "xsub"  => llvm::AtomicSub,
                        "and"   => llvm::AtomicAnd,
                        "nand"  => llvm::AtomicNand,
                        "or"    => llvm::AtomicOr,
                        "xor"   => llvm::AtomicXor,
                        "max"   => llvm::AtomicMax,
                        "min"   => llvm::AtomicMin,
                        "umax"  => llvm::AtomicUMax,
                        "umin"  => llvm::AtomicUMin,
                        _ => ccx.sess().fatal("unknown atomic operation")
                    };

                    let sty = &substs.types[0].sty;
                    if int_type_width_signed(sty, ccx).is_some() {
                        AtomicRMW(bcx, atom_op, llargs[0], llargs[1], order)
                    } else {
                        span_invalid_monomorphization_error(
                            tcx.sess, span,
                            &format!("invalid monomorphization of `{}` intrinsic: \
                                      expected basic integer type, found `{}`", name, sty));
                        C_nil(ccx)
                    }
                }
            }

        }

        (_, _) => {
            let intr = match Intrinsic::find(&name) {
                Some(intr) => intr,
                None => bug!("unknown intrinsic '{}'", name),
            };
            fn one<T>(x: Vec<T>) -> T {
                assert_eq!(x.len(), 1);
                x.into_iter().next().unwrap()
            }
            fn ty_to_type(ccx: &CrateContext, t: &intrinsics::Type,
                          any_changes_needed: &mut bool) -> Vec<Type> {
                use intrinsics::Type::*;
                match *t {
                    Void => vec![Type::void(ccx)],
                    Integer(_signed, width, llvm_width) => {
                        *any_changes_needed |= width != llvm_width;
                        vec![Type::ix(ccx, llvm_width as u64)]
                    }
                    Float(x) => {
                        match x {
                            32 => vec![Type::f32(ccx)],
                            64 => vec![Type::f64(ccx)],
                            _ => bug!()
                        }
                    }
                    Pointer(ref t, ref llvm_elem, _const) => {
                        *any_changes_needed |= llvm_elem.is_some();

                        let t = llvm_elem.as_ref().unwrap_or(t);
                        let elem = one(ty_to_type(ccx, t,
                                                  any_changes_needed));
                        vec![elem.ptr_to()]
                    }
                    Vector(ref t, ref llvm_elem, length) => {
                        *any_changes_needed |= llvm_elem.is_some();

                        let t = llvm_elem.as_ref().unwrap_or(t);
                        let elem = one(ty_to_type(ccx, t,
                                                  any_changes_needed));
                        vec![Type::vector(&elem,
                                          length as u64)]
                    }
                    Aggregate(false, ref contents) => {
                        let elems = contents.iter()
                                            .map(|t| one(ty_to_type(ccx, t, any_changes_needed)))
                                            .collect::<Vec<_>>();
                        vec![Type::struct_(ccx, &elems, false)]
                    }
                    Aggregate(true, ref contents) => {
                        *any_changes_needed = true;
                        contents.iter()
                                .flat_map(|t| ty_to_type(ccx, t, any_changes_needed))
                                .collect()
                    }
                }
            }

            // This allows an argument list like `foo, (bar, baz),
            // qux` to be converted into `foo, bar, baz, qux`, integer
            // arguments to be truncated as needed and pointers to be
            // cast.
            fn modify_as_needed<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
                                            t: &intrinsics::Type,
                                            arg_type: Ty<'tcx>,
                                            llarg: ValueRef)
                                            -> Vec<ValueRef>
            {
                match *t {
                    intrinsics::Type::Aggregate(true, ref contents) => {
                        // We found a tuple that needs squishing! So
                        // run over the tuple and load each field.
                        //
                        // This assumes the type is "simple", i.e. no
                        // destructors, and the contents are SIMD
                        // etc.
                        assert!(!bcx.fcx.type_needs_drop(arg_type));

                        let repr = adt::represent_type(bcx.ccx(), arg_type);
                        let repr_ptr = &repr;
                        let arg = adt::MaybeSizedValue::sized(llarg);
                        (0..contents.len())
                            .map(|i| {
                                Load(bcx, adt::trans_field_ptr(bcx, repr_ptr, arg, Disr(0), i))
                            })
                            .collect()
                    }
                    intrinsics::Type::Pointer(_, Some(ref llvm_elem), _) => {
                        let llvm_elem = one(ty_to_type(bcx.ccx(), llvm_elem, &mut false));
                        vec![PointerCast(bcx, llarg,
                                         llvm_elem.ptr_to())]
                    }
                    intrinsics::Type::Vector(_, Some(ref llvm_elem), length) => {
                        let llvm_elem = one(ty_to_type(bcx.ccx(), llvm_elem, &mut false));
                        vec![BitCast(bcx, llarg,
                                     Type::vector(&llvm_elem, length as u64))]
                    }
                    intrinsics::Type::Integer(_, width, llvm_width) if width != llvm_width => {
                        // the LLVM intrinsic uses a smaller integer
                        // size than the C intrinsic's signature, so
                        // we have to trim it down here.
                        vec![Trunc(bcx, llarg, Type::ix(bcx.ccx(), llvm_width as u64))]
                    }
                    _ => vec![llarg],
                }
            }


            let mut any_changes_needed = false;
            let inputs = intr.inputs.iter()
                                    .flat_map(|t| ty_to_type(ccx, t, &mut any_changes_needed))
                                    .collect::<Vec<_>>();

            let mut out_changes = false;
            let outputs = one(ty_to_type(ccx, &intr.output, &mut out_changes));
            // outputting a flattened aggregate is nonsense
            assert!(!out_changes);

            let llargs = if !any_changes_needed {
                // no aggregates to flatten, so no change needed
                llargs
            } else {
                // there are some aggregates that need to be flattened
                // in the LLVM call, so we need to run over the types
                // again to find them and extract the arguments
                intr.inputs.iter()
                           .zip(&llargs)
                           .zip(&arg_tys)
                           .flat_map(|((t, llarg), ty)| modify_as_needed(bcx, t, ty, *llarg))
                           .collect()
            };
            assert_eq!(inputs.len(), llargs.len());

            let val = match intr.definition {
                intrinsics::IntrinsicDef::Named(name) => {
                    let f = declare::declare_cfn(ccx,
                                                 name,
                                                 Type::func(&inputs, &outputs));
                    Call(bcx, f, &llargs, call_debug_location)
                }
            };

            match *intr.output {
                intrinsics::Type::Aggregate(flatten, ref elems) => {
                    // the output is a tuple so we need to munge it properly
                    assert!(!flatten);

                    for i in 0..elems.len() {
                        let val = ExtractValue(bcx, val, i);
                        Store(bcx, val, StructGEP(bcx, llresult, i));
                    }
                    C_nil(ccx)
                }
                _ => val,
            }
        }
    };

    if val_ty(llval) != Type::void(ccx) &&
       machine::llsize_of_alloc(ccx, val_ty(llval)) != 0 {
        if let Some(ty) = fn_ty.ret.cast {
            let ptr = PointerCast(bcx, llresult, ty.ptr_to());
            let store = Store(bcx, llval, ptr);
            unsafe {
                llvm::LLVMSetAlignment(store, type_of::align_of(ccx, ret_ty));
            }
        } else {
            store_ty(bcx, llval, llresult, ret_ty);
        }
    }

    // If we made a temporary stack slot, let's clean it up
    match dest {
        expr::Ignore => {
            bcx = glue::drop_ty(bcx, llresult, ret_ty, call_debug_location);
            call_lifetime_end(bcx, llresult);
        }
        expr::SaveIn(_) => {}
    }

    fcx.pop_and_trans_custom_cleanup_scope(bcx, cleanup_scope);

    Result::new(bcx, llresult)
}

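/// Emits a call to `llvm.memcpy` (or `llvm.memmove` when `allow_overlap` is
/// true), scaling `count` from a number of `tp_ty` elements to a byte count.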
fn copy_intrinsic<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
                              allow_overlap: bool,
                              volatile: bool,
                              tp_ty: Ty<'tcx>,
                              dst: ValueRef,
                              src: ValueRef,
                              count: ValueRef,
                              call_debug_location: DebugLoc)
                              -> ValueRef {
    let ccx = bcx.ccx();
    let lltp_ty = type_of::type_of(ccx, tp_ty);
    let align = C_i32(ccx, type_of::align_of(ccx, tp_ty) as i32);
    let size = machine::llsize_of(ccx, lltp_ty);
    let int_size = machine::llbitsize_of_real(ccx, ccx.int_type());

    let operation = if allow_overlap {
        "memmove"
    } else {
        "memcpy"
    };

    let name = format!("llvm.{}.p0i8.p0i8.i{}", operation, int_size);

    let dst_ptr = PointerCast(bcx, dst, Type::i8p(ccx));
    let src_ptr = PointerCast(bcx, src, Type::i8p(ccx));
    let llfn = ccx.get_intrinsic(&name);

    Call(bcx,
         llfn,
         &[dst_ptr,
           src_ptr,
           Mul(bcx, size, count, DebugLoc::None),
           align,
           C_bool(ccx, volatile)],
         call_debug_location)
}

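/// Emits a call to `llvm.memset`, again scaling `count` from a number of
/// `tp_ty` elements to a byte count.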
fn memset_intrinsic<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
                                volatile: bool,
                                tp_ty: Ty<'tcx>,
                                dst: ValueRef,
                                val: ValueRef,
                                count: ValueRef,
                                call_debug_location: DebugLoc)
                                -> ValueRef {
    let ccx = bcx.ccx();
    let lltp_ty = type_of::type_of(ccx, tp_ty);
    let align = C_i32(ccx, type_of::align_of(ccx, tp_ty) as i32);
    let size = machine::llsize_of(ccx, lltp_ty);
    let int_size = machine::llbitsize_of_real(ccx, ccx.int_type());

    let name = format!("llvm.memset.p0i8.i{}", int_size);

    let dst_ptr = PointerCast(bcx, dst, Type::i8p(ccx));
    let llfn = ccx.get_intrinsic(&name);

    Call(bcx,
         llfn,
         &[dst_ptr,
           val,
           Mul(bcx, size, count, DebugLoc::None),
           align,
           C_bool(ccx, volatile)],
         call_debug_location)
}

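/// Calls `llvm.ctlz.*`/`llvm.cttz.*`. The constant `false` passed as the
/// second argument is LLVM's `is_zero_undef` flag, so the result is
/// well-defined (the full bit width) for a zero input.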
fn count_zeros_intrinsic(bcx: Block,
                         name: &str,
                         val: ValueRef,
                         call_debug_location: DebugLoc)
                         -> ValueRef {
    let y = C_bool(bcx.ccx(), false);
    let llfn = bcx.ccx().get_intrinsic(&name);
    Call(bcx, llfn, &[val, y], call_debug_location)
}

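/// Calls one of the `llvm.*.with.overflow.*` intrinsics and writes the
/// resulting `(value, overflow-flag)` pair into `out`.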
fn with_overflow_intrinsic<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
                                       name: &str,
                                       a: ValueRef,
                                       b: ValueRef,
                                       out: ValueRef,
                                       call_debug_location: DebugLoc)
                                       -> ValueRef {
    let llfn = bcx.ccx().get_intrinsic(&name);

    // Convert `i1` to a `bool`, and write it to the out parameter
    let val = Call(bcx, llfn, &[a, b], call_debug_location);
    let result = ExtractValue(bcx, val, 0);
    let overflow = ZExt(bcx, ExtractValue(bcx, val, 1), Type::bool(bcx.ccx()));
    Store(bcx, result, StructGEP(bcx, out, 0));
    Store(bcx, overflow, StructGEP(bcx, out, 1));

    C_nil(bcx.ccx())
}

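/// Translates the `try` intrinsic. With landing pads disabled this is just a
/// plain call plus a null "exception" pointer; otherwise it dispatches to the
/// MSVC SEH or GNU landing-pad implementation below.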
fn try_intrinsic<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
                             func: ValueRef,
                             data: ValueRef,
                             local_ptr: ValueRef,
                             dest: ValueRef,
                             dloc: DebugLoc) -> Block<'blk, 'tcx> {
    if bcx.sess().no_landing_pads() {
        Call(bcx, func, &[data], dloc);
        Store(bcx, C_null(Type::i8p(bcx.ccx())), dest);
        bcx
    } else if wants_msvc_seh(bcx.sess()) {
        trans_msvc_try(bcx, func, data, local_ptr, dest, dloc)
    } else {
        trans_gnu_try(bcx, func, data, local_ptr, dest, dloc)
    }
}

// MSVC's definition of the `rust_try` function.
//
// This implementation uses the new exception handling instructions in LLVM
// that support SEH on MSVC targets. Although these instructions are meant to
// work for all targets, as of this writing LLVM does not recommend using
// them, as the old instructions are still better optimized.
1055 fn trans_msvc_try<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
1056                               func: ValueRef,
1057                               data: ValueRef,
1058                               local_ptr: ValueRef,
1059                               dest: ValueRef,
1060                               dloc: DebugLoc) -> Block<'blk, 'tcx> {
1061     let llfn = get_rust_try_fn(bcx.fcx, &mut |bcx| {
1062         let ccx = bcx.ccx();
1063         let dloc = DebugLoc::None;
1064
1065         SetPersonalityFn(bcx, bcx.fcx.eh_personality());
1066
1067         let normal = bcx.fcx.new_temp_block("normal");
1068         let catchswitch = bcx.fcx.new_temp_block("catchswitch");
1069         let catchpad = bcx.fcx.new_temp_block("catchpad");
1070         let caught = bcx.fcx.new_temp_block("caught");
1071
1072         let func = llvm::get_param(bcx.fcx.llfn, 0);
1073         let data = llvm::get_param(bcx.fcx.llfn, 1);
1074         let local_ptr = llvm::get_param(bcx.fcx.llfn, 2);
1075
1076         // We're generating an IR snippet that looks like:
1077         //
1078         //   declare i32 @rust_try(%func, %data, %ptr) {
1079         //      %slot = alloca i64*
1080         //      invoke %func(%data) to label %normal unwind label %catchswitch
1081         //
1082         //   normal:
1083         //      ret i32 0
1084         //
1085         //   catchswitch:
1086         //      %cs = catchswitch within none [%catchpad] unwind to caller
1087         //
1088         //   catchpad:
1089         //      %tok = catchpad within %cs [%type_descriptor, 0, %slot]
1090         //      %ptr[0] = %slot[0]
1091         //      %ptr[1] = %slot[1]
1092         //      catchret from %tok to label %caught
1093         //
1094         //   caught:
1095         //      ret i32 1
1096         //   }
1097         //
1098         // This structure follows the basic usage of throw/try/catch in LLVM.
1099         // For example, compile this C++ snippet to see what LLVM generates:
1100         //
1101         //      #include <stdint.h>
1102         //
1103         //      int bar(void (*foo)(void), uint64_t *ret) {
1104         //          try {
1105         //              foo();
1106         //              return 0;
1107         //          } catch(uint64_t a[2]) {
1108         //              ret[0] = a[0];
1109         //              ret[1] = a[1];
1110         //              return 1;
1111         //          }
1112         //      }
1113         //
1114         // More information can be found in libstd's seh.rs implementation.
1115         let i64p = Type::i64(ccx).ptr_to();
1116         let slot = Alloca(bcx, i64p, "slot");
1117         Invoke(bcx, func, &[data], normal.llbb, catchswitch.llbb, dloc);
1118
1119         Ret(normal, C_i32(ccx, 0), dloc);
1120
1121         let cs = CatchSwitch(catchswitch, None, None, 1);
1122         AddHandler(catchswitch, cs, catchpad.llbb);
1123
1124         let tcx = ccx.tcx();
1125         let tydesc = match tcx.lang_items.msvc_try_filter() {
1126             Some(did) => ::consts::get_static(ccx, did).to_llref(),
1127             None => bug!("msvc_try_filter not defined"),
1128         };
1129         let tok = CatchPad(catchpad, cs, &[tydesc, C_i32(ccx, 0), slot]);
1130         let addr = Load(catchpad, slot);
1131         let arg1 = Load(catchpad, addr);
1132         let val1 = C_i32(ccx, 1);
1133         let arg2 = Load(catchpad, InBoundsGEP(catchpad, addr, &[val1]));
1134         let local_ptr = BitCast(catchpad, local_ptr, i64p);
1135         Store(catchpad, arg1, local_ptr);
1136         Store(catchpad, arg2, InBoundsGEP(catchpad, local_ptr, &[val1]));
1137         CatchRet(catchpad, tok, caught.llbb);
1138
1139         Ret(caught, C_i32(ccx, 1), dloc);
1140     });
1141
1142     // Note that no invoke is used here because by definition this function
1143     // can't panic (that's what it's catching).
1144     let ret = Call(bcx, llfn, &[func, data, local_ptr], dloc);
1145     Store(bcx, ret, dest);
1146     return bcx
1147 }
1148
1149 // Definition of the standard "try" function for Rust using the GNU-like model
1150 // of exceptions (e.g. the normal semantics of LLVM's landingpad and invoke
1151 // instructions).
1152 //
1153 // This translation is a little surprising because we always call a shim
1154 // function instead of inlining the call to `invoke` manually here. This is done
1155 // because in LLVM we're only allowed to have one personality per function
1156 // definition. The call to the `try` intrinsic is being inlined into the
1157 // function calling it, and that function may already have other personality
1158 // functions in play. By calling a shim we're guaranteed that our shim will have
1159 // the right personality function.
1160 fn trans_gnu_try<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
1161                              func: ValueRef,
1162                              data: ValueRef,
1163                              local_ptr: ValueRef,
1164                              dest: ValueRef,
1165                              dloc: DebugLoc) -> Block<'blk, 'tcx> {
1166     let llfn = get_rust_try_fn(bcx.fcx, &mut |bcx| {
1167         let ccx = bcx.ccx();
1168         let dloc = DebugLoc::None;
1169
1170         // Translates the shims described above:
1171         //
1172         //   bcx:
1173         //      invoke %func(%args...) normal %normal unwind %catch
1174         //
1175         //   normal:
1176         //      ret 0
1177         //
1178         //   catch:
1179         //      (ptr, _) = landingpad
1180         //      store ptr, %local_ptr
1181         //      ret 1
1182         //
1183         // Note that the `local_ptr` data passed into the `try` intrinsic is
1184         // expected to be `*mut *mut u8` for this to actually work, but that's
1185         // managed by the standard library.
1186
1187         let then = bcx.fcx.new_temp_block("then");
1188         let catch = bcx.fcx.new_temp_block("catch");
1189
1190         let func = llvm::get_param(bcx.fcx.llfn, 0);
1191         let data = llvm::get_param(bcx.fcx.llfn, 1);
1192         let local_ptr = llvm::get_param(bcx.fcx.llfn, 2);
1193         Invoke(bcx, func, &[data], then.llbb, catch.llbb, dloc);
1194         Ret(then, C_i32(ccx, 0), dloc);
1195
1196         // Type indicator for the exception being thrown.
1197         //
1198         // The first value in this tuple is a pointer to the exception object
1199         // being thrown.  The second value is a "selector" indicating which of
1200         // the landing pad clauses the exception's type had been matched to.
1201         // rust_try ignores the selector.
1202         let lpad_ty = Type::struct_(ccx, &[Type::i8p(ccx), Type::i32(ccx)],
1203                                     false);
1204         let vals = LandingPad(catch, lpad_ty, bcx.fcx.eh_personality(), 1);
1205         AddClause(catch, vals, C_null(Type::i8p(ccx)));
1206         let ptr = ExtractValue(catch, vals, 0);
1207         Store(catch, ptr, BitCast(catch, local_ptr, Type::i8p(ccx).ptr_to()));
1208         Ret(catch, C_i32(ccx, 1), dloc);
1209     });
1210
1211     // Note that no invoke is used here because by definition this function
1212     // can't panic (that's what it's catching).
1213     let ret = Call(bcx, llfn, &[func, data, local_ptr], dloc);
1214     Store(bcx, ret, dest);
1215     return bcx;
1216 }
1217
1218 // Helper function to give a Block to a closure to translate a shim function.
1219 // This is currently primarily used for the `try` intrinsic functions above.
fn gen_fn<'a, 'tcx>(fcx: &FunctionContext<'a, 'tcx>,
                    name: &str,
                    inputs: Vec<Ty<'tcx>>,
                    output: Ty<'tcx>,
                    trans: &mut for<'b> FnMut(Block<'b, 'tcx>))
                    -> ValueRef {
    let ccx = fcx.ccx;
    let sig = ty::FnSig {
        inputs: inputs,
        output: output,
        variadic: false,
    };
    let fn_ty = FnType::new(ccx, Abi::Rust, &sig, &[]);

    let rust_fn_ty = ccx.tcx().mk_fn_ptr(ccx.tcx().mk_bare_fn(ty::BareFnTy {
        unsafety: hir::Unsafety::Unsafe,
        abi: Abi::Rust,
        sig: ty::Binder(sig)
    }));
    let llfn = declare::define_internal_fn(ccx, name, rust_fn_ty);
    let (fcx, block_arena);
    block_arena = TypedArena::new();
    fcx = FunctionContext::new(ccx, llfn, fn_ty, None, &block_arena);
    let bcx = fcx.init(true, None);
    trans(bcx);
    fcx.cleanup();
    llfn
}

// Helper function to get a handle to the `__rust_try` function, which is
// used to catch exceptions.
//
// The function is generated only once and is then cached.
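//
// In Rust terms, the generated function has roughly this signature
// (a sketch matching the types built below):
//
//     unsafe fn __rust_try(f: unsafe fn(*mut u8),
//                          data: *mut u8,
//                          local_ptr: *mut u8) -> i32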
fn get_rust_try_fn<'a, 'tcx>(fcx: &FunctionContext<'a, 'tcx>,
                             trans: &mut for<'b> FnMut(Block<'b, 'tcx>))
                             -> ValueRef {
    let ccx = fcx.ccx;
    if let Some(llfn) = ccx.rust_try_fn().get() {
        return llfn;
    }

    // Define the type up front for the signature of the rust_try function.
    let tcx = ccx.tcx();
    let i8p = tcx.mk_mut_ptr(tcx.types.i8);
    let fn_ty = tcx.mk_fn_ptr(tcx.mk_bare_fn(ty::BareFnTy {
        unsafety: hir::Unsafety::Unsafe,
        abi: Abi::Rust,
        sig: ty::Binder(ty::FnSig {
            inputs: vec![i8p],
            output: tcx.mk_nil(),
            variadic: false,
        }),
    }));
    let output = tcx.types.i32;
    let rust_try = gen_fn(fcx, "__rust_try", vec![fn_ty, i8p, i8p], output, trans);
    ccx.rust_try_fn().set(Some(rust_try));
    return rust_try;
}

fn span_invalid_monomorphization_error(sess: &Session, span: Span, msg: &str) {
    span_err!(sess, span, E0511, "{}", msg);
}

fn generic_simd_intrinsic<'blk, 'tcx, 'a>
    (bcx: Block<'blk, 'tcx>,
     name: &str,
     substs: &'tcx Substs<'tcx>,
     callee_ty: Ty<'tcx>,
     args: Option<&[P<hir::Expr>]>,
     llargs: &[ValueRef],
     ret_ty: Ty<'tcx>,
     llret_ty: Type,
     call_debug_location: DebugLoc,
     span: Span) -> ValueRef
{
    // macros for error handling:
    macro_rules! emit_error {
        ($msg: tt) => {
            emit_error!($msg, )
        };
        ($msg: tt, $($fmt: tt)*) => {
            span_invalid_monomorphization_error(
                bcx.sess(), span,
                &format!(concat!("invalid monomorphization of `{}` intrinsic: ",
                                 $msg),
                         name, $($fmt)*));
        }
    }
    macro_rules! require {
        ($cond: expr, $($fmt: tt)*) => {
            if !$cond {
                emit_error!($($fmt)*);
                return C_nil(bcx.ccx())
            }
        }
    }
    macro_rules! require_simd {
        ($ty: expr, $position: expr) => {
            require!($ty.is_simd(), "expected SIMD {} type, found non-SIMD `{}`", $position, $ty)
        }
    }

    let tcx = bcx.tcx();
    let sig = tcx.erase_late_bound_regions(callee_ty.fn_sig());
    let sig = tcx.normalize_associated_type(&sig);
    let arg_tys = sig.inputs;

    // every intrinsic takes a SIMD vector as its first argument
    require_simd!(arg_tys[0], "input");
    let in_ty = arg_tys[0];
    let in_elem = arg_tys[0].simd_type(tcx);
    let in_len = arg_tys[0].simd_size(tcx);

    let comparison = match name {
        "simd_eq" => Some(hir::BiEq),
        "simd_ne" => Some(hir::BiNe),
        "simd_lt" => Some(hir::BiLt),
        "simd_le" => Some(hir::BiLe),
        "simd_gt" => Some(hir::BiGt),
        "simd_ge" => Some(hir::BiGe),
        _ => None
    };

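    // For example (a sketch): `simd_eq` on two `i32x4` arguments yields an
    // `i32x4` whose lanes are all ones (-1) where the inputs compare equal
    // and all zeros elsewhere.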
    if let Some(cmp_op) = comparison {
        require_simd!(ret_ty, "return");

        let out_len = ret_ty.simd_size(tcx);
        require!(in_len == out_len,
                 "expected return type with length {} (same as input type `{}`), \
                  found `{}` with length {}",
                 in_len, in_ty,
                 ret_ty, out_len);
        require!(llret_ty.element_type().kind() == llvm::Integer,
                 "expected return type with integer elements, found `{}` with non-integer `{}`",
                 ret_ty,
                 ret_ty.simd_type(tcx));

        return compare_simd_types(bcx,
                                  llargs[0],
                                  llargs[1],
                                  in_elem,
                                  llret_ty,
                                  cmp_op,
                                  call_debug_location)
    }

    if name.starts_with("simd_shuffle") {
        let n: usize = match name["simd_shuffle".len()..].parse() {
            Ok(n) => n,
            Err(_) => span_bug!(span,
                                "bad `simd_shuffle` instruction only caught in trans?")
        };

        require_simd!(ret_ty, "return");

        let out_len = ret_ty.simd_size(tcx);
        require!(out_len == n,
                 "expected return type of length {}, found `{}` with length {}",
                 n, ret_ty, out_len);
        require!(in_elem == ret_ty.simd_type(tcx),
                 "expected return element type `{}` (element of input `{}`), \
                  found `{}` with element type `{}`",
                 in_elem, in_ty,
                 ret_ty, ret_ty.simd_type(tcx));

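        // Shuffle indices select lanes from the concatenation of the two
        // input vectors, so valid indices range over `0..2 * in_len`. For
        // example (a sketch): `simd_shuffle4(a, b, [0, 4, 1, 5])` on two
        // 4-lane vectors interleaves the first two lanes of `a` and `b`.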
        let total_len = in_len as u64 * 2;

        let vector = match args {
            Some(args) => {
                match consts::const_expr(bcx.ccx(), &args[2], substs, None,
                                         // this should probably help simd error reporting
                                         consts::TrueConst::Yes) {
                    Ok((vector, _)) => vector,
                    Err(err) => {
                        fatal_const_eval_err(bcx.tcx(), err.as_inner(), span,
                                             "shuffle indices");
                    }
                }
            }
            None => llargs[2]
        };

        let indices: Option<Vec<_>> = (0..n)
            .map(|i| {
                let arg_idx = i;
                let val = const_get_elt(vector, &[i as libc::c_uint]);
                match const_to_opt_uint(val) {
                    None => {
                        emit_error!("shuffle index #{} is not a constant", arg_idx);
                        None
                    }
                    Some(idx) if idx >= total_len => {
                        emit_error!("shuffle index #{} is out of bounds (limit {})",
                                    arg_idx, total_len);
                        None
                    }
                    Some(idx) => Some(C_i32(bcx.ccx(), idx as i32)),
                }
            })
            .collect();
        let indices = match indices {
            Some(i) => i,
            None => return C_null(llret_ty)
        };

        return ShuffleVector(bcx, llargs[0], llargs[1], C_vector(&indices))
    }

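    // For example (a sketch): `simd_insert(v, 1, x)` returns `v` with lane 1
    // replaced by `x`, and `simd_extract(v, 1)` returns lane 1 of `v`.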
    if name == "simd_insert" {
        require!(in_elem == arg_tys[2],
                 "expected inserted type `{}` (element of input `{}`), found `{}`",
                 in_elem, in_ty, arg_tys[2]);
        return InsertElement(bcx, llargs[0], llargs[2], llargs[1])
    }
    if name == "simd_extract" {
        require!(ret_ty == in_elem,
                 "expected return type `{}` (element of input `{}`), found `{}`",
                 in_elem, in_ty, ret_ty);
        return ExtractElement(bcx, llargs[0], llargs[1])
    }

    if name == "simd_cast" {
        require_simd!(ret_ty, "return");
        let out_len = ret_ty.simd_size(tcx);
        require!(in_len == out_len,
                 "expected return type with length {} (same as input type `{}`), \
                  found `{}` with length {}",
                 in_len, in_ty,
                 ret_ty, out_len);
        // casting cares about nominal type, not just structural type
        let out_elem = ret_ty.simd_type(tcx);

        if in_elem == out_elem { return llargs[0]; }

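        // For example (a sketch): `simd_cast::<i32x4, i64x4>` sign-extends
        // each lane (SExt), `u32x4 -> f32x4` converts with UIToFP, and
        // `f64x2 -> f32x2` narrows each lane with FPTrunc.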
        enum Style { Float, Int(/* is signed? */ bool), Unsupported }

        let (in_style, in_width) = match in_elem.sty {
            // vectors of pointer-sized integers should've been
            // disallowed before here, so this unwrap is safe.
            ty::TyInt(i) => (Style::Int(true), i.bit_width().unwrap()),
            ty::TyUint(u) => (Style::Int(false), u.bit_width().unwrap()),
            ty::TyFloat(f) => (Style::Float, f.bit_width()),
            _ => (Style::Unsupported, 0)
        };
        let (out_style, out_width) = match out_elem.sty {
            ty::TyInt(i) => (Style::Int(true), i.bit_width().unwrap()),
            ty::TyUint(u) => (Style::Int(false), u.bit_width().unwrap()),
            ty::TyFloat(f) => (Style::Float, f.bit_width()),
            _ => (Style::Unsupported, 0)
        };

        match (in_style, out_style) {
            (Style::Int(in_is_signed), Style::Int(_)) => {
                return match in_width.cmp(&out_width) {
                    Ordering::Greater => Trunc(bcx, llargs[0], llret_ty),
                    Ordering::Equal => llargs[0],
                    Ordering::Less => if in_is_signed {
                        SExt(bcx, llargs[0], llret_ty)
                    } else {
                        ZExt(bcx, llargs[0], llret_ty)
                    }
                }
            }
            (Style::Int(in_is_signed), Style::Float) => {
                return if in_is_signed {
                    SIToFP(bcx, llargs[0], llret_ty)
                } else {
                    UIToFP(bcx, llargs[0], llret_ty)
                }
            }
            (Style::Float, Style::Int(out_is_signed)) => {
                return if out_is_signed {
                    FPToSI(bcx, llargs[0], llret_ty)
                } else {
                    FPToUI(bcx, llargs[0], llret_ty)
                }
            }
            (Style::Float, Style::Float) => {
                return match in_width.cmp(&out_width) {
                    Ordering::Greater => FPTrunc(bcx, llargs[0], llret_ty),
                    Ordering::Equal => llargs[0],
                    Ordering::Less => FPExt(bcx, llargs[0], llret_ty)
                }
            }
            _ => {/* Unsupported. Fallthrough. */}
        }
        require!(false,
                 "unsupported cast from `{}` with element `{}` to `{}` with element `{}`",
                 in_ty, in_elem,
                 ret_ty, out_elem);
    }
    macro_rules! arith {
        ($($name: ident: $($($p: ident),* => $call: expr),*;)*) => {
            $(
                if name == stringify!($name) {
                    match in_elem.sty {
                        $(
                            $(ty::$p(_))|* => {
                                return $call(bcx, llargs[0], llargs[1], call_debug_location)
                            }
                            )*
                        _ => {},
                    }
                    require!(false,
                             "unsupported operation on `{}` with element `{}`",
                             in_ty,
                             in_elem)
                })*
        }
    }
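    // A sketch of what one `arith!` row expands to, e.g.
    // `simd_add: TyUint, TyInt => Add, TyFloat => FAdd;`:
    //
    //     if name == "simd_add" {
    //         match in_elem.sty {
    //             ty::TyUint(_) | ty::TyInt(_) =>
    //                 return Add(bcx, llargs[0], llargs[1], call_debug_location),
    //             ty::TyFloat(_) =>
    //                 return FAdd(bcx, llargs[0], llargs[1], call_debug_location),
    //             _ => {}
    //         }
    //         require!(false, "unsupported operation on `{}` with element `{}`",
    //                  in_ty, in_elem)
    //     }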
    arith! {
        simd_add: TyUint, TyInt => Add, TyFloat => FAdd;
        simd_sub: TyUint, TyInt => Sub, TyFloat => FSub;
        simd_mul: TyUint, TyInt => Mul, TyFloat => FMul;
        simd_div: TyFloat => FDiv;
        simd_shl: TyUint, TyInt => Shl;
        simd_shr: TyUint => LShr, TyInt => AShr;
        simd_and: TyUint, TyInt => And;
        simd_or: TyUint, TyInt => Or;
        simd_xor: TyUint, TyInt => Xor;
    }
    span_bug!(span, "unknown SIMD intrinsic");
}

// Returns the width of an int TypeVariant and whether it's signed.
// Returns None if the type is not an integer.
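//
// For example (a sketch): `int_type_width_signed(&TyInt(ast::IntTy::I32), ccx)`
// yields `Some((32, true))`, and `&TyUint(ast::UintTy::U8)` yields
// `Some((8, false))`.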
fn int_type_width_signed<'tcx>(sty: &ty::TypeVariants<'tcx>, ccx: &CrateContext)
        -> Option<(u64, bool)> {
    use rustc::ty::{TyInt, TyUint};
    match *sty {
        TyInt(t) => Some((match t {
            ast::IntTy::Is => {
                match &ccx.tcx().sess.target.target.target_pointer_width[..] {
                    "16" => 16,
                    "32" => 32,
                    "64" => 64,
                    tws => bug!("Unsupported target word size for isize: {}", tws),
                }
            },
            ast::IntTy::I8 => 8,
            ast::IntTy::I16 => 16,
            ast::IntTy::I32 => 32,
            ast::IntTy::I64 => 64,
        }, true)),
        TyUint(t) => Some((match t {
            ast::UintTy::Us => {
                match &ccx.tcx().sess.target.target.target_pointer_width[..] {
                    "16" => 16,
                    "32" => 32,
                    "64" => 64,
                    tws => bug!("Unsupported target word size for usize: {}", tws),
                }
            },
            ast::UintTy::U8 => 8,
            ast::UintTy::U16 => 16,
            ast::UintTy::U32 => 32,
            ast::UintTy::U64 => 64,
        }, false)),
        _ => None,
    }
}

// Returns the width of a float TypeVariant.
// Returns None if the type is not a float.
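//
// For example (a sketch): `float_type_width(&TyFloat(ast::FloatTy::F64))`
// yields `Some(64)`.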
fn float_type_width<'tcx>(sty: &ty::TypeVariants<'tcx>)
        -> Option<u64> {
    use rustc::ty::TyFloat;
    match *sty {
        TyFloat(t) => Some(match t {
            ast::FloatTy::F32 => 32,
            ast::FloatTy::F64 => 64,
        }),
        _ => None,
    }
}