// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.

#![allow(non_upper_case_globals)]

use arena::TypedArena;
use llvm;
use llvm::{SequentiallyConsistent, Acquire, Release, AtomicXchg, ValueRef, TypeKind};
use middle::subst;
use middle::subst::FnSpace;
use trans::adt;
use trans::attributes;
use trans::base::*;
use trans::build::*;
use trans::callee;
use trans::cleanup;
use trans::cleanup::CleanupMethods;
use trans::common::*;
use trans::datum::*;
use trans::debuginfo::DebugLoc;
use trans::declare;
use trans::expr;
use trans::glue;
use trans::type_of::*;
use trans::type_of;
use trans::machine;
use trans::machine::llsize_of;
use trans::type_::Type;
use middle::ty::{self, Ty, HasTypeFlags};
use middle::subst::Substs;
use syntax::abi::{self, RustIntrinsic};
use syntax::ast;
use syntax::parse::token;

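/// Maps a "simple" intrinsic (one that lowers to a single LLVM intrinsic
/// call taking the same arguments) to its LLVM counterpart, e.g. `sqrtf32`
/// to `llvm.sqrt.f32`. Returns `None` for intrinsics that need the custom
/// translation performed in `trans_intrinsic_call` below.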
pub fn get_simple_intrinsic(ccx: &CrateContext, item: &ast::ForeignItem) -> Option<ValueRef> {
    let name = match &*item.ident.name.as_str() {
        "sqrtf32" => "llvm.sqrt.f32",
        "sqrtf64" => "llvm.sqrt.f64",
        "powif32" => "llvm.powi.f32",
        "powif64" => "llvm.powi.f64",
        "sinf32" => "llvm.sin.f32",
        "sinf64" => "llvm.sin.f64",
        "cosf32" => "llvm.cos.f32",
        "cosf64" => "llvm.cos.f64",
        "powf32" => "llvm.pow.f32",
        "powf64" => "llvm.pow.f64",
        "expf32" => "llvm.exp.f32",
        "expf64" => "llvm.exp.f64",
        "exp2f32" => "llvm.exp2.f32",
        "exp2f64" => "llvm.exp2.f64",
        "logf32" => "llvm.log.f32",
        "logf64" => "llvm.log.f64",
        "log10f32" => "llvm.log10.f32",
        "log10f64" => "llvm.log10.f64",
        "log2f32" => "llvm.log2.f32",
        "log2f64" => "llvm.log2.f64",
        "fmaf32" => "llvm.fma.f32",
        "fmaf64" => "llvm.fma.f64",
        "fabsf32" => "llvm.fabs.f32",
        "fabsf64" => "llvm.fabs.f64",
        "copysignf32" => "llvm.copysign.f32",
        "copysignf64" => "llvm.copysign.f64",
        "floorf32" => "llvm.floor.f32",
        "floorf64" => "llvm.floor.f64",
        "ceilf32" => "llvm.ceil.f32",
        "ceilf64" => "llvm.ceil.f64",
        "truncf32" => "llvm.trunc.f32",
        "truncf64" => "llvm.trunc.f64",
        "rintf32" => "llvm.rint.f32",
        "rintf64" => "llvm.rint.f64",
        "nearbyintf32" => "llvm.nearbyint.f32",
        "nearbyintf64" => "llvm.nearbyint.f64",
        "roundf32" => "llvm.round.f32",
        "roundf64" => "llvm.round.f64",
        "ctpop8" => "llvm.ctpop.i8",
        "ctpop16" => "llvm.ctpop.i16",
        "ctpop32" => "llvm.ctpop.i32",
        "ctpop64" => "llvm.ctpop.i64",
        "bswap16" => "llvm.bswap.i16",
        "bswap32" => "llvm.bswap.i32",
        "bswap64" => "llvm.bswap.i64",
        "assume" => "llvm.assume",
        _ => return None
    };
    Some(ccx.get_intrinsic(&name))
}

/// Performs late verification that intrinsics are used correctly. At present,
/// the only intrinsic that needs such verification is `transmute`.
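/// For example, `transmute::<u32, [u8; 3]>` is rejected here because the
/// source and destination sizes differ (32 bits vs. 24 bits).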
pub fn check_intrinsics(ccx: &CrateContext) {
    let mut last_failing_id = None;
    for transmute_restriction in ccx.tcx().transmute_restrictions.borrow().iter() {
        // A single call to transmute can push multiple type pairs to test, in
        // order to exhaustively cover the possibilities introduced by a type
        // parameter. If one of those pairs fails, there is no sense in
        // reporting errors on the others.
        if last_failing_id == Some(transmute_restriction.id) {
            continue;
        }

        debug!("transmute_restriction: {:?}", transmute_restriction);

        assert!(!transmute_restriction.substituted_from.has_param_types());
        assert!(!transmute_restriction.substituted_to.has_param_types());

        let llfromtype = type_of::sizing_type_of(ccx,
                                                 transmute_restriction.substituted_from);
        let lltotype = type_of::sizing_type_of(ccx,
                                               transmute_restriction.substituted_to);
        let from_type_size = machine::llbitsize_of_real(ccx, llfromtype);
        let to_type_size = machine::llbitsize_of_real(ccx, lltotype);
        if from_type_size != to_type_size {
            last_failing_id = Some(transmute_restriction.id);

            if transmute_restriction.original_from != transmute_restriction.substituted_from {
                ccx.sess().span_err(
                    transmute_restriction.span,
                    &format!("transmute called on types with potentially different sizes: \
                              {} (could be {} bit{}) to {} (could be {} bit{})",
                             transmute_restriction.original_from,
                             from_type_size as usize,
                             if from_type_size == 1 {""} else {"s"},
                             transmute_restriction.original_to,
                             to_type_size as usize,
                             if to_type_size == 1 {""} else {"s"}));
            } else {
                ccx.sess().span_err(
                    transmute_restriction.span,
                    &format!("transmute called on types with different sizes: \
                              {} ({} bit{}) to {} ({} bit{})",
                             transmute_restriction.original_from,
                             from_type_size as usize,
                             if from_type_size == 1 {""} else {"s"},
                             transmute_restriction.original_to,
                             to_type_size as usize,
                             if to_type_size == 1 {""} else {"s"}));
            }
        }
    }
    ccx.sess().abort_if_errors();
}

/// Remember to add all intrinsics here, in librustc_typeck/check/mod.rs,
/// and in libcore/intrinsics.rs; if you need access to any LLVM intrinsics,
/// add them to librustc_trans/trans/context.rs.
pub fn trans_intrinsic_call<'a, 'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>,
                                            node: ast::NodeId,
                                            callee_ty: Ty<'tcx>,
                                            cleanup_scope: cleanup::CustomScopeIndex,
                                            args: callee::CallArgs<'a, 'tcx>,
                                            dest: expr::Dest,
                                            substs: subst::Substs<'tcx>,
                                            call_info: NodeIdAndSpan)
                                            -> Result<'blk, 'tcx> {
    let fcx = bcx.fcx;
    let ccx = fcx.ccx;
    let tcx = bcx.tcx();

    let _icx = push_ctxt("trans_intrinsic_call");

    let ret_ty = match callee_ty.sty {
        ty::TyBareFn(_, ref f) => {
            bcx.tcx().erase_late_bound_regions(&f.sig.output())
        }
        _ => panic!("expected bare_fn in trans_intrinsic_call")
    };
    let foreign_item = tcx.map.expect_foreign_item(node);
    let name = foreign_item.ident.name.as_str();

    // For `transmute` we can just trans the input expr directly into dest
    if name == "transmute" {
        let llret_ty = type_of::type_of(ccx, ret_ty.unwrap());
        match args {
            callee::ArgExprs(arg_exprs) => {
                assert_eq!(arg_exprs.len(), 1);

                let (in_type, out_type) = (*substs.types.get(FnSpace, 0),
                                           *substs.types.get(FnSpace, 1));
                let llintype = type_of::type_of(ccx, in_type);
                let llouttype = type_of::type_of(ccx, out_type);

                let in_type_size = machine::llbitsize_of_real(ccx, llintype);
                let out_type_size = machine::llbitsize_of_real(ccx, llouttype);

                // This should be caught by the intrinsicck pass
                assert_eq!(in_type_size, out_type_size);

                let nonpointer_nonaggregate = |llkind: TypeKind| -> bool {
                    use llvm::TypeKind::*;
                    match llkind {
                        Half | Float | Double | X86_FP80 | FP128 |
                            PPC_FP128 | Integer | Vector | X86_MMX => true,
                        _ => false
                    }
                };

                // An approximation to which types can be directly cast via
                // LLVM's bitcast.  This doesn't cover pointer -> pointer casts,
                // but does, importantly, cover SIMD types.
                let in_kind = llintype.kind();
                let ret_kind = llret_ty.kind();
                let bitcast_compatible =
                    (nonpointer_nonaggregate(in_kind) && nonpointer_nonaggregate(ret_kind)) || {
                        in_kind == TypeKind::Pointer && ret_kind == TypeKind::Pointer
                    };

                let dest = if bitcast_compatible {
                    // if we're here, the type is scalar-like (a primitive, a
                    // SIMD type or a pointer), and so can be handled as a
                    // by-value ValueRef and can also be directly bitcast to the
                    // target type.  Doing this special case makes conversions
                    // like `u32x4` -> `u64x2` much nicer for LLVM and so more
                    // efficient (these are done efficiently implicitly in C
                    // with the `__m128i` type and so this means Rust doesn't
                    // lose out there).
                    let expr = &*arg_exprs[0];
                    let datum = unpack_datum!(bcx, expr::trans(bcx, expr));
                    let datum = unpack_datum!(bcx, datum.to_rvalue_datum(bcx, "transmute_temp"));
                    let val = if datum.kind.is_by_ref() {
                        load_ty(bcx, datum.val, datum.ty)
                    } else {
                        from_arg_ty(bcx, datum.val, datum.ty)
                    };

                    let cast_val = BitCast(bcx, val, llret_ty);

                    match dest {
                        expr::SaveIn(d) => {
                            // this often occurs in a sequence like `Store(val,
                            // d); val2 = Load(d)`, so it disappears easily.
                            Store(bcx, cast_val, d);
                        }
                        expr::Ignore => {}
                    }
                    dest
                } else {
                    // The types are too complicated to do with a by-value
                    // bitcast, so pointer cast instead. We need to cast the
                    // dest so the types work out.
                    let dest = match dest {
                        expr::SaveIn(d) => expr::SaveIn(PointerCast(bcx, d, llintype.ptr_to())),
                        expr::Ignore => expr::Ignore
                    };
                    bcx = expr::trans_into(bcx, &*arg_exprs[0], dest);
                    dest
                };

                fcx.scopes.borrow_mut().last_mut().unwrap().drop_non_lifetime_clean();
                fcx.pop_and_trans_custom_cleanup_scope(bcx, cleanup_scope);

                return match dest {
                    expr::SaveIn(d) => Result::new(bcx, d),
                    expr::Ignore => Result::new(bcx, C_undef(llret_ty.ptr_to()))
                };

            }

            _ => {
                ccx.sess().bug("expected expr as argument for transmute");
            }
        }
    }

    // For `move_val_init` we can evaluate the destination address
    // (the first argument) and then trans the source value (the
    // second argument) directly into the resulting destination
    // address.
    if name == "move_val_init" {
        if let callee::ArgExprs(ref exprs) = args {
            let (dest_expr, source_expr) = if exprs.len() != 2 {
                ccx.sess().bug("expected two exprs as arguments for `move_val_init` intrinsic");
            } else {
                (&exprs[0], &exprs[1])
            };

            // evaluate destination address
            let dest_datum = unpack_datum!(bcx, expr::trans(bcx, dest_expr));
            let dest_datum = unpack_datum!(
                bcx, dest_datum.to_rvalue_datum(bcx, "arg"));
            let dest_datum = unpack_datum!(
                bcx, dest_datum.to_appropriate_datum(bcx));

            // `expr::trans_into(bcx, expr, dest)` is equiv to
            //
            //    `trans(bcx, expr).store_to_dest(dest)`,
            //
            // which for `dest == expr::SaveIn(addr)`, is equivalent to:
            //
            //    `trans(bcx, expr).store_to(bcx, addr)`.
            let lldest = expr::Dest::SaveIn(dest_datum.val);
            bcx = expr::trans_into(bcx, source_expr, lldest);

            let llresult = C_nil(ccx);
            fcx.pop_and_trans_custom_cleanup_scope(bcx, cleanup_scope);

            return Result::new(bcx, llresult);
        } else {
            ccx.sess().bug("expected two exprs as arguments for `move_val_init` intrinsic");
        }
    }

    let call_debug_location = DebugLoc::At(call_info.id, call_info.span);

    // For `try` we need some custom control flow
    if &name[..] == "try" {
        if let callee::ArgExprs(ref exprs) = args {
            let (func, data) = if exprs.len() != 2 {
                ccx.sess().bug("expected two exprs as arguments for \
                                `try` intrinsic");
            } else {
                (&exprs[0], &exprs[1])
            };

            // translate arguments
            let func = unpack_datum!(bcx, expr::trans(bcx, func));
            let func = unpack_datum!(bcx, func.to_rvalue_datum(bcx, "func"));
            let data = unpack_datum!(bcx, expr::trans(bcx, data));
            let data = unpack_datum!(bcx, data.to_rvalue_datum(bcx, "data"));

            let dest = match dest {
                expr::SaveIn(d) => d,
                expr::Ignore => alloc_ty(bcx, tcx.mk_mut_ptr(tcx.types.i8),
                                         "try_result"),
            };

            // do the invoke
            bcx = try_intrinsic(bcx, func.val, data.val, dest,
                                call_debug_location);

            fcx.pop_and_trans_custom_cleanup_scope(bcx, cleanup_scope);
            return Result::new(bcx, dest);
        } else {
            ccx.sess().bug("expected two exprs as arguments for \
                            `try` intrinsic");
        }
    }

    // Push the arguments.
    let mut llargs = Vec::new();
    bcx = callee::trans_args(bcx,
                             args,
                             callee_ty,
                             &mut llargs,
                             cleanup::CustomScope(cleanup_scope),
                             false,
                             RustIntrinsic);

    fcx.scopes.borrow_mut().last_mut().unwrap().drop_non_lifetime_clean();

    // These are the only intrinsic functions that diverge.
    if name == "abort" {
        let llfn = ccx.get_intrinsic(&("llvm.trap"));
        Call(bcx, llfn, &[], None, call_debug_location);
        fcx.pop_and_trans_custom_cleanup_scope(bcx, cleanup_scope);
        Unreachable(bcx);
        return Result::new(bcx, C_undef(Type::nil(ccx).ptr_to()));
    } else if &name[..] == "unreachable" {
        fcx.pop_and_trans_custom_cleanup_scope(bcx, cleanup_scope);
        Unreachable(bcx);
        return Result::new(bcx, C_nil(ccx));
    }

    let ret_ty = match ret_ty {
        ty::FnConverging(ret_ty) => ret_ty,
        ty::FnDiverging => unreachable!()
    };

    let llret_ty = type_of::type_of(ccx, ret_ty);

    // Get location to store the result. If the user does
    // not care about the result, just make a stack slot
    let llresult = match dest {
        expr::SaveIn(d) => d,
        expr::Ignore => {
            if !type_is_zero_size(ccx, ret_ty) {
                alloc_ty(bcx, ret_ty, "intrinsic_result")
            } else {
                C_undef(llret_ty.ptr_to())
            }
        }
    };

    let simple = get_simple_intrinsic(ccx, &*foreign_item);
    let llval = match (simple, &*name) {
        (Some(llfn), _) => {
            Call(bcx, llfn, &llargs, None, call_debug_location)
        }
        (_, "breakpoint") => {
            let llfn = ccx.get_intrinsic(&("llvm.debugtrap"));
            Call(bcx, llfn, &[], None, call_debug_location)
        }
        (_, "size_of") => {
            let tp_ty = *substs.types.get(FnSpace, 0);
            let lltp_ty = type_of::type_of(ccx, tp_ty);
            C_uint(ccx, machine::llsize_of_alloc(ccx, lltp_ty))
        }
        (_, "size_of_val") => {
            let tp_ty = *substs.types.get(FnSpace, 0);
            if !type_is_sized(tcx, tp_ty) {
                let (llsize, _) = glue::size_and_align_of_dst(bcx, tp_ty, llargs[1]);
                llsize
            } else {
                let lltp_ty = type_of::type_of(ccx, tp_ty);
                C_uint(ccx, machine::llsize_of_alloc(ccx, lltp_ty))
            }
        }
        (_, "min_align_of") => {
            let tp_ty = *substs.types.get(FnSpace, 0);
            C_uint(ccx, type_of::align_of(ccx, tp_ty))
        }
        (_, "min_align_of_val") => {
            let tp_ty = *substs.types.get(FnSpace, 0);
            if !type_is_sized(tcx, tp_ty) {
                let (_, llalign) = glue::size_and_align_of_dst(bcx, tp_ty, llargs[1]);
                llalign
            } else {
                C_uint(ccx, type_of::align_of(ccx, tp_ty))
            }
        }
        (_, "pref_align_of") => {
            let tp_ty = *substs.types.get(FnSpace, 0);
            let lltp_ty = type_of::type_of(ccx, tp_ty);
            C_uint(ccx, machine::llalign_of_pref(ccx, lltp_ty))
        }
        (_, "drop_in_place") => {
            let tp_ty = *substs.types.get(FnSpace, 0);
            let ptr = if type_is_sized(tcx, tp_ty) {
                llargs[0]
            } else {
                let scratch = rvalue_scratch_datum(bcx, tp_ty, "tmp");
                Store(bcx, llargs[0], expr::get_dataptr(bcx, scratch.val));
                Store(bcx, llargs[1], expr::get_len(bcx, scratch.val));
                fcx.schedule_lifetime_end(cleanup::CustomScope(cleanup_scope), scratch.val);
                scratch.val
            };
            glue::drop_ty(bcx, ptr, tp_ty, call_debug_location);
            C_nil(ccx)
        }
        (_, "type_name") => {
            let tp_ty = *substs.types.get(FnSpace, 0);
            let ty_name = token::intern_and_get_ident(&tp_ty.to_string());
            C_str_slice(ccx, ty_name)
        }
        (_, "type_id") => {
            let hash = ccx.tcx().hash_crate_independent(*substs.types.get(FnSpace, 0),
                                                        &ccx.link_meta().crate_hash);
            C_u64(ccx, hash)
        }
        (_, "init_dropped") => {
            let tp_ty = *substs.types.get(FnSpace, 0);
            if !return_type_is_void(ccx, tp_ty) {
                drop_done_fill_mem(bcx, llresult, tp_ty);
            }
            C_nil(ccx)
        }
        (_, "init") => {
            let tp_ty = *substs.types.get(FnSpace, 0);
            if !return_type_is_void(ccx, tp_ty) {
                // Just zero out the stack slot. (See comment on base::memzero for explanation)
                init_zero_mem(bcx, llresult, tp_ty);
            }
            C_nil(ccx)
        }
        // Effectively no-ops
        (_, "uninit") | (_, "forget") => {
            C_nil(ccx)
        }
        (_, "needs_drop") => {
            let tp_ty = *substs.types.get(FnSpace, 0);

            C_bool(ccx, bcx.fcx.type_needs_drop(tp_ty))
        }
        (_, "offset") => {
            let ptr = llargs[0];
            let offset = llargs[1];
            InBoundsGEP(bcx, ptr, &[offset])
        }
        (_, "arith_offset") => {
            let ptr = llargs[0];
            let offset = llargs[1];
            GEP(bcx, ptr, &[offset])
        }

        (_, "copy_nonoverlapping") => {
            copy_intrinsic(bcx,
                           false,
                           false,
                           *substs.types.get(FnSpace, 0),
                           llargs[1],
                           llargs[0],
                           llargs[2],
                           call_debug_location)
        }
        (_, "copy") => {
            copy_intrinsic(bcx,
                           true,
                           false,
                           *substs.types.get(FnSpace, 0),
                           llargs[1],
                           llargs[0],
                           llargs[2],
                           call_debug_location)
        }
        (_, "write_bytes") => {
            memset_intrinsic(bcx,
                             false,
                             *substs.types.get(FnSpace, 0),
                             llargs[0],
                             llargs[1],
                             llargs[2],
                             call_debug_location)
        }

        (_, "volatile_copy_nonoverlapping_memory") => {
            copy_intrinsic(bcx,
                           false,
                           true,
                           *substs.types.get(FnSpace, 0),
                           llargs[0],
                           llargs[1],
                           llargs[2],
                           call_debug_location)
        }
        (_, "volatile_copy_memory") => {
            copy_intrinsic(bcx,
                           true,
                           true,
                           *substs.types.get(FnSpace, 0),
                           llargs[0],
                           llargs[1],
                           llargs[2],
                           call_debug_location)
        }
        (_, "volatile_set_memory") => {
            memset_intrinsic(bcx,
                             true,
                             *substs.types.get(FnSpace, 0),
                             llargs[0],
                             llargs[1],
                             llargs[2],
                             call_debug_location)
        }
        (_, "volatile_load") => {
            let tp_ty = *substs.types.get(FnSpace, 0);
            let ptr = to_arg_ty_ptr(bcx, llargs[0], tp_ty);
            let load = VolatileLoad(bcx, ptr);
            unsafe {
                llvm::LLVMSetAlignment(load, type_of::align_of(ccx, tp_ty));
            }
            to_arg_ty(bcx, load, tp_ty)
        },
        (_, "volatile_store") => {
            let tp_ty = *substs.types.get(FnSpace, 0);
            let ptr = to_arg_ty_ptr(bcx, llargs[0], tp_ty);
            let val = from_arg_ty(bcx, llargs[1], tp_ty);
            let store = VolatileStore(bcx, val, ptr);
            unsafe {
                llvm::LLVMSetAlignment(store, type_of::align_of(ccx, tp_ty));
            }
            C_nil(ccx)
        },

        (_, "ctlz8") => count_zeros_intrinsic(bcx,
                                              "llvm.ctlz.i8",
                                              llargs[0],
                                              call_debug_location),
        (_, "ctlz16") => count_zeros_intrinsic(bcx,
                                               "llvm.ctlz.i16",
                                               llargs[0],
                                               call_debug_location),
        (_, "ctlz32") => count_zeros_intrinsic(bcx,
                                               "llvm.ctlz.i32",
                                               llargs[0],
                                               call_debug_location),
        (_, "ctlz64") => count_zeros_intrinsic(bcx,
                                               "llvm.ctlz.i64",
                                               llargs[0],
                                               call_debug_location),
        (_, "cttz8") => count_zeros_intrinsic(bcx,
                                              "llvm.cttz.i8",
                                              llargs[0],
                                              call_debug_location),
        (_, "cttz16") => count_zeros_intrinsic(bcx,
                                               "llvm.cttz.i16",
                                               llargs[0],
                                               call_debug_location),
        (_, "cttz32") => count_zeros_intrinsic(bcx,
                                               "llvm.cttz.i32",
                                               llargs[0],
                                               call_debug_location),
        (_, "cttz64") => count_zeros_intrinsic(bcx,
                                               "llvm.cttz.i64",
                                               llargs[0],
                                               call_debug_location),

        (_, "i8_add_with_overflow") =>
            with_overflow_intrinsic(bcx,
                                    "llvm.sadd.with.overflow.i8",
                                    ret_ty,
                                    llargs[0],
                                    llargs[1],
                                    call_debug_location),
        (_, "i16_add_with_overflow") =>
            with_overflow_intrinsic(bcx,
                                    "llvm.sadd.with.overflow.i16",
                                    ret_ty,
                                    llargs[0],
                                    llargs[1],
                                    call_debug_location),
        (_, "i32_add_with_overflow") =>
            with_overflow_intrinsic(bcx,
                                    "llvm.sadd.with.overflow.i32",
                                    ret_ty,
                                    llargs[0],
                                    llargs[1],
                                    call_debug_location),
        (_, "i64_add_with_overflow") =>
            with_overflow_intrinsic(bcx,
                                    "llvm.sadd.with.overflow.i64",
                                    ret_ty,
                                    llargs[0],
                                    llargs[1],
                                    call_debug_location),

        (_, "u8_add_with_overflow") =>
            with_overflow_intrinsic(bcx,
                                    "llvm.uadd.with.overflow.i8",
                                    ret_ty,
                                    llargs[0],
                                    llargs[1],
                                    call_debug_location),
        (_, "u16_add_with_overflow") =>
            with_overflow_intrinsic(bcx,
                                    "llvm.uadd.with.overflow.i16",
                                    ret_ty,
                                    llargs[0],
                                    llargs[1],
                                    call_debug_location),
        (_, "u32_add_with_overflow") =>
            with_overflow_intrinsic(bcx,
                                    "llvm.uadd.with.overflow.i32",
                                    ret_ty,
                                    llargs[0],
                                    llargs[1],
                                    call_debug_location),
        (_, "u64_add_with_overflow") =>
            with_overflow_intrinsic(bcx,
                                    "llvm.uadd.with.overflow.i64",
                                    ret_ty,
                                    llargs[0],
                                    llargs[1],
                                    call_debug_location),
        (_, "i8_sub_with_overflow") =>
            with_overflow_intrinsic(bcx,
                                    "llvm.ssub.with.overflow.i8",
                                    ret_ty,
                                    llargs[0],
                                    llargs[1],
                                    call_debug_location),
        (_, "i16_sub_with_overflow") =>
            with_overflow_intrinsic(bcx,
                                    "llvm.ssub.with.overflow.i16",
                                    ret_ty,
                                    llargs[0],
                                    llargs[1],
                                    call_debug_location),
        (_, "i32_sub_with_overflow") =>
            with_overflow_intrinsic(bcx,
                                    "llvm.ssub.with.overflow.i32",
                                    ret_ty,
                                    llargs[0],
                                    llargs[1],
                                    call_debug_location),
        (_, "i64_sub_with_overflow") =>
            with_overflow_intrinsic(bcx,
                                    "llvm.ssub.with.overflow.i64",
                                    ret_ty,
                                    llargs[0],
                                    llargs[1],
                                    call_debug_location),
        (_, "u8_sub_with_overflow") =>
            with_overflow_intrinsic(bcx,
                                    "llvm.usub.with.overflow.i8",
                                    ret_ty,
                                    llargs[0],
                                    llargs[1],
                                    call_debug_location),
        (_, "u16_sub_with_overflow") =>
            with_overflow_intrinsic(bcx,
                                    "llvm.usub.with.overflow.i16",
                                    ret_ty,
                                    llargs[0],
                                    llargs[1],
                                    call_debug_location),
        (_, "u32_sub_with_overflow") =>
            with_overflow_intrinsic(bcx,
                                    "llvm.usub.with.overflow.i32",
                                    ret_ty,
                                    llargs[0],
                                    llargs[1],
                                    call_debug_location),
        (_, "u64_sub_with_overflow") =>
            with_overflow_intrinsic(bcx,
                                    "llvm.usub.with.overflow.i64",
                                    ret_ty,
                                    llargs[0],
                                    llargs[1],
                                    call_debug_location),
        (_, "i8_mul_with_overflow") =>
            with_overflow_intrinsic(bcx,
                                    "llvm.smul.with.overflow.i8",
                                    ret_ty,
                                    llargs[0],
                                    llargs[1],
                                    call_debug_location),
        (_, "i16_mul_with_overflow") =>
            with_overflow_intrinsic(bcx,
                                    "llvm.smul.with.overflow.i16",
                                    ret_ty,
                                    llargs[0],
                                    llargs[1],
                                    call_debug_location),
        (_, "i32_mul_with_overflow") =>
            with_overflow_intrinsic(bcx,
                                    "llvm.smul.with.overflow.i32",
                                    ret_ty,
                                    llargs[0],
                                    llargs[1],
                                    call_debug_location),
        (_, "i64_mul_with_overflow") =>
            with_overflow_intrinsic(bcx,
                                    "llvm.smul.with.overflow.i64",
                                    ret_ty,
                                    llargs[0],
                                    llargs[1],
                                    call_debug_location),
        (_, "u8_mul_with_overflow") =>
            with_overflow_intrinsic(bcx,
                                    "llvm.umul.with.overflow.i8",
                                    ret_ty,
                                    llargs[0],
                                    llargs[1],
                                    call_debug_location),
        (_, "u16_mul_with_overflow") =>
            with_overflow_intrinsic(bcx,
                                    "llvm.umul.with.overflow.i16",
                                    ret_ty,
                                    llargs[0],
                                    llargs[1],
                                    call_debug_location),
        (_, "u32_mul_with_overflow") =>
            with_overflow_intrinsic(bcx,
                                    "llvm.umul.with.overflow.i32",
                                    ret_ty,
                                    llargs[0],
                                    llargs[1],
                                    call_debug_location),
        (_, "u64_mul_with_overflow") =>
            with_overflow_intrinsic(bcx,
                                    "llvm.umul.with.overflow.i64",
                                    ret_ty,
                                    llargs[0],
                                    llargs[1],
                                    call_debug_location),

        (_, "unchecked_udiv") => UDiv(bcx, llargs[0], llargs[1], call_debug_location),
        (_, "unchecked_sdiv") => SDiv(bcx, llargs[0], llargs[1], call_debug_location),
        (_, "unchecked_urem") => URem(bcx, llargs[0], llargs[1], call_debug_location),
        (_, "unchecked_srem") => SRem(bcx, llargs[0], llargs[1], call_debug_location),

        (_, "overflowing_add") => Add(bcx, llargs[0], llargs[1], call_debug_location),
        (_, "overflowing_sub") => Sub(bcx, llargs[0], llargs[1], call_debug_location),
        (_, "overflowing_mul") => Mul(bcx, llargs[0], llargs[1], call_debug_location),

        (_, "return_address") => {
            if !fcx.caller_expects_out_pointer {
                tcx.sess.span_err(call_info.span,
                                  "invalid use of `return_address` intrinsic: function \
                                   does not use out pointer");
                C_null(Type::i8p(ccx))
            } else {
                PointerCast(bcx, llvm::get_param(fcx.llfn, 0), Type::i8p(ccx))
            }
        }

        (_, "discriminant_value") => {
            let val_ty = substs.types.get(FnSpace, 0);
            match val_ty.sty {
                ty::TyEnum(..) => {
                    let repr = adt::represent_type(ccx, *val_ty);
                    adt::trans_get_discr(bcx, &*repr, llargs[0], Some(llret_ty))
                }
                _ => C_null(llret_ty)
            }
        }

        // This requires that atomic intrinsics follow a specific naming pattern:
        // "atomic_<operation>[_<ordering>]", and no ordering means SeqCst
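        // (e.g. `atomic_xadd_relaxed` is a monotonic fetch-and-add, while a
        // bare `atomic_load` is a sequentially consistent load).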
        (_, name) if name.starts_with("atomic_") => {
            let split: Vec<&str> = name.split('_').collect();
            assert!(split.len() >= 2, "Atomic intrinsic not correct format");

            let order = if split.len() == 2 {
                llvm::SequentiallyConsistent
            } else {
                match split[2] {
                    "unordered" => llvm::Unordered,
                    "relaxed" => llvm::Monotonic,
                    "acq"     => llvm::Acquire,
                    "rel"     => llvm::Release,
                    "acqrel"  => llvm::AcquireRelease,
                    _ => ccx.sess().fatal("unknown ordering in atomic intrinsic")
                }
            };

            match split[1] {
                "cxchg" => {
                    // See include/llvm/IR/Instructions.h for LLVM's
                    // implementation of this; I assume it's good enough for
                    // us to use for now.
                    let strongest_failure_ordering = match order {
                        llvm::NotAtomic | llvm::Unordered =>
                            ccx.sess().fatal("cmpxchg must be atomic"),

                        llvm::Monotonic | llvm::Release =>
                            llvm::Monotonic,

                        llvm::Acquire | llvm::AcquireRelease =>
                            llvm::Acquire,

                        llvm::SequentiallyConsistent =>
                            llvm::SequentiallyConsistent
                    };

                    let tp_ty = *substs.types.get(FnSpace, 0);
                    let ptr = to_arg_ty_ptr(bcx, llargs[0], tp_ty);
                    let cmp = from_arg_ty(bcx, llargs[1], tp_ty);
                    let src = from_arg_ty(bcx, llargs[2], tp_ty);
                    let res = AtomicCmpXchg(bcx, ptr, cmp, src, order,
                                            strongest_failure_ordering);
                    ExtractValue(bcx, res, 0)
                }

                "load" => {
                    let tp_ty = *substs.types.get(FnSpace, 0);
                    let ptr = to_arg_ty_ptr(bcx, llargs[0], tp_ty);
                    to_arg_ty(bcx, AtomicLoad(bcx, ptr, order), tp_ty)
                }
                "store" => {
                    let tp_ty = *substs.types.get(FnSpace, 0);
                    let ptr = to_arg_ty_ptr(bcx, llargs[0], tp_ty);
                    let val = from_arg_ty(bcx, llargs[1], tp_ty);
                    AtomicStore(bcx, val, ptr, order);
                    C_nil(ccx)
                }

                "fence" => {
                    AtomicFence(bcx, order, llvm::CrossThread);
                    C_nil(ccx)
                }

                "singlethreadfence" => {
                    AtomicFence(bcx, order, llvm::SingleThread);
                    C_nil(ccx)
                }

                // These are all AtomicRMW ops
                op => {
                    let atom_op = match op {
                        "xchg"  => llvm::AtomicXchg,
                        "xadd"  => llvm::AtomicAdd,
                        "xsub"  => llvm::AtomicSub,
                        "and"   => llvm::AtomicAnd,
                        "nand"  => llvm::AtomicNand,
                        "or"    => llvm::AtomicOr,
                        "xor"   => llvm::AtomicXor,
                        "max"   => llvm::AtomicMax,
                        "min"   => llvm::AtomicMin,
                        "umax"  => llvm::AtomicUMax,
                        "umin"  => llvm::AtomicUMin,
                        _ => ccx.sess().fatal("unknown atomic operation")
                    };

                    let tp_ty = *substs.types.get(FnSpace, 0);
                    let ptr = to_arg_ty_ptr(bcx, llargs[0], tp_ty);
                    let val = from_arg_ty(bcx, llargs[1], tp_ty);
                    AtomicRMW(bcx, atom_op, ptr, val, order)
                }
            }

        }

        (_, _) => ccx.sess().span_bug(foreign_item.span, "unknown intrinsic")
    };

    if val_ty(llval) != Type::void(ccx) &&
       machine::llsize_of_alloc(ccx, val_ty(llval)) != 0 {
        store_ty(bcx, llval, llresult, ret_ty);
    }

    // If we made a temporary stack slot, let's clean it up
    match dest {
        expr::Ignore => {
            bcx = glue::drop_ty(bcx, llresult, ret_ty, call_debug_location);
        }
        expr::SaveIn(_) => {}
    }

    fcx.pop_and_trans_custom_cleanup_scope(bcx, cleanup_scope);

    Result::new(bcx, llresult)
}

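// Shared implementation of the `copy*` intrinsics: scales `count` by the size
// of `tp_ty` and emits a call to `llvm.memmove.*` (when overlap is allowed) or
// `llvm.memcpy.*`, forwarding the alignment of `tp_ty` and the `volatile`
// flag.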
fn copy_intrinsic<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
                              allow_overlap: bool,
                              volatile: bool,
                              tp_ty: Ty<'tcx>,
                              dst: ValueRef,
                              src: ValueRef,
                              count: ValueRef,
                              call_debug_location: DebugLoc)
                              -> ValueRef {
    let ccx = bcx.ccx();
    let lltp_ty = type_of::type_of(ccx, tp_ty);
    let align = C_i32(ccx, type_of::align_of(ccx, tp_ty) as i32);
    let size = machine::llsize_of(ccx, lltp_ty);
    let int_size = machine::llbitsize_of_real(ccx, ccx.int_type());
    let name = if allow_overlap {
        if int_size == 32 {
            "llvm.memmove.p0i8.p0i8.i32"
        } else {
            "llvm.memmove.p0i8.p0i8.i64"
        }
    } else {
        if int_size == 32 {
            "llvm.memcpy.p0i8.p0i8.i32"
        } else {
            "llvm.memcpy.p0i8.p0i8.i64"
        }
    };

    let dst_ptr = PointerCast(bcx, dst, Type::i8p(ccx));
    let src_ptr = PointerCast(bcx, src, Type::i8p(ccx));
    let llfn = ccx.get_intrinsic(&name);

    Call(bcx,
         llfn,
         &[dst_ptr,
           src_ptr,
           Mul(bcx, size, count, DebugLoc::None),
           align,
           C_bool(ccx, volatile)],
         None,
         call_debug_location)
}

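// Shared implementation of `write_bytes` and `volatile_set_memory`: emits a
// call to `llvm.memset.*` with `count` scaled by the size of `tp_ty`.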
fn memset_intrinsic<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
                                volatile: bool,
                                tp_ty: Ty<'tcx>,
                                dst: ValueRef,
                                val: ValueRef,
                                count: ValueRef,
                                call_debug_location: DebugLoc)
                                -> ValueRef {
    let ccx = bcx.ccx();
    let lltp_ty = type_of::type_of(ccx, tp_ty);
    let align = C_i32(ccx, type_of::align_of(ccx, tp_ty) as i32);
    let size = machine::llsize_of(ccx, lltp_ty);
    let name = if machine::llbitsize_of_real(ccx, ccx.int_type()) == 32 {
        "llvm.memset.p0i8.i32"
    } else {
        "llvm.memset.p0i8.i64"
    };

    let dst_ptr = PointerCast(bcx, dst, Type::i8p(ccx));
    let llfn = ccx.get_intrinsic(&name);

    Call(bcx,
         llfn,
         &[dst_ptr,
           val,
           Mul(bcx, size, count, DebugLoc::None),
           align,
           C_bool(ccx, volatile)],
         None,
         call_debug_location)
}

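// Shared implementation of the `ctlz*`/`cttz*` intrinsics. The constant
// `false` second argument tells LLVM the result is defined (the full bit
// width) even for a zero input.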
fn count_zeros_intrinsic(bcx: Block,
                         name: &'static str,
                         val: ValueRef,
                         call_debug_location: DebugLoc)
                         -> ValueRef {
    let y = C_bool(bcx.ccx(), false);
    let llfn = bcx.ccx().get_intrinsic(&name);
    Call(bcx, llfn, &[val, y], None, call_debug_location)
}

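// Shared implementation of the `*_with_overflow` intrinsics: calls the named
// `llvm.*.with.overflow.*` intrinsic, which yields an `{ iN, i1 }` pair, and
// repackages that pair as the Rust return type `t`, widening the `i1`
// overflow flag to a `bool`.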
fn with_overflow_intrinsic<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
                                       name: &'static str,
                                       t: Ty<'tcx>,
                                       a: ValueRef,
                                       b: ValueRef,
                                       call_debug_location: DebugLoc)
                                       -> ValueRef {
    let llfn = bcx.ccx().get_intrinsic(&name);

    // Convert `i1` to a `bool`, and write it to the out parameter
    let val = Call(bcx, llfn, &[a, b], None, call_debug_location);
    let result = ExtractValue(bcx, val, 0);
    let overflow = ZExt(bcx, ExtractValue(bcx, val, 1), Type::bool(bcx.ccx()));
    let ret = C_undef(type_of::type_of(bcx.ccx(), t));
    let ret = InsertValue(bcx, ret, result, 0);
    let ret = InsertValue(bcx, ret, overflow, 1);
    if !arg_is_indirect(bcx.ccx(), t) {
        let tmp = alloc_ty(bcx, t, "tmp");
        Store(bcx, ret, tmp);
        load_ty(bcx, tmp, t)
    } else {
        ret
    }
}

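// Translates the body of the `try` intrinsic. With landing pads disabled the
// function is simply called and null is stored to `dest`; otherwise we defer
// to the MSVC (SEH) or GNU (landingpad) flavor below, as appropriate for the
// target.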
fn try_intrinsic<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
                             func: ValueRef,
                             data: ValueRef,
                             dest: ValueRef,
                             dloc: DebugLoc) -> Block<'blk, 'tcx> {
    if bcx.sess().no_landing_pads() {
        Call(bcx, func, &[data], None, dloc);
        Store(bcx, C_null(Type::i8p(bcx.ccx())), dest);
        bcx
    } else if wants_msvc_seh(bcx.sess()) {
        trans_msvc_try(bcx, func, data, dest, dloc)
    } else {
        trans_gnu_try(bcx, func, data, dest, dloc)
    }
}

// MSVC's definition of the `rust_try` function. The exact implementation here
// is a little different from the GNU (standard) version below, not only
// because of the personality function but also because of the other fiddly
// bits about SEH. LLVM also currently requires us to structure this in a very
// particular way, as explained below.
//
// As with the GNU version, we generate a shim wrapper.
fn trans_msvc_try<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
                              func: ValueRef,
                              data: ValueRef,
                              dest: ValueRef,
                              dloc: DebugLoc) -> Block<'blk, 'tcx> {
    let llfn = get_rust_try_fn(bcx.fcx, &mut |try_fn_ty, output| {
        let ccx = bcx.ccx();
        let dloc = DebugLoc::None;
        let rust_try = declare::define_internal_rust_fn(ccx, "__rust_try",
                                                         try_fn_ty);
        let (fcx, block_arena);
        block_arena = TypedArena::new();
        fcx = new_fn_ctxt(ccx, rust_try, ast::DUMMY_NODE_ID, false,
                          output, ccx.tcx().mk_substs(Substs::trans_empty()),
                          None, &block_arena);
        let bcx = init_function(&fcx, true, output);
        let then = fcx.new_temp_block("then");
        let catch = fcx.new_temp_block("catch");
        let catch_return = fcx.new_temp_block("catch-return");
        let catch_resume = fcx.new_temp_block("catch-resume");
        let personality = fcx.eh_personality();

        let eh_typeid_for = ccx.get_intrinsic(&"llvm.eh.typeid.for");
        let rust_try_filter = match bcx.tcx().lang_items.msvc_try_filter() {
            Some(did) => callee::trans_fn_ref(ccx, did, ExprId(0),
                                              bcx.fcx.param_substs).val,
            None => bcx.sess().bug("msvc_try_filter not defined"),
        };

        // Type indicator for the exception being thrown; not entirely sure
        // what's going on here, but it's what all the examples in LLVM use.
        let lpad_ty = Type::struct_(ccx, &[Type::i8p(ccx), Type::i32(ccx)],
                                    false);

        llvm::SetFunctionAttribute(rust_try, llvm::Attribute::NoInline);
        llvm::SetFunctionAttribute(rust_try, llvm::Attribute::OptimizeNone);
        let func = llvm::get_param(rust_try, 0);
        let data = llvm::get_param(rust_try, 1);

        // Invoke the function, specifying our two temporary landing pads as the
        // exit points. After the invoke we've terminated our basic block.
        Invoke(bcx, func, &[data], then.llbb, catch.llbb, None, dloc);

        // All the magic happens in this landing pad, and this is basically the
        // only landing pad in rust tagged with "catch" to indicate that we're
        // catching an exception. The other catch handlers in the GNU version
        // below just catch *all* exceptions, but that's because most exceptions
        // are already filtered out by the gnu personality function.
        //
        // For MSVC we're just using a standard personality function that we
        // can't customize (e.g. _except_handler3 or __C_specific_handler), so
        // we need to do the exception filtering ourselves. This is currently
        // performed by the `__rust_try_filter` function. This function,
        // specified in the landingpad instruction, will be invoked by Windows
        // SEH routines and will return whether the exception in question can be
        // caught (aka the Rust runtime is the one that threw the exception).
        //
        // To get this to compile (currently LLVM segfaults if it's not in this
        // particular structure), when the landingpad is executing we test to
        // make sure that the ID of the exception being thrown is indeed the one
        // that we were expecting. If it's not, we resume the exception, and
        // otherwise we return the pointer that we got. Full disclosure: it's not
        // clear to me what this `llvm.eh.typeid` stuff is doing *other* than
        // just allowing LLVM to compile this file without segfaulting. I would
        // expect the entire landing pad to just be:
        //
        //     %vals = landingpad ...
        //     %ehptr = extractvalue { i8*, i32 } %vals, 0
        //     ret i8* %ehptr
        //
        // but apparently LLVM chokes on this, so we do the more complicated
        // thing to placate it.
        let vals = LandingPad(catch, lpad_ty, personality, 1);
        let rust_try_filter = BitCast(catch, rust_try_filter, Type::i8p(ccx));
        AddClause(catch, vals, rust_try_filter);
        let ehptr = ExtractValue(catch, vals, 0);
        let sel = ExtractValue(catch, vals, 1);
        let filter_sel = Call(catch, eh_typeid_for, &[rust_try_filter], None,
                              dloc);
        let is_filter = ICmp(catch, llvm::IntEQ, sel, filter_sel, dloc);
        CondBr(catch, is_filter, catch_return.llbb, catch_resume.llbb, dloc);

        // Our "catch-return" basic block is where we've determined that we
        // actually need to catch this exception, in which case we just return
        // the exception pointer.
        Ret(catch_return, ehptr, dloc);

        // The "catch-resume" block is where we're running this landing pad but
        // we actually need to not catch the exception, so just resume the
        // exception to return.
        Resume(catch_resume, vals);

        // On the successful branch we just return null.
        Ret(then, C_null(Type::i8p(ccx)), dloc);

        return rust_try
    });

    // Note that no invoke is used here because by definition this function
    // can't panic (that's what it's catching).
    let ret = Call(bcx, llfn, &[func, data], None, dloc);
    Store(bcx, ret, dest);
    return bcx;
}

// Definition of the standard "try" function for Rust using the GNU-like model
// of exceptions (e.g. the normal semantics of LLVM's landingpad and invoke
// instructions).
//
// This translation is a little surprising because we always call a shim
// function instead of inlining the call to `invoke` manually here. This is
// done because in LLVM we're only allowed to have one personality per function
// definition. The call to the `try` intrinsic is being inlined into the
// function calling it, and that function may already have other personality
// functions in play. By calling a shim we're guaranteed that our shim will
// have the right personality function.
//
fn trans_gnu_try<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
                             func: ValueRef,
                             data: ValueRef,
                             dest: ValueRef,
                             dloc: DebugLoc) -> Block<'blk, 'tcx> {
    let llfn = get_rust_try_fn(bcx.fcx, &mut |try_fn_ty, output| {
        let ccx = bcx.ccx();
        let dloc = DebugLoc::None;

        // Translates the shims described above:
        //
        //   bcx:
        //      invoke %func(%args...) normal %normal unwind %catch
        //
        //   normal:
        //      ret null
        //
        //   catch:
        //      (ptr, _) = landingpad
        //      ret ptr

        let rust_try = declare::define_internal_rust_fn(ccx, "__rust_try", try_fn_ty);
        attributes::emit_uwtable(rust_try, true);
        let catch_pers = match bcx.tcx().lang_items.eh_personality_catch() {
            Some(did) => callee::trans_fn_ref(ccx, did, ExprId(0),
                                              bcx.fcx.param_substs).val,
            None => bcx.tcx().sess.bug("eh_personality_catch not defined"),
        };

        let (fcx, block_arena);
        block_arena = TypedArena::new();
        fcx = new_fn_ctxt(ccx, rust_try, ast::DUMMY_NODE_ID, false,
                          output, ccx.tcx().mk_substs(Substs::trans_empty()),
                          None, &block_arena);
        let bcx = init_function(&fcx, true, output);
        let then = bcx.fcx.new_temp_block("then");
        let catch = bcx.fcx.new_temp_block("catch");

        let func = llvm::get_param(rust_try, 0);
        let data = llvm::get_param(rust_try, 1);
        Invoke(bcx, func, &[data], then.llbb, catch.llbb, None, dloc);
        Ret(then, C_null(Type::i8p(ccx)), dloc);

        // Type indicator for the exception being thrown.
        // The first value in this tuple is a pointer to the exception object being thrown.
        // The second value is a "selector" indicating which of the landing pad clauses
        // the exception's type had been matched to.  rust_try ignores the selector.
        let lpad_ty = Type::struct_(ccx, &[Type::i8p(ccx), Type::i32(ccx)],
                                    false);
        let vals = LandingPad(catch, lpad_ty, catch_pers, 1);
        AddClause(catch, vals, C_null(Type::i8p(ccx)));
        let ptr = ExtractValue(catch, vals, 0);
        Ret(catch, ptr, dloc);
        fcx.cleanup();

        return rust_try
    });

    // Note that no invoke is used here because by definition this function
    // can't panic (that's what it's catching).
    let ret = Call(bcx, llfn, &[func, data], None, dloc);
    Store(bcx, ret, dest);
    return bcx;
}

// Helper to generate the `Ty` associated with `rust_try`
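// and to build and memoize the shim itself. The shim's signature is
// `unsafe fn(fn(*mut i8), *mut i8) -> *mut i8`; the built function is cached
// in `ccx.rust_try_fn()` so it is only translated once per `CrateContext`.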
fn get_rust_try_fn<'a, 'tcx>(fcx: &FunctionContext<'a, 'tcx>,
                             f: &mut FnMut(Ty<'tcx>,
                                           ty::FnOutput<'tcx>) -> ValueRef)
                             -> ValueRef {
    let ccx = fcx.ccx;
    if let Some(llfn) = *ccx.rust_try_fn().borrow() {
        return llfn
    }

    // Define the type up front for the signature of the rust_try function.
    let tcx = ccx.tcx();
    let i8p = tcx.mk_mut_ptr(tcx.types.i8);
    let fn_ty = tcx.mk_bare_fn(ty::BareFnTy {
        unsafety: ast::Unsafety::Unsafe,
        abi: abi::Rust,
        sig: ty::Binder(ty::FnSig {
            inputs: vec![i8p],
            output: ty::FnOutput::FnConverging(tcx.mk_nil()),
            variadic: false,
        }),
    });
    let fn_ty = tcx.mk_fn(None, fn_ty);
    let output = ty::FnOutput::FnConverging(i8p);
    let try_fn_ty = tcx.mk_bare_fn(ty::BareFnTy {
        unsafety: ast::Unsafety::Unsafe,
        abi: abi::Rust,
        sig: ty::Binder(ty::FnSig {
            inputs: vec![fn_ty, i8p],
            output: output,
            variadic: false,
        }),
    });
    let rust_try = f(tcx.mk_fn(None, try_fn_ty), output);
    *ccx.rust_try_fn().borrow_mut() = Some(rust_try);
    return rust_try
}
1272 }