// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.

#![allow(non_upper_case_globals)]

use arena::TypedArena;
use intrinsics::{self, Intrinsic};
use libc;
use llvm;
use llvm::{ValueRef, TypeKind};
use rustc::ty::subst;
use rustc::ty::subst::FnSpace;
use abi::{Abi, FnType};
use adt;
use attributes;
use base::*;
use build::*;
use callee::{self, Callee};
use cleanup;
use cleanup::CleanupMethods;
use common::*;
use consts;
use datum::*;
use debuginfo::DebugLoc;
use declare;
use expr;
use glue;
use type_of;
use machine;
use type_::Type;
use rustc::ty::{self, Ty};
use Disr;
use rustc::ty::subst::Substs;
use rustc::hir;
use syntax::ast;
use syntax::ptr::P;
use syntax::parse::token;

use rustc::session::Session;
use rustc_const_eval::fatal_const_eval_err;
use syntax_pos::{Span, DUMMY_SP};

use std::cmp::Ordering;

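/// Maps a simple intrinsic name directly to the LLVM intrinsic of the same
/// meaning and width (e.g. `sqrtf32` -> `llvm.sqrt.f32`); returns `None` for
/// anything that needs more involved translation below.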
fn get_simple_intrinsic(ccx: &CrateContext, name: &str) -> Option<ValueRef> {
    let llvm_name = match name {
        "sqrtf32" => "llvm.sqrt.f32",
        "sqrtf64" => "llvm.sqrt.f64",
        "powif32" => "llvm.powi.f32",
        "powif64" => "llvm.powi.f64",
        "sinf32" => "llvm.sin.f32",
        "sinf64" => "llvm.sin.f64",
        "cosf32" => "llvm.cos.f32",
        "cosf64" => "llvm.cos.f64",
        "powf32" => "llvm.pow.f32",
        "powf64" => "llvm.pow.f64",
        "expf32" => "llvm.exp.f32",
        "expf64" => "llvm.exp.f64",
        "exp2f32" => "llvm.exp2.f32",
        "exp2f64" => "llvm.exp2.f64",
        "logf32" => "llvm.log.f32",
        "logf64" => "llvm.log.f64",
        "log10f32" => "llvm.log10.f32",
        "log10f64" => "llvm.log10.f64",
        "log2f32" => "llvm.log2.f32",
        "log2f64" => "llvm.log2.f64",
        "fmaf32" => "llvm.fma.f32",
        "fmaf64" => "llvm.fma.f64",
        "fabsf32" => "llvm.fabs.f32",
        "fabsf64" => "llvm.fabs.f64",
        "copysignf32" => "llvm.copysign.f32",
        "copysignf64" => "llvm.copysign.f64",
        "floorf32" => "llvm.floor.f32",
        "floorf64" => "llvm.floor.f64",
        "ceilf32" => "llvm.ceil.f32",
        "ceilf64" => "llvm.ceil.f64",
        "truncf32" => "llvm.trunc.f32",
        "truncf64" => "llvm.trunc.f64",
        "rintf32" => "llvm.rint.f32",
        "rintf64" => "llvm.rint.f64",
        "nearbyintf32" => "llvm.nearbyint.f32",
        "nearbyintf64" => "llvm.nearbyint.f64",
        "roundf32" => "llvm.round.f32",
        "roundf64" => "llvm.round.f64",
        "assume" => "llvm.assume",
        _ => return None
    };
    Some(ccx.get_intrinsic(&llvm_name))
}

/// Remember to add all intrinsics here, in librustc_typeck/check/mod.rs,
/// and in libcore/intrinsics.rs; if you need access to any LLVM intrinsics,
/// add them to librustc_trans/trans/context.rs.
pub fn trans_intrinsic_call<'a, 'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>,
                                            callee_ty: Ty<'tcx>,
                                            fn_ty: &FnType,
                                            args: callee::CallArgs<'a, 'tcx>,
                                            dest: expr::Dest,
                                            call_debug_location: DebugLoc)
                                            -> Result<'blk, 'tcx> {
    let fcx = bcx.fcx;
    let ccx = fcx.ccx;
    let tcx = bcx.tcx();

    let _icx = push_ctxt("trans_intrinsic_call");

    let (def_id, substs, sig) = match callee_ty.sty {
        ty::TyFnDef(def_id, substs, fty) => {
            let sig = tcx.erase_late_bound_regions(&fty.sig);
            (def_id, substs, tcx.normalize_associated_type(&sig))
        }
        _ => bug!("expected fn item type, found {}", callee_ty)
    };
    let arg_tys = sig.inputs;
    let ret_ty = sig.output;
    let name = tcx.item_name(def_id).as_str();

    let span = match call_debug_location {
        DebugLoc::At(_, span) | DebugLoc::ScopeAt(_, span) => span,
        DebugLoc::None => {
            span_bug!(fcx.span.unwrap_or(DUMMY_SP),
                      "intrinsic `{}` called with missing span", name);
        }
    };

    let cleanup_scope = fcx.push_custom_cleanup_scope();

    // For `transmute` we can just trans the input expr directly into dest
    if name == "transmute" {
        let llret_ty = type_of::type_of(ccx, ret_ty.unwrap());
        match args {
            callee::ArgExprs(arg_exprs) => {
                assert_eq!(arg_exprs.len(), 1);

                let (in_type, out_type) = (*substs.types.get(FnSpace, 0),
                                           *substs.types.get(FnSpace, 1));
                let llintype = type_of::type_of(ccx, in_type);
                let llouttype = type_of::type_of(ccx, out_type);

                let in_type_size = machine::llbitsize_of_real(ccx, llintype);
                let out_type_size = machine::llbitsize_of_real(ccx, llouttype);

                if let ty::TyFnDef(def_id, substs, _) = in_type.sty {
                    if out_type_size != 0 {
                        // FIXME #19925 Remove this hack after a release cycle.
                        let _ = unpack_datum!(bcx, expr::trans(bcx, &arg_exprs[0]));
                        let llfn = Callee::def(ccx, def_id, substs).reify(ccx).val;
                        let llfnty = val_ty(llfn);
                        let llresult = match dest {
                            expr::SaveIn(d) => d,
                            expr::Ignore => alloc_ty(bcx, out_type, "ret")
                        };
                        Store(bcx, llfn, PointerCast(bcx, llresult, llfnty.ptr_to()));
                        if dest == expr::Ignore {
                            bcx = glue::drop_ty(bcx, llresult, out_type,
                                                call_debug_location);
                        }
                        fcx.scopes.borrow_mut().last_mut().unwrap().drop_non_lifetime_clean();
                        fcx.pop_and_trans_custom_cleanup_scope(bcx, cleanup_scope);
                        return Result::new(bcx, llresult);
                    }
                }

                // This should be caught by the intrinsicck pass
                assert_eq!(in_type_size, out_type_size);

                let nonpointer_nonaggregate = |llkind: TypeKind| -> bool {
                    use llvm::TypeKind::*;
                    match llkind {
                        Half | Float | Double | X86_FP80 | FP128 |
                            PPC_FP128 | Integer | Vector | X86_MMX => true,
                        _ => false
                    }
                };

                // An approximation to which types can be directly cast via
                // LLVM's bitcast.  This doesn't cover pointer -> pointer casts,
                // but does, importantly, cover SIMD types.
                let in_kind = llintype.kind();
                let ret_kind = llret_ty.kind();
                let bitcast_compatible =
                    (nonpointer_nonaggregate(in_kind) && nonpointer_nonaggregate(ret_kind)) || {
                        in_kind == TypeKind::Pointer && ret_kind == TypeKind::Pointer
                    };

                let dest = if bitcast_compatible {
                    // if we're here, the type is scalar-like (a primitive, a
                    // SIMD type or a pointer), and so can be handled as a
                    // by-value ValueRef and can also be directly bitcast to the
                    // target type.  Doing this special case makes conversions
                    // like `u32x4` -> `u64x2` much nicer for LLVM and so more
                    // efficient (these are done efficiently implicitly in C
                    // with the `__m128i` type and so this means Rust doesn't
                    // lose out there).
                    let expr = &arg_exprs[0];
                    let datum = unpack_datum!(bcx, expr::trans(bcx, expr));
                    let datum = unpack_datum!(bcx, datum.to_rvalue_datum(bcx, "transmute_temp"));
                    let val = if datum.kind.is_by_ref() {
                        load_ty(bcx, datum.val, datum.ty)
                    } else {
                        from_immediate(bcx, datum.val)
                    };

                    let cast_val = BitCast(bcx, val, llret_ty);

                    match dest {
                        expr::SaveIn(d) => {
                            // this often occurs in a sequence like `Store(val,
                            // d); val2 = Load(d)`, so disappears easily.
                            Store(bcx, cast_val, d);
                        }
                        expr::Ignore => {}
                    }
                    dest
                } else {
                    // The types are too complicated to do with a by-value
                    // bitcast, so pointer cast instead. We need to cast the
                    // dest so the types work out.
                    let dest = match dest {
                        expr::SaveIn(d) => expr::SaveIn(PointerCast(bcx, d, llintype.ptr_to())),
                        expr::Ignore => expr::Ignore
                    };
                    bcx = expr::trans_into(bcx, &arg_exprs[0], dest);
                    dest
                };

                fcx.scopes.borrow_mut().last_mut().unwrap().drop_non_lifetime_clean();
                fcx.pop_and_trans_custom_cleanup_scope(bcx, cleanup_scope);

                return match dest {
                    expr::SaveIn(d) => Result::new(bcx, d),
                    expr::Ignore => Result::new(bcx, C_undef(llret_ty.ptr_to()))
                };

            }

            _ => {
                bug!("expected expr as argument for transmute");
            }
        }
    }

    // For `move_val_init` we can evaluate the destination address
    // (the first argument) and then trans the source value (the
    // second argument) directly into the resulting destination
    // address.
    if name == "move_val_init" {
        if let callee::ArgExprs(ref exprs) = args {
            let (dest_expr, source_expr) = if exprs.len() != 2 {
                bug!("expected two exprs as arguments for `move_val_init` intrinsic");
            } else {
                (&exprs[0], &exprs[1])
            };

            // evaluate destination address
            let dest_datum = unpack_datum!(bcx, expr::trans(bcx, dest_expr));
            let dest_datum = unpack_datum!(
                bcx, dest_datum.to_rvalue_datum(bcx, "arg"));
            let dest_datum = unpack_datum!(
                bcx, dest_datum.to_appropriate_datum(bcx));

            // `expr::trans_into(bcx, expr, dest)` is equiv to
            //
            //    `trans(bcx, expr).store_to_dest(dest)`,
            //
            // which for `dest == expr::SaveIn(addr)`, is equivalent to:
            //
            //    `trans(bcx, expr).store_to(bcx, addr)`.
            let lldest = expr::Dest::SaveIn(dest_datum.val);
            bcx = expr::trans_into(bcx, source_expr, lldest);

            let llresult = C_nil(ccx);
            fcx.pop_and_trans_custom_cleanup_scope(bcx, cleanup_scope);

            return Result::new(bcx, llresult);
        } else {
            bug!("expected two exprs as arguments for `move_val_init` intrinsic");
        }
    }

    // save the actual AST arguments for later (some places need to do
    // const-evaluation on them)
    let expr_arguments = match args {
        callee::ArgExprs(args) => Some(args),
        _ => None,
    };

    // Push the arguments.
    let mut llargs = Vec::new();
    bcx = callee::trans_args(bcx,
                             Abi::RustIntrinsic,
                             fn_ty,
                             &mut callee::Intrinsic,
                             args,
                             &mut llargs,
                             cleanup::CustomScope(cleanup_scope));

    fcx.scopes.borrow_mut().last_mut().unwrap().drop_non_lifetime_clean();

    // These are the only intrinsic functions that diverge.
    if name == "abort" {
        let llfn = ccx.get_intrinsic(&("llvm.trap"));
        Call(bcx, llfn, &[], call_debug_location);
        fcx.pop_and_trans_custom_cleanup_scope(bcx, cleanup_scope);
        Unreachable(bcx);
        return Result::new(bcx, C_undef(Type::nil(ccx).ptr_to()));
    } else if &name[..] == "unreachable" {
        fcx.pop_and_trans_custom_cleanup_scope(bcx, cleanup_scope);
        Unreachable(bcx);
        return Result::new(bcx, C_nil(ccx));
    }

    let ret_ty = match ret_ty {
        ty::FnConverging(ret_ty) => ret_ty,
        ty::FnDiverging => bug!()
    };

    let llret_ty = type_of::type_of(ccx, ret_ty);

    // Get location to store the result. If the user does
    // not care about the result, just make a stack slot
    let llresult = match dest {
        expr::SaveIn(d) => d,
        expr::Ignore => {
            if !type_is_zero_size(ccx, ret_ty) {
                let llresult = alloc_ty(bcx, ret_ty, "intrinsic_result");
                call_lifetime_start(bcx, llresult);
                llresult
            } else {
                C_undef(llret_ty.ptr_to())
            }
        }
    };

    let simple = get_simple_intrinsic(ccx, &name);
    let llval = match (simple, &name[..]) {
        (Some(llfn), _) => {
            Call(bcx, llfn, &llargs, call_debug_location)
        }
        (_, "try") => {
            bcx = try_intrinsic(bcx, llargs[0], llargs[1], llargs[2], llresult,
                                call_debug_location);
            C_nil(ccx)
        }
        (_, "breakpoint") => {
            let llfn = ccx.get_intrinsic(&("llvm.debugtrap"));
            Call(bcx, llfn, &[], call_debug_location)
        }
        (_, "size_of") => {
            let tp_ty = *substs.types.get(FnSpace, 0);
            let lltp_ty = type_of::type_of(ccx, tp_ty);
            C_uint(ccx, machine::llsize_of_alloc(ccx, lltp_ty))
        }
        (_, "size_of_val") => {
            let tp_ty = *substs.types.get(FnSpace, 0);
            if !type_is_sized(tcx, tp_ty) {
                let (llsize, _) =
                    glue::size_and_align_of_dst(&bcx.build(), tp_ty, llargs[1]);
                llsize
            } else {
                let lltp_ty = type_of::type_of(ccx, tp_ty);
                C_uint(ccx, machine::llsize_of_alloc(ccx, lltp_ty))
            }
        }
        (_, "min_align_of") => {
            let tp_ty = *substs.types.get(FnSpace, 0);
            C_uint(ccx, type_of::align_of(ccx, tp_ty))
        }
        (_, "min_align_of_val") => {
            let tp_ty = *substs.types.get(FnSpace, 0);
            if !type_is_sized(tcx, tp_ty) {
                let (_, llalign) =
                    glue::size_and_align_of_dst(&bcx.build(), tp_ty, llargs[1]);
                llalign
            } else {
                C_uint(ccx, type_of::align_of(ccx, tp_ty))
            }
        }
        (_, "pref_align_of") => {
            let tp_ty = *substs.types.get(FnSpace, 0);
            let lltp_ty = type_of::type_of(ccx, tp_ty);
            C_uint(ccx, machine::llalign_of_pref(ccx, lltp_ty))
        }
        (_, "drop_in_place") => {
            let tp_ty = *substs.types.get(FnSpace, 0);
            let ptr = if type_is_sized(tcx, tp_ty) {
                llargs[0]
            } else {
                let scratch = rvalue_scratch_datum(bcx, tp_ty, "tmp");
                Store(bcx, llargs[0], expr::get_dataptr(bcx, scratch.val));
                Store(bcx, llargs[1], expr::get_meta(bcx, scratch.val));
                fcx.schedule_lifetime_end(cleanup::CustomScope(cleanup_scope), scratch.val);
                scratch.val
            };
            glue::drop_ty(bcx, ptr, tp_ty, call_debug_location);
            C_nil(ccx)
        }
        (_, "type_name") => {
            let tp_ty = *substs.types.get(FnSpace, 0);
            let ty_name = token::intern_and_get_ident(&tp_ty.to_string());
            C_str_slice(ccx, ty_name)
        }
        (_, "type_id") => {
            let hash = ccx.tcx().hash_crate_independent(*substs.types.get(FnSpace, 0),
                                                        &ccx.link_meta().crate_hash);
            C_u64(ccx, hash)
        }
        (_, "init_dropped") => {
            let tp_ty = *substs.types.get(FnSpace, 0);
            if !type_is_zero_size(ccx, tp_ty) {
                drop_done_fill_mem(bcx, llresult, tp_ty);
            }
            C_nil(ccx)
        }
        (_, "init") => {
            let tp_ty = *substs.types.get(FnSpace, 0);
            if !type_is_zero_size(ccx, tp_ty) {
                // Just zero out the stack slot. (See comment on base::memzero for explanation)
                init_zero_mem(bcx, llresult, tp_ty);
            }
            C_nil(ccx)
        }
        // Effectively no-ops
        (_, "uninit") | (_, "forget") => {
            C_nil(ccx)
        }
        (_, "needs_drop") => {
            let tp_ty = *substs.types.get(FnSpace, 0);

            C_bool(ccx, bcx.fcx.type_needs_drop(tp_ty))
        }
        (_, "offset") => {
            let ptr = llargs[0];
            let offset = llargs[1];
            InBoundsGEP(bcx, ptr, &[offset])
        }
        (_, "arith_offset") => {
            let ptr = llargs[0];
            let offset = llargs[1];
            GEP(bcx, ptr, &[offset])
        }

        (_, "copy_nonoverlapping") => {
            copy_intrinsic(bcx,
                           false,
                           false,
                           *substs.types.get(FnSpace, 0),
                           llargs[1],
                           llargs[0],
                           llargs[2],
                           call_debug_location)
        }
        (_, "copy") => {
            copy_intrinsic(bcx,
                           true,
                           false,
                           *substs.types.get(FnSpace, 0),
                           llargs[1],
                           llargs[0],
                           llargs[2],
                           call_debug_location)
        }
        (_, "write_bytes") => {
            memset_intrinsic(bcx,
                             false,
                             *substs.types.get(FnSpace, 0),
                             llargs[0],
                             llargs[1],
                             llargs[2],
                             call_debug_location)
        }

        (_, "volatile_copy_nonoverlapping_memory") => {
            copy_intrinsic(bcx,
                           false,
                           true,
                           *substs.types.get(FnSpace, 0),
                           llargs[0],
                           llargs[1],
                           llargs[2],
                           call_debug_location)
        }
        (_, "volatile_copy_memory") => {
            copy_intrinsic(bcx,
                           true,
                           true,
                           *substs.types.get(FnSpace, 0),
                           llargs[0],
                           llargs[1],
                           llargs[2],
                           call_debug_location)
        }
        (_, "volatile_set_memory") => {
            memset_intrinsic(bcx,
                             true,
                             *substs.types.get(FnSpace, 0),
                             llargs[0],
                             llargs[1],
                             llargs[2],
                             call_debug_location)
        }
        (_, "volatile_load") => {
            let tp_ty = *substs.types.get(FnSpace, 0);
            let mut ptr = llargs[0];
            if let Some(ty) = fn_ty.ret.cast {
                ptr = PointerCast(bcx, ptr, ty.ptr_to());
            }
            let load = VolatileLoad(bcx, ptr);
            unsafe {
                llvm::LLVMSetAlignment(load, type_of::align_of(ccx, tp_ty));
            }
            to_immediate(bcx, load, tp_ty)
        },
        (_, "volatile_store") => {
            let tp_ty = *substs.types.get(FnSpace, 0);
            if type_is_fat_ptr(bcx.tcx(), tp_ty) {
                VolatileStore(bcx, llargs[1], expr::get_dataptr(bcx, llargs[0]));
                VolatileStore(bcx, llargs[2], expr::get_meta(bcx, llargs[0]));
            } else {
                let val = if fn_ty.args[1].is_indirect() {
                    Load(bcx, llargs[1])
                } else {
                    from_immediate(bcx, llargs[1])
                };
                let ptr = PointerCast(bcx, llargs[0], val_ty(val).ptr_to());
                let store = VolatileStore(bcx, val, ptr);
                unsafe {
                    llvm::LLVMSetAlignment(store, type_of::align_of(ccx, tp_ty));
                }
            }
            C_nil(ccx)
        },

        (_, "ctlz") | (_, "cttz") | (_, "ctpop") | (_, "bswap") |
        (_, "add_with_overflow") | (_, "sub_with_overflow") | (_, "mul_with_overflow") |
        (_, "overflowing_add") | (_, "overflowing_sub") | (_, "overflowing_mul") |
        (_, "unchecked_div") | (_, "unchecked_rem") => {
            let sty = &arg_tys[0].sty;
            match int_type_width_signed(sty, ccx) {
                Some((width, signed)) =>
                    match &*name {
                        "ctlz" => count_zeros_intrinsic(bcx, &format!("llvm.ctlz.i{}", width),
                                                        llargs[0], call_debug_location),
                        "cttz" => count_zeros_intrinsic(bcx, &format!("llvm.cttz.i{}", width),
                                                        llargs[0], call_debug_location),
                        "ctpop" => Call(bcx, ccx.get_intrinsic(&format!("llvm.ctpop.i{}", width)),
                                        &llargs, call_debug_location),
                        "bswap" => {
                            if width == 8 {
                                llargs[0] // byte-swapping a u8/i8 is a no-op
                            } else {
                                Call(bcx, ccx.get_intrinsic(&format!("llvm.bswap.i{}", width)),
                                        &llargs, call_debug_location)
                            }
                        }
                        "add_with_overflow" | "sub_with_overflow" | "mul_with_overflow" => {
                            let intrinsic = format!("llvm.{}{}.with.overflow.i{}",
                                                    if signed { 's' } else { 'u' },
                                                    &name[..3], width);
                            with_overflow_intrinsic(bcx, &intrinsic, llargs[0], llargs[1], llresult,
                                                    call_debug_location)
                        },
                        "overflowing_add" => Add(bcx, llargs[0], llargs[1], call_debug_location),
                        "overflowing_sub" => Sub(bcx, llargs[0], llargs[1], call_debug_location),
                        "overflowing_mul" => Mul(bcx, llargs[0], llargs[1], call_debug_location),
                        "unchecked_div" =>
                            if signed {
                                SDiv(bcx, llargs[0], llargs[1], call_debug_location)
                            } else {
                                UDiv(bcx, llargs[0], llargs[1], call_debug_location)
                            },
                        "unchecked_rem" =>
                            if signed {
                                SRem(bcx, llargs[0], llargs[1], call_debug_location)
                            } else {
                                URem(bcx, llargs[0], llargs[1], call_debug_location)
                            },
                        _ => bug!(),
                    },
                None => {
                    span_invalid_monomorphization_error(
                        tcx.sess, span,
                        &format!("invalid monomorphization of `{}` intrinsic: \
                                  expected basic integer type, found `{}`", name, sty));
                    C_nil(ccx)
                }
            }
        },
        (_, "fadd_fast") | (_, "fsub_fast") | (_, "fmul_fast") | (_, "fdiv_fast") |
        (_, "frem_fast") => {
            let sty = &arg_tys[0].sty;
            match float_type_width(sty) {
                Some(_width) =>
                    match &*name {
                        "fadd_fast" => FAddFast(bcx, llargs[0], llargs[1], call_debug_location),
                        "fsub_fast" => FSubFast(bcx, llargs[0], llargs[1], call_debug_location),
                        "fmul_fast" => FMulFast(bcx, llargs[0], llargs[1], call_debug_location),
                        "fdiv_fast" => FDivFast(bcx, llargs[0], llargs[1], call_debug_location),
                        "frem_fast" => FRemFast(bcx, llargs[0], llargs[1], call_debug_location),
                        _ => bug!(),
                    },
                None => {
                    span_invalid_monomorphization_error(
                        tcx.sess, span,
                        &format!("invalid monomorphization of `{}` intrinsic: \
                                  expected basic float type, found `{}`", name, sty));
                    C_nil(ccx)
                }
            }
        },

        (_, "discriminant_value") => {
            let val_ty = substs.types.get(FnSpace, 0);
            match val_ty.sty {
                ty::TyEnum(..) => {
                    let repr = adt::represent_type(ccx, *val_ty);
                    adt::trans_get_discr(bcx, &repr, llargs[0],
                                         Some(llret_ty), true)
                }
                _ => C_null(llret_ty)
            }
        }
        (_, name) if name.starts_with("simd_") => {
            generic_simd_intrinsic(bcx, name,
                                   substs,
                                   callee_ty,
                                   expr_arguments,
                                   &llargs,
                                   ret_ty, llret_ty,
                                   call_debug_location,
                                   span)
        }
        // This requires that atomic intrinsics follow a specific naming pattern:
        // "atomic_<operation>[_<ordering>]"; omitting the ordering means SeqCst.
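        // For example, `atomic_xadd` is a SeqCst RMW add, `atomic_load_acq`
        // is an acquire load, and `atomic_cxchg_acqrel_failrelaxed` is a
        // compare-exchange with acquire-release success ordering and relaxed
        // failure ordering.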
        (_, name) if name.starts_with("atomic_") => {
            let split: Vec<&str> = name.split('_').collect();

            let is_cxchg = split[1] == "cxchg" || split[1] == "cxchgweak";
            let (order, failorder) = match split.len() {
                2 => (llvm::SequentiallyConsistent, llvm::SequentiallyConsistent),
                3 => match split[2] {
                    "unordered" => (llvm::Unordered, llvm::Unordered),
                    "relaxed" => (llvm::Monotonic, llvm::Monotonic),
                    "acq"     => (llvm::Acquire, llvm::Acquire),
                    "rel"     => (llvm::Release, llvm::Monotonic),
                    "acqrel"  => (llvm::AcquireRelease, llvm::Acquire),
                    "failrelaxed" if is_cxchg =>
                        (llvm::SequentiallyConsistent, llvm::Monotonic),
                    "failacq" if is_cxchg =>
                        (llvm::SequentiallyConsistent, llvm::Acquire),
                    _ => ccx.sess().fatal("unknown ordering in atomic intrinsic")
                },
                4 => match (split[2], split[3]) {
                    ("acq", "failrelaxed") if is_cxchg =>
                        (llvm::Acquire, llvm::Monotonic),
                    ("acqrel", "failrelaxed") if is_cxchg =>
                        (llvm::AcquireRelease, llvm::Monotonic),
                    _ => ccx.sess().fatal("unknown ordering in atomic intrinsic")
                },
                _ => ccx.sess().fatal("Atomic intrinsic not in correct format"),
            };
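
            // Note that `failorder` is only consumed by the `cxchg`/`cxchgweak`
            // arms below; every other atomic operation uses just `order`.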
            match split[1] {
                "cxchg" | "cxchgweak" => {
                    let sty = &substs.types.get(FnSpace, 0).sty;
                    if int_type_width_signed(sty, ccx).is_some() {
                        let weak = if split[1] == "cxchgweak" { llvm::True } else { llvm::False };
                        let val = AtomicCmpXchg(bcx, llargs[0], llargs[1], llargs[2],
                                                order, failorder, weak);
                        let result = ExtractValue(bcx, val, 0);
                        let success = ZExt(bcx, ExtractValue(bcx, val, 1), Type::bool(bcx.ccx()));
                        Store(bcx, result, StructGEP(bcx, llresult, 0));
                        Store(bcx, success, StructGEP(bcx, llresult, 1));
                    } else {
                        span_invalid_monomorphization_error(
                            tcx.sess, span,
                            &format!("invalid monomorphization of `{}` intrinsic: \
                                      expected basic integer type, found `{}`", name, sty));
                    }
                    C_nil(ccx)
                }

                "load" => {
                    let sty = &substs.types.get(FnSpace, 0).sty;
                    if int_type_width_signed(sty, ccx).is_some() {
                        AtomicLoad(bcx, llargs[0], order)
                    } else {
                        span_invalid_monomorphization_error(
                            tcx.sess, span,
                            &format!("invalid monomorphization of `{}` intrinsic: \
                                      expected basic integer type, found `{}`", name, sty));
                        C_nil(ccx)
                    }
                }

                "store" => {
                    let sty = &substs.types.get(FnSpace, 0).sty;
                    if int_type_width_signed(sty, ccx).is_some() {
                        AtomicStore(bcx, llargs[1], llargs[0], order);
                    } else {
                        span_invalid_monomorphization_error(
                            tcx.sess, span,
                            &format!("invalid monomorphization of `{}` intrinsic: \
                                      expected basic integer type, found `{}`", name, sty));
                    }
                    C_nil(ccx)
                }

                "fence" => {
                    AtomicFence(bcx, order, llvm::CrossThread);
                    C_nil(ccx)
                }

                "singlethreadfence" => {
                    AtomicFence(bcx, order, llvm::SingleThread);
                    C_nil(ccx)
                }

                // These are all AtomicRMW ops
                op => {
                    let atom_op = match op {
                        "xchg"  => llvm::AtomicXchg,
                        "xadd"  => llvm::AtomicAdd,
                        "xsub"  => llvm::AtomicSub,
                        "and"   => llvm::AtomicAnd,
                        "nand"  => llvm::AtomicNand,
                        "or"    => llvm::AtomicOr,
                        "xor"   => llvm::AtomicXor,
                        "max"   => llvm::AtomicMax,
                        "min"   => llvm::AtomicMin,
                        "umax"  => llvm::AtomicUMax,
                        "umin"  => llvm::AtomicUMin,
                        _ => ccx.sess().fatal("unknown atomic operation")
                    };

                    let sty = &substs.types.get(FnSpace, 0).sty;
                    if int_type_width_signed(sty, ccx).is_some() {
                        AtomicRMW(bcx, atom_op, llargs[0], llargs[1], order)
                    } else {
                        span_invalid_monomorphization_error(
                            tcx.sess, span,
                            &format!("invalid monomorphization of `{}` intrinsic: \
                                      expected basic integer type, found `{}`", name, sty));
                        C_nil(ccx)
                    }
                }
            }

        }

        (_, _) => {
            let intr = match Intrinsic::find(&name) {
                Some(intr) => intr,
                None => bug!("unknown intrinsic '{}'", name),
            };
            fn one<T>(x: Vec<T>) -> T {
                assert_eq!(x.len(), 1);
                x.into_iter().next().unwrap()
            }
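            // Converts a type description from `intrinsics::Type` into the
            // corresponding LLVM type(s), setting `any_changes_needed` when
            // the LLVM-level signature will differ from the Rust-level one
            // (flattened aggregates, truncated integers, pointer casts).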
            fn ty_to_type(ccx: &CrateContext, t: &intrinsics::Type,
                          any_changes_needed: &mut bool) -> Vec<Type> {
                use intrinsics::Type::*;
                match *t {
                    Void => vec![Type::void(ccx)],
                    Integer(_signed, width, llvm_width) => {
                        *any_changes_needed |= width != llvm_width;
                        vec![Type::ix(ccx, llvm_width as u64)]
                    }
                    Float(x) => {
                        match x {
                            32 => vec![Type::f32(ccx)],
                            64 => vec![Type::f64(ccx)],
                            _ => bug!()
                        }
                    }
                    Pointer(ref t, ref llvm_elem, _const) => {
                        *any_changes_needed |= llvm_elem.is_some();

                        let t = llvm_elem.as_ref().unwrap_or(t);
                        let elem = one(ty_to_type(ccx, t,
                                                  any_changes_needed));
                        vec![elem.ptr_to()]
                    }
                    Vector(ref t, ref llvm_elem, length) => {
                        *any_changes_needed |= llvm_elem.is_some();

                        let t = llvm_elem.as_ref().unwrap_or(t);
                        let elem = one(ty_to_type(ccx, t,
                                                  any_changes_needed));
                        vec![Type::vector(&elem,
                                          length as u64)]
                    }
                    Aggregate(false, ref contents) => {
                        let elems = contents.iter()
                                            .map(|t| one(ty_to_type(ccx, t, any_changes_needed)))
                                            .collect::<Vec<_>>();
                        vec![Type::struct_(ccx, &elems, false)]
                    }
                    Aggregate(true, ref contents) => {
                        *any_changes_needed = true;
                        contents.iter()
                                .flat_map(|t| ty_to_type(ccx, t, any_changes_needed))
                                .collect()
                    }
                }
            }

            // This allows an argument list like `foo, (bar, baz),
            // qux` to be converted into `foo, bar, baz, qux`, integer
            // arguments to be truncated as needed and pointers to be
            // cast.
            fn modify_as_needed<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
                                            t: &intrinsics::Type,
                                            arg_type: Ty<'tcx>,
                                            llarg: ValueRef)
                                            -> Vec<ValueRef>
            {
                match *t {
                    intrinsics::Type::Aggregate(true, ref contents) => {
                        // We found a tuple that needs squishing! So
                        // run over the tuple and load each field.
                        //
                        // This assumes the type is "simple", i.e. no
                        // destructors, and the contents are SIMD
                        // etc.
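                        // For instance, a `(f32x4, f32x4)` tuple argument
                        // is passed on to LLVM as two separate vector
                        // arguments.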
                        assert!(!bcx.fcx.type_needs_drop(arg_type));

                        let repr = adt::represent_type(bcx.ccx(), arg_type);
                        let repr_ptr = &repr;
                        let arg = adt::MaybeSizedValue::sized(llarg);
                        (0..contents.len())
                            .map(|i| {
                                Load(bcx, adt::trans_field_ptr(bcx, repr_ptr, arg, Disr(0), i))
                            })
                            .collect()
                    }
                    intrinsics::Type::Pointer(_, Some(ref llvm_elem), _) => {
                        let llvm_elem = one(ty_to_type(bcx.ccx(), llvm_elem, &mut false));
                        vec![PointerCast(bcx, llarg,
                                         llvm_elem.ptr_to())]
                    }
                    intrinsics::Type::Vector(_, Some(ref llvm_elem), length) => {
                        let llvm_elem = one(ty_to_type(bcx.ccx(), llvm_elem, &mut false));
                        vec![BitCast(bcx, llarg,
                                     Type::vector(&llvm_elem, length as u64))]
                    }
                    intrinsics::Type::Integer(_, width, llvm_width) if width != llvm_width => {
                        // the LLVM intrinsic uses a smaller integer
                        // size than the C intrinsic's signature, so
                        // we have to trim it down here.
                        vec![Trunc(bcx, llarg, Type::ix(bcx.ccx(), llvm_width as u64))]
                    }
                    _ => vec![llarg],
                }
            }


            let mut any_changes_needed = false;
            let inputs = intr.inputs.iter()
                                    .flat_map(|t| ty_to_type(ccx, t, &mut any_changes_needed))
                                    .collect::<Vec<_>>();

            let mut out_changes = false;
            let outputs = one(ty_to_type(ccx, &intr.output, &mut out_changes));
            // outputting a flattened aggregate is nonsense
            assert!(!out_changes);

            let llargs = if !any_changes_needed {
                // no aggregates to flatten, so no change needed
                llargs
            } else {
                // there are some aggregates that need to be flattened
                // in the LLVM call, so we need to run over the types
                // again to find them and extract the arguments
                intr.inputs.iter()
                           .zip(&llargs)
                           .zip(&arg_tys)
                           .flat_map(|((t, llarg), ty)| modify_as_needed(bcx, t, ty, *llarg))
                           .collect()
            };
            assert_eq!(inputs.len(), llargs.len());

            let val = match intr.definition {
                intrinsics::IntrinsicDef::Named(name) => {
                    let f = declare::declare_cfn(ccx,
                                                 name,
                                                 Type::func(&inputs, &outputs));
                    Call(bcx, f, &llargs, call_debug_location)
                }
            };

            match *intr.output {
                intrinsics::Type::Aggregate(flatten, ref elems) => {
                    // the output is a tuple so we need to munge it properly
                    assert!(!flatten);

                    for i in 0..elems.len() {
                        let val = ExtractValue(bcx, val, i);
                        Store(bcx, val, StructGEP(bcx, llresult, i));
                    }
                    C_nil(ccx)
                }
                _ => val,
            }
        }
    };

    if val_ty(llval) != Type::void(ccx) &&
       machine::llsize_of_alloc(ccx, val_ty(llval)) != 0 {
        if let Some(ty) = fn_ty.ret.cast {
            let ptr = PointerCast(bcx, llresult, ty.ptr_to());
            let store = Store(bcx, llval, ptr);
            unsafe {
                llvm::LLVMSetAlignment(store, type_of::align_of(ccx, ret_ty));
            }
        } else {
            store_ty(bcx, llval, llresult, ret_ty);
        }
    }

    // If we made a temporary stack slot, let's clean it up
    match dest {
        expr::Ignore => {
            bcx = glue::drop_ty(bcx, llresult, ret_ty, call_debug_location);
            call_lifetime_end(bcx, llresult);
        }
        expr::SaveIn(_) => {}
    }

    fcx.pop_and_trans_custom_cleanup_scope(bcx, cleanup_scope);

    Result::new(bcx, llresult)
}

fn copy_intrinsic<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
                              allow_overlap: bool,
                              volatile: bool,
                              tp_ty: Ty<'tcx>,
                              dst: ValueRef,
                              src: ValueRef,
                              count: ValueRef,
                              call_debug_location: DebugLoc)
                              -> ValueRef {
    let ccx = bcx.ccx();
    let lltp_ty = type_of::type_of(ccx, tp_ty);
    let align = C_i32(ccx, type_of::align_of(ccx, tp_ty) as i32);
    let size = machine::llsize_of(ccx, lltp_ty);
    let int_size = machine::llbitsize_of_real(ccx, ccx.int_type());

    let operation = if allow_overlap {
        "memmove"
    } else {
        "memcpy"
    };

    let name = format!("llvm.{}.p0i8.p0i8.i{}", operation, int_size);
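    // e.g. "llvm.memmove.p0i8.p0i8.i64" on a 64-bit target when overlap is
    // allowed, or "llvm.memcpy.p0i8.p0i8.i64" when it is not.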

    let dst_ptr = PointerCast(bcx, dst, Type::i8p(ccx));
    let src_ptr = PointerCast(bcx, src, Type::i8p(ccx));
    let llfn = ccx.get_intrinsic(&name);

    Call(bcx,
         llfn,
         &[dst_ptr,
           src_ptr,
           Mul(bcx, size, count, DebugLoc::None),
           align,
           C_bool(ccx, volatile)],
         call_debug_location)
}

fn memset_intrinsic<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
                                volatile: bool,
                                tp_ty: Ty<'tcx>,
                                dst: ValueRef,
                                val: ValueRef,
                                count: ValueRef,
                                call_debug_location: DebugLoc)
                                -> ValueRef {
    let ccx = bcx.ccx();
    let lltp_ty = type_of::type_of(ccx, tp_ty);
    let align = C_i32(ccx, type_of::align_of(ccx, tp_ty) as i32);
    let size = machine::llsize_of(ccx, lltp_ty);
    let int_size = machine::llbitsize_of_real(ccx, ccx.int_type());

    let name = format!("llvm.memset.p0i8.i{}", int_size);

    let dst_ptr = PointerCast(bcx, dst, Type::i8p(ccx));
    let llfn = ccx.get_intrinsic(&name);

    Call(bcx,
         llfn,
         &[dst_ptr,
           val,
           Mul(bcx, size, count, DebugLoc::None),
           align,
           C_bool(ccx, volatile)],
         call_debug_location)
}

fn count_zeros_intrinsic(bcx: Block,
                         name: &str,
                         val: ValueRef,
                         call_debug_location: DebugLoc)
                         -> ValueRef {
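    // The second argument to llvm.ctlz/llvm.cttz is an `is_zero_undef` flag;
    // passing `false` keeps the result defined (the integer's bit width) for
    // a zero input.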
    let y = C_bool(bcx.ccx(), false);
    let llfn = bcx.ccx().get_intrinsic(&name);
    Call(bcx, llfn, &[val, y], call_debug_location)
}

fn with_overflow_intrinsic<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
                                       name: &str,
                                       a: ValueRef,
                                       b: ValueRef,
                                       out: ValueRef,
                                       call_debug_location: DebugLoc)
                                       -> ValueRef {
    let llfn = bcx.ccx().get_intrinsic(&name);

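    // The `llvm.*.with.overflow.*` intrinsics return a `{ iN, i1 }` pair
    // holding the (wrapped) result and an overflow flag.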
    // Convert `i1` to a `bool`, and write it to the out parameter
    let val = Call(bcx, llfn, &[a, b], call_debug_location);
    let result = ExtractValue(bcx, val, 0);
    let overflow = ZExt(bcx, ExtractValue(bcx, val, 1), Type::bool(bcx.ccx()));
    Store(bcx, result, StructGEP(bcx, out, 0));
    Store(bcx, overflow, StructGEP(bcx, out, 1));

    C_nil(bcx.ccx())
}

fn try_intrinsic<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
                             func: ValueRef,
                             data: ValueRef,
                             local_ptr: ValueRef,
                             dest: ValueRef,
                             dloc: DebugLoc) -> Block<'blk, 'tcx> {
    if bcx.sess().no_landing_pads() {
        Call(bcx, func, &[data], dloc);
        Store(bcx, C_null(Type::i8p(bcx.ccx())), dest);
        bcx
    } else if wants_msvc_seh(bcx.sess()) {
        trans_msvc_try(bcx, func, data, local_ptr, dest, dloc)
    } else {
        trans_gnu_try(bcx, func, data, local_ptr, dest, dloc)
    }
}

// MSVC's definition of the `rust_try` function.
//
// This implementation uses LLVM's new exception-handling instructions, which
// support SEH on MSVC targets. Although these instructions are meant to work
// for all targets, as of this writing LLVM does not recommend using them in
// general, as the older instructions are still better optimized.
fn trans_msvc_try<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
                              func: ValueRef,
                              data: ValueRef,
                              local_ptr: ValueRef,
                              dest: ValueRef,
                              dloc: DebugLoc) -> Block<'blk, 'tcx> {
    let llfn = get_rust_try_fn(bcx.fcx, &mut |bcx| {
        let ccx = bcx.ccx();
        let dloc = DebugLoc::None;

        SetPersonalityFn(bcx, bcx.fcx.eh_personality());

        let normal = bcx.fcx.new_temp_block("normal");
        let catchswitch = bcx.fcx.new_temp_block("catchswitch");
        let catchpad = bcx.fcx.new_temp_block("catchpad");
        let caught = bcx.fcx.new_temp_block("caught");

        let func = llvm::get_param(bcx.fcx.llfn, 0);
        let data = llvm::get_param(bcx.fcx.llfn, 1);
        let local_ptr = llvm::get_param(bcx.fcx.llfn, 2);

        // We're generating an IR snippet that looks like:
        //
        //   declare i32 @rust_try(%func, %data, %ptr) {
        //      %slot = alloca i64*
        //      invoke %func(%data) to label %normal unwind label %catchswitch
        //
        //   normal:
        //      ret i32 0
        //
        //   catchswitch:
        //      %cs = catchswitch within none [%catchpad] unwind to caller
        //
        //   catchpad:
        //      %tok = catchpad within %cs [%type_descriptor, 0, %slot]
        //      %ptr[0] = %slot[0]
        //      %ptr[1] = %slot[1]
        //      catchret from %tok to label %caught
        //
        //   caught:
        //      ret i32 1
        //   }
        //
        // This structure follows the basic usage of throw/try/catch in LLVM.
        // For example, compile this C++ snippet to see what LLVM generates:
        //
        //      #include <stdint.h>
        //
        //      int bar(void (*foo)(void), uint64_t *ret) {
        //          try {
        //              foo();
        //              return 0;
        //          } catch(uint64_t a[2]) {
        //              ret[0] = a[0];
        //              ret[1] = a[1];
        //              return 1;
        //          }
        //      }
        //
        // More information can be found in libstd's seh.rs implementation.
        let i64p = Type::i64(ccx).ptr_to();
        let slot = Alloca(bcx, i64p, "slot");
        Invoke(bcx, func, &[data], normal.llbb, catchswitch.llbb, dloc);

        Ret(normal, C_i32(ccx, 0), dloc);

        let cs = CatchSwitch(catchswitch, None, None, 1);
        AddHandler(catchswitch, cs, catchpad.llbb);

        let tcx = ccx.tcx();
        let tydesc = match tcx.lang_items.msvc_try_filter() {
            Some(did) => ::consts::get_static(ccx, did).to_llref(),
            None => bug!("msvc_try_filter not defined"),
        };
        let tok = CatchPad(catchpad, cs, &[tydesc, C_i32(ccx, 0), slot]);
        let addr = Load(catchpad, slot);
        let arg1 = Load(catchpad, addr);
        let val1 = C_i32(ccx, 1);
        let arg2 = Load(catchpad, InBoundsGEP(catchpad, addr, &[val1]));
        let local_ptr = BitCast(catchpad, local_ptr, i64p);
        Store(catchpad, arg1, local_ptr);
        Store(catchpad, arg2, InBoundsGEP(catchpad, local_ptr, &[val1]));
        CatchRet(catchpad, tok, caught.llbb);

        Ret(caught, C_i32(ccx, 1), dloc);
    });

    // Note that no invoke is used here because by definition this function
    // can't panic (that's what it's catching).
    let ret = Call(bcx, llfn, &[func, data, local_ptr], dloc);
    Store(bcx, ret, dest);
    return bcx
}

// Definition of the standard "try" function for Rust using the GNU-like model
// of exceptions (i.e. the normal semantics of LLVM's landingpad and invoke
// instructions).
1160 //
1161 // This translation is a little surprising because we always call a shim
1162 // function instead of inlining the call to `invoke` manually here. This is done
1163 // because in LLVM we're only allowed to have one personality per function
1164 // definition. The call to the `try` intrinsic is being inlined into the
1165 // function calling it, and that function may already have other personality
1166 // functions in play. By calling a shim we're guaranteed that our shim will have
1167 // the right personality function.
1168 fn trans_gnu_try<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
1169                              func: ValueRef,
1170                              data: ValueRef,
1171                              local_ptr: ValueRef,
1172                              dest: ValueRef,
1173                              dloc: DebugLoc) -> Block<'blk, 'tcx> {
1174     let llfn = get_rust_try_fn(bcx.fcx, &mut |bcx| {
1175         let ccx = bcx.ccx();
1176         let tcx = ccx.tcx();
1177         let dloc = DebugLoc::None;
1178
1179         // Translates the shims described above:
1180         //
1181         //   bcx:
1182         //      invoke %func(%args...) normal %normal unwind %catch
1183         //
1184         //   normal:
1185         //      ret 0
1186         //
1187         //   catch:
1188         //      (ptr, _) = landingpad
1189         //      store ptr, %local_ptr
1190         //      ret 1
1191         //
1192         // Note that the `local_ptr` data passed into the `try` intrinsic is
1193         // expected to be `*mut *mut u8` for this to actually work, but that's
1194         // managed by the standard library.

        attributes::emit_uwtable(bcx.fcx.llfn, true);
        let target = &bcx.sess().target.target;
        let catch_pers = if target.arch == "arm" && target.target_os != "ios" {
            // Only ARM still uses a separate catch personality (for now)
            match tcx.lang_items.eh_personality_catch() {
                Some(did) => {
                    Callee::def(ccx, did, tcx.mk_substs(Substs::empty())).reify(ccx).val
                }
                None => bug!("eh_personality_catch not defined"),
            }
        } else {
            bcx.fcx.eh_personality()
        };

        let then = bcx.fcx.new_temp_block("then");
        let catch = bcx.fcx.new_temp_block("catch");

        let func = llvm::get_param(bcx.fcx.llfn, 0);
        let data = llvm::get_param(bcx.fcx.llfn, 1);
        let local_ptr = llvm::get_param(bcx.fcx.llfn, 2);
        Invoke(bcx, func, &[data], then.llbb, catch.llbb, dloc);
        Ret(then, C_i32(ccx, 0), dloc);

        // Type indicator for the exception being thrown.
        //
        // The first value in this tuple is a pointer to the exception object
        // being thrown.  The second value is a "selector" indicating which of
        // the landing pad clauses the exception's type had been matched to.
        // rust_try ignores the selector.
        let lpad_ty = Type::struct_(ccx, &[Type::i8p(ccx), Type::i32(ccx)],
                                    false);
        let vals = LandingPad(catch, lpad_ty, catch_pers, 1);
        AddClause(catch, vals, C_null(Type::i8p(ccx)));
        let ptr = ExtractValue(catch, vals, 0);
        Store(catch, ptr, BitCast(catch, local_ptr, Type::i8p(ccx).ptr_to()));
        Ret(catch, C_i32(ccx, 1), dloc);
    });

    // Note that no invoke is used here because by definition this function
    // can't panic (that's what it's catching).
    let ret = Call(bcx, llfn, &[func, data, local_ptr], dloc);
    Store(bcx, ret, dest);
    return bcx;
}
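
// As a conceptual sketch (illustrative only, not the code that is emitted),
// the generated shim behaves like this Rust:
//
//      unsafe fn __rust_try(func: unsafe fn(*mut u8), data: *mut u8,
//                           local_ptr: *mut u8) -> i32 {
//          match catch_the_unwind(|| func(data)) {
//              Ok(()) => 0,
//              Err(exception) => {
//                  *(local_ptr as *mut *mut u8) = exception;
//                  1
//              }
//          }
//      }
//
// where `catch_the_unwind` is a made-up stand-in for the invoke/landingpad
// machinery built above.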

// Helper function that declares a shim function and hands the given closure a
// fresh Block in which to translate the shim's body. This is currently
// primarily used for the `try` intrinsic functions above.
fn gen_fn<'a, 'tcx>(fcx: &FunctionContext<'a, 'tcx>,
                    name: &str,
                    inputs: Vec<Ty<'tcx>>,
                    output: ty::FnOutput<'tcx>,
                    trans: &mut for<'b> FnMut(Block<'b, 'tcx>))
                    -> ValueRef {
    let ccx = fcx.ccx;
    let sig = ty::FnSig {
        inputs: inputs,
        output: output,
        variadic: false,
    };
    let fn_ty = FnType::new(ccx, Abi::Rust, &sig, &[]);

    let rust_fn_ty = ccx.tcx().mk_fn_ptr(ccx.tcx().mk_bare_fn(ty::BareFnTy {
        unsafety: hir::Unsafety::Unsafe,
        abi: Abi::Rust,
        sig: ty::Binder(sig)
    }));
    let llfn = declare::define_internal_fn(ccx, name, rust_fn_ty);
    let (fcx, block_arena);
    block_arena = TypedArena::new();
    fcx = FunctionContext::new(ccx, llfn, fn_ty, None, &block_arena);
    let bcx = fcx.init(true, None);
    trans(bcx);
    fcx.cleanup();
    llfn
}
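
// A hypothetical caller (names here are made up, for illustration only)
// supplies the signature plus a closure that emits the body:
//
//      let llfn = gen_fn(fcx, "__example_shim", vec![i8p, i8p],
//                        ty::FnOutput::FnConverging(tcx.types.i32),
//                        &mut |bcx| { /* emit the shim body into bcx */ });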

// Helper used to get a handle to the `__rust_try` function that catches
// exceptions.
//
// This function is only generated once and is then cached.
fn get_rust_try_fn<'a, 'tcx>(fcx: &FunctionContext<'a, 'tcx>,
                             trans: &mut for<'b> FnMut(Block<'b, 'tcx>))
                             -> ValueRef {
    let ccx = fcx.ccx;
    if let Some(llfn) = ccx.rust_try_fn().get() {
        return llfn;
    }

    // Define the type up front for the signature of the rust_try function.
    let tcx = ccx.tcx();
    let i8p = tcx.mk_mut_ptr(tcx.types.i8);
    let fn_ty = tcx.mk_fn_ptr(tcx.mk_bare_fn(ty::BareFnTy {
        unsafety: hir::Unsafety::Unsafe,
        abi: Abi::Rust,
        sig: ty::Binder(ty::FnSig {
            inputs: vec![i8p],
            output: ty::FnOutput::FnConverging(tcx.mk_nil()),
            variadic: false,
        }),
    }));
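    // Together with the two raw-pointer arguments and the i32 result below,
    // the generated shim ends up with (roughly) this Rust-level signature:
    //
    //      unsafe fn __rust_try(f: unsafe fn(*mut u8), data: *mut u8,
    //                           local_ptr: *mut u8) -> i32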
    let output = ty::FnOutput::FnConverging(tcx.types.i32);
    let rust_try = gen_fn(fcx, "__rust_try", vec![fn_ty, i8p, i8p], output, trans);
    ccx.rust_try_fn().set(Some(rust_try));
    rust_try
}

fn span_invalid_monomorphization_error(sess: &Session, span: Span, msg: &str) {
    span_err!(sess, span, E0511, "{}", msg);
}

fn generic_simd_intrinsic<'blk, 'tcx, 'a>
    (bcx: Block<'blk, 'tcx>,
     name: &str,
     substs: &'tcx subst::Substs<'tcx>,
     callee_ty: Ty<'tcx>,
     args: Option<&[P<hir::Expr>]>,
     llargs: &[ValueRef],
     ret_ty: Ty<'tcx>,
     llret_ty: Type,
     call_debug_location: DebugLoc,
     span: Span) -> ValueRef
{
    // macros for error handling:
    macro_rules! emit_error {
        ($msg: tt) => {
            emit_error!($msg, )
        };
        ($msg: tt, $($fmt: tt)*) => {
            span_invalid_monomorphization_error(
                bcx.sess(), span,
                &format!(concat!("invalid monomorphization of `{}` intrinsic: ",
                                 $msg),
                         name, $($fmt)*));
        }
    }
    macro_rules! require {
        ($cond: expr, $($fmt: tt)*) => {
            if !$cond {
                emit_error!($($fmt)*);
                return C_nil(bcx.ccx())
            }
        }
    }
    macro_rules! require_simd {
        ($ty: expr, $position: expr) => {
            require!($ty.is_simd(), "expected SIMD {} type, found non-SIMD `{}`", $position, $ty)
        }
    }
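
    // For instance (illustrative), instantiating `simd_add` at the scalar
    // type `i32` would trip `require_simd!` on the first argument and report:
    //
    //      error[E0511]: invalid monomorphization of `simd_add` intrinsic:
    //                    expected SIMD input type, found non-SIMD `i32`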

    let tcx = bcx.tcx();
    let sig = tcx.erase_late_bound_regions(callee_ty.fn_sig());
    let sig = tcx.normalize_associated_type(&sig);
    let arg_tys = sig.inputs;

    // every intrinsic takes a SIMD vector as its first argument
    require_simd!(arg_tys[0], "input");
    let in_ty = arg_tys[0];
    let in_elem = arg_tys[0].simd_type(tcx);
    let in_len = arg_tys[0].simd_size(tcx);

    let comparison = match name {
        "simd_eq" => Some(hir::BiEq),
        "simd_ne" => Some(hir::BiNe),
        "simd_lt" => Some(hir::BiLt),
        "simd_le" => Some(hir::BiLe),
        "simd_gt" => Some(hir::BiGt),
        "simd_ge" => Some(hir::BiGe),
        _ => None
    };

    if let Some(cmp_op) = comparison {
        require_simd!(ret_ty, "return");

        let out_len = ret_ty.simd_size(tcx);
        require!(in_len == out_len,
                 "expected return type with length {} (same as input type `{}`), \
                  found `{}` with length {}",
                 in_len, in_ty,
                 ret_ty, out_len);
        require!(llret_ty.element_type().kind() == llvm::Integer,
                 "expected return type with integer elements, found `{}` with non-integer `{}`",
                 ret_ty,
                 ret_ty.simd_type(tcx));

        return compare_simd_types(bcx,
                                  llargs[0],
                                  llargs[1],
                                  in_elem,
                                  llret_ty,
                                  cmp_op,
                                  call_debug_location)
    }
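
    // For reference (illustrative lowering), `compare_simd_types` turns e.g.
    // `simd_lt` on two `<4 x i32>` vectors into an `icmp` plus a sign
    // extension of the `i1` mask back to the integer return vector, roughly:
    //
    //      %m = icmp slt <4 x i32> %a, %b
    //      %r = sext <4 x i1> %m to <4 x i32>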

    if name.starts_with("simd_shuffle") {
        let n: usize = match name["simd_shuffle".len()..].parse() {
            Ok(n) => n,
            Err(_) => span_bug!(span,
                                "bad `simd_shuffle` instruction only caught in trans?")
        };

        require_simd!(ret_ty, "return");

        let out_len = ret_ty.simd_size(tcx);
        require!(out_len == n,
                 "expected return type of length {}, found `{}` with length {}",
                 n, ret_ty, out_len);
        require!(in_elem == ret_ty.simd_type(tcx),
                 "expected return element type `{}` (element of input `{}`), \
                  found `{}` with element type `{}`",
                 in_elem, in_ty,
                 ret_ty, ret_ty.simd_type(tcx));

        let total_len = in_len as u64 * 2;

        let vector = match args {
            Some(args) => {
                match consts::const_expr(bcx.ccx(), &args[2], substs, None,
                                         // this should probably help simd error reporting
                                         consts::TrueConst::Yes) {
                    Ok((vector, _)) => vector,
                    Err(err) => {
                        fatal_const_eval_err(bcx.tcx(), err.as_inner(), span,
                                             "shuffle indices");
                    }
                }
            }
            None => llargs[2]
        };

        let indices: Option<Vec<_>> = (0..n)
            .map(|i| {
                let val = const_get_elt(vector, &[i as libc::c_uint]);
                match const_to_opt_uint(val) {
                    None => {
                        emit_error!("shuffle index #{} is not a constant", i);
                        None
                    }
                    Some(idx) if idx >= total_len => {
                        emit_error!("shuffle index #{} is out of bounds (limit {})",
                                    i, total_len);
                        None
                    }
                    Some(idx) => Some(C_i32(bcx.ccx(), idx as i32)),
                }
            })
            .collect();
        let indices = match indices {
            Some(i) => i,
            None => return C_null(llret_ty)
        };

        return ShuffleVector(bcx, llargs[0], llargs[1], C_vector(&indices))
    }
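
    // Illustrative example: with 4-lane inputs, `simd_shuffle4(a, b,
    // [0, 5, 2, 7])` picks lanes from the 8-lane concatenation `a ++ b`
    // (lane 5 is `b`'s lane 1), which is why every index must be a constant
    // below `total_len` (here 8).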

    if name == "simd_insert" {
        require!(in_elem == arg_tys[2],
                 "expected inserted type `{}` (element of input `{}`), found `{}`",
                 in_elem, in_ty, arg_tys[2]);
        return InsertElement(bcx, llargs[0], llargs[2], llargs[1])
    }
    if name == "simd_extract" {
        require!(ret_ty == in_elem,
                 "expected return type `{}` (element of input `{}`), found `{}`",
                 in_elem, in_ty, ret_ty);
        return ExtractElement(bcx, llargs[0], llargs[1])
    }
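
    // Note the argument order above: `simd_insert(v, idx, elem)` lowers to
    // `insertelement` with the element (`llargs[2]`) before the index
    // (`llargs[1]`), matching LLVM's operand order.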

    if name == "simd_cast" {
        require_simd!(ret_ty, "return");
        let out_len = ret_ty.simd_size(tcx);
        require!(in_len == out_len,
                 "expected return type with length {} (same as input type `{}`), \
                  found `{}` with length {}",
                 in_len, in_ty,
                 ret_ty, out_len);
        // casting cares about nominal type, not just structural type
        let out_elem = ret_ty.simd_type(tcx);

        if in_elem == out_elem { return llargs[0]; }

        enum Style { Float, Int(/* is signed? */ bool), Unsupported }

        let (in_style, in_width) = match in_elem.sty {
            // vectors of pointer-sized integers should've been
            // disallowed before here, so this unwrap is safe.
            ty::TyInt(i) => (Style::Int(true), i.bit_width().unwrap()),
            ty::TyUint(u) => (Style::Int(false), u.bit_width().unwrap()),
            ty::TyFloat(f) => (Style::Float, f.bit_width()),
            _ => (Style::Unsupported, 0)
        };
        let (out_style, out_width) = match out_elem.sty {
            ty::TyInt(i) => (Style::Int(true), i.bit_width().unwrap()),
            ty::TyUint(u) => (Style::Int(false), u.bit_width().unwrap()),
            ty::TyFloat(f) => (Style::Float, f.bit_width()),
            _ => (Style::Unsupported, 0)
        };

        match (in_style, out_style) {
            (Style::Int(in_is_signed), Style::Int(_)) => {
                return match in_width.cmp(&out_width) {
                    Ordering::Greater => Trunc(bcx, llargs[0], llret_ty),
                    Ordering::Equal => llargs[0],
                    Ordering::Less => if in_is_signed {
                        SExt(bcx, llargs[0], llret_ty)
                    } else {
                        ZExt(bcx, llargs[0], llret_ty)
                    }
                }
            }
            (Style::Int(in_is_signed), Style::Float) => {
                return if in_is_signed {
                    SIToFP(bcx, llargs[0], llret_ty)
                } else {
                    UIToFP(bcx, llargs[0], llret_ty)
                }
            }
            (Style::Float, Style::Int(out_is_signed)) => {
                return if out_is_signed {
                    FPToSI(bcx, llargs[0], llret_ty)
                } else {
                    FPToUI(bcx, llargs[0], llret_ty)
                }
            }
            (Style::Float, Style::Float) => {
                return match in_width.cmp(&out_width) {
                    Ordering::Greater => FPTrunc(bcx, llargs[0], llret_ty),
                    Ordering::Equal => llargs[0],
                    Ordering::Less => FPExt(bcx, llargs[0], llret_ty)
                }
            }
            _ => {/* Unsupported. Fallthrough. */}
        }
        require!(false,
                 "unsupported cast from `{}` with element `{}` to `{}` with element `{}`",
                 in_ty, in_elem,
                 ret_ty, out_elem);
    }
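
    // For example, by the selection above: `i16x4 -> f32x4` uses `sitofp`,
    // `u32x4 -> f32x4` uses `uitofp`, `f64x2 -> f32x2` uses `fptrunc`, and
    // `i32x4 -> i64x4` sign-extends with `sext`.
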
    macro_rules! arith {
        ($($name: ident: $($($p: ident),* => $call: expr),*;)*) => {
            $(
                if name == stringify!($name) {
                    match in_elem.sty {
                        $(
                            $(ty::$p(_))|* => {
                                return $call(bcx, llargs[0], llargs[1], call_debug_location)
                            }
                            )*
                        _ => {},
                    }
                    require!(false,
                             "unsupported operation on `{}` with element `{}`",
                             in_ty,
                             in_elem)
                })*
        }
    }
    arith! {
        simd_add: TyUint, TyInt => Add, TyFloat => FAdd;
        simd_sub: TyUint, TyInt => Sub, TyFloat => FSub;
        simd_mul: TyUint, TyInt => Mul, TyFloat => FMul;
        simd_div: TyFloat => FDiv;
        simd_shl: TyUint, TyInt => Shl;
        simd_shr: TyUint => LShr, TyInt => AShr;
        simd_and: TyUint, TyInt => And;
        simd_or: TyUint, TyInt => Or;
        simd_xor: TyUint, TyInt => Xor;
    }
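
    // So, for instance, `simd_add` on integer vectors emits `add` and on
    // float vectors `fadd`, while a combination with no arm here (e.g.
    // `simd_div` on integer elements) falls through to the "unsupported
    // operation" error.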
    span_bug!(span, "unknown SIMD intrinsic");
}

// Returns the width of an int TypeVariant and whether it is signed,
// or None if the type is not an integer.
fn int_type_width_signed<'tcx>(sty: &ty::TypeVariants<'tcx>, ccx: &CrateContext)
        -> Option<(u64, bool)> {
    use rustc::ty::{TyInt, TyUint};
    match *sty {
        TyInt(t) => Some((match t {
            ast::IntTy::Is => {
                match &ccx.tcx().sess.target.target.target_pointer_width[..] {
                    "16" => 16,
                    "32" => 32,
                    "64" => 64,
                    tws => bug!("Unsupported target word size for isize: {}", tws),
                }
            },
            ast::IntTy::I8 => 8,
            ast::IntTy::I16 => 16,
            ast::IntTy::I32 => 32,
            ast::IntTy::I64 => 64,
        }, true)),
        TyUint(t) => Some((match t {
            ast::UintTy::Us => {
                match &ccx.tcx().sess.target.target.target_pointer_width[..] {
                    "16" => 16,
                    "32" => 32,
                    "64" => 64,
                    tws => bug!("Unsupported target word size for usize: {}", tws),
                }
            },
            ast::UintTy::U8 => 8,
            ast::UintTy::U16 => 16,
            ast::UintTy::U32 => 32,
            ast::UintTy::U64 => 64,
        }, false)),
        _ => None,
    }
}
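
// For example, on a 64-bit target (illustrative):
//
//      int_type_width_signed(&tcx.types.isize.sty, ccx) == Some((64, true))
//      int_type_width_signed(&tcx.types.u8.sty, ccx)    == Some((8, false))
//      int_type_width_signed(&tcx.types.f32.sty, ccx)   == None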

// Returns the width of a float TypeVariant,
// or None if the type is not a float.
fn float_type_width<'tcx>(sty: &ty::TypeVariants<'tcx>)
        -> Option<u64> {
    use rustc::ty::TyFloat;
    match *sty {
        TyFloat(t) => Some(match t {
            ast::FloatTy::F32 => 32,
            ast::FloatTy::F64 => 64,
        }),
        _ => None,
    }
}
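
// And correspondingly (illustrative):
//
//      float_type_width(&tcx.types.f32.sty) == Some(32)
//      float_type_width(&tcx.types.i32.sty) == None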