// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.

#![allow(non_upper_case_globals)]

use llvm;
use llvm::{SequentiallyConsistent, Acquire, Release, AtomicXchg, ValueRef, TypeKind};
use middle::subst;
use middle::subst::FnSpace;
use trans::base::*;
use trans::build::*;
use trans::callee;
use trans::cleanup;
use trans::cleanup::CleanupMethods;
use trans::common::*;
use trans::datum::*;
use trans::expr;
use trans::glue;
use trans::type_of::*;
use trans::type_of;
use trans::machine;
use trans::machine::llsize_of;
use trans::type_::Type;
use middle::ty::{self, Ty};
use syntax::abi::RustIntrinsic;
use syntax::ast;
use syntax::parse::token;
use util::ppaux::{Repr, ty_to_string};

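/// Maps a Rust intrinsic name (e.g. `sqrtf32`) to the LLVM intrinsic that
/// implements it directly, when such a one-to-one mapping exists. Returns
/// `None` for intrinsics that need custom translation in
/// `trans_intrinsic_call` below.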
pub fn get_simple_intrinsic(ccx: &CrateContext, item: &ast::ForeignItem) -> Option<ValueRef> {
    let name = match token::get_ident(item.ident).get() {
        "sqrtf32" => "llvm.sqrt.f32",
        "sqrtf64" => "llvm.sqrt.f64",
        "powif32" => "llvm.powi.f32",
        "powif64" => "llvm.powi.f64",
        "sinf32" => "llvm.sin.f32",
        "sinf64" => "llvm.sin.f64",
        "cosf32" => "llvm.cos.f32",
        "cosf64" => "llvm.cos.f64",
        "powf32" => "llvm.pow.f32",
        "powf64" => "llvm.pow.f64",
        "expf32" => "llvm.exp.f32",
        "expf64" => "llvm.exp.f64",
        "exp2f32" => "llvm.exp2.f32",
        "exp2f64" => "llvm.exp2.f64",
        "logf32" => "llvm.log.f32",
        "logf64" => "llvm.log.f64",
        "log10f32" => "llvm.log10.f32",
        "log10f64" => "llvm.log10.f64",
        "log2f32" => "llvm.log2.f32",
        "log2f64" => "llvm.log2.f64",
        "fmaf32" => "llvm.fma.f32",
        "fmaf64" => "llvm.fma.f64",
        "fabsf32" => "llvm.fabs.f32",
        "fabsf64" => "llvm.fabs.f64",
        "copysignf32" => "llvm.copysign.f32",
        "copysignf64" => "llvm.copysign.f64",
        "floorf32" => "llvm.floor.f32",
        "floorf64" => "llvm.floor.f64",
        "ceilf32" => "llvm.ceil.f32",
        "ceilf64" => "llvm.ceil.f64",
        "truncf32" => "llvm.trunc.f32",
        "truncf64" => "llvm.trunc.f64",
        "rintf32" => "llvm.rint.f32",
        "rintf64" => "llvm.rint.f64",
        "nearbyintf32" => "llvm.nearbyint.f32",
        "nearbyintf64" => "llvm.nearbyint.f64",
        "roundf32" => "llvm.round.f32",
        "roundf64" => "llvm.round.f64",
        "ctpop8" => "llvm.ctpop.i8",
        "ctpop16" => "llvm.ctpop.i16",
        "ctpop32" => "llvm.ctpop.i32",
        "ctpop64" => "llvm.ctpop.i64",
        "bswap16" => "llvm.bswap.i16",
        "bswap32" => "llvm.bswap.i32",
        "bswap64" => "llvm.bswap.i64",
        "assume" => "llvm.assume",
        _ => return None
    };
    Some(ccx.get_intrinsic(&name))
}

/// Performs late verification that intrinsics are used correctly. At present,
/// the only intrinsic that needs such verification is `transmute`.
pub fn check_intrinsics(ccx: &CrateContext) {
    let mut last_failing_id = None;
    for transmute_restriction in ccx.tcx().transmute_restrictions.borrow().iter() {
        // A single call to transmute can push multiple type pairs to test in
        // order to exhaustively cover the possibilities for a type parameter.
        // If one of those pairs fails, there is no point reporting errors on
        // the others.
        if last_failing_id == Some(transmute_restriction.id) {
            continue;
        }

        debug!("transmute_restriction: {}", transmute_restriction.repr(ccx.tcx()));

        assert!(!ty::type_has_params(transmute_restriction.substituted_from));
        assert!(!ty::type_has_params(transmute_restriction.substituted_to));

        let llfromtype = type_of::sizing_type_of(ccx,
                                                 transmute_restriction.substituted_from);
        let lltotype = type_of::sizing_type_of(ccx,
                                               transmute_restriction.substituted_to);
        let from_type_size = machine::llbitsize_of_real(ccx, llfromtype);
        let to_type_size = machine::llbitsize_of_real(ccx, lltotype);
        if from_type_size != to_type_size {
            last_failing_id = Some(transmute_restriction.id);

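            // If the types at the call site still involved type parameters,
            // the mismatch only appears after substitution, so the error is
            // phrased in terms of "potentially different sizes".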
            if transmute_restriction.original_from != transmute_restriction.substituted_from {
                ccx.sess().span_err(
                    transmute_restriction.span,
                    format!("transmute called on types with potentially different sizes: \
                             {} (could be {} bit{}) to {} (could be {} bit{})",
                            ty_to_string(ccx.tcx(), transmute_restriction.original_from),
                            from_type_size as uint,
                            if from_type_size == 1 {""} else {"s"},
                            ty_to_string(ccx.tcx(), transmute_restriction.original_to),
                            to_type_size as uint,
                            if to_type_size == 1 {""} else {"s"}).as_slice());
            } else {
                ccx.sess().span_err(
                    transmute_restriction.span,
                    format!("transmute called on types with different sizes: \
                             {} ({} bit{}) to {} ({} bit{})",
                            ty_to_string(ccx.tcx(), transmute_restriction.original_from),
                            from_type_size as uint,
                            if from_type_size == 1 {""} else {"s"},
                            ty_to_string(ccx.tcx(), transmute_restriction.original_to),
                            to_type_size as uint,
                            if to_type_size == 1 {""} else {"s"}).as_slice());
            }
        }
    }
    ccx.sess().abort_if_errors();
}

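/// Translates a call to a Rust compiler intrinsic. `transmute`, `abort` and
/// `unreachable` get special handling; every other intrinsic is lowered
/// either to a direct LLVM intrinsic call or to a short inline instruction
/// sequence.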
pub fn trans_intrinsic_call<'a, 'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>,
                                            node: ast::NodeId,
                                            callee_ty: Ty<'tcx>,
                                            cleanup_scope: cleanup::CustomScopeIndex,
                                            args: callee::CallArgs<'a, 'tcx>,
                                            dest: expr::Dest,
                                            substs: subst::Substs<'tcx>,
                                            call_info: NodeInfo)
                                            -> Result<'blk, 'tcx>
{
    let fcx = bcx.fcx;
    let ccx = fcx.ccx;
    let tcx = bcx.tcx();

    let ret_ty = match callee_ty.sty {
        ty::ty_bare_fn(_, ref f) => {
            ty::erase_late_bound_regions(bcx.tcx(), &f.sig.output())
        }
        _ => panic!("expected bare_fn in trans_intrinsic_call")
    };
    let foreign_item = tcx.map.expect_foreign_item(node);
    let name = token::get_ident(foreign_item.ident);

    // For `transmute` we can just trans the input expr directly into dest
    if name.get() == "transmute" {
        let llret_ty = type_of::type_of(ccx, ret_ty.unwrap());
        match args {
            callee::ArgExprs(arg_exprs) => {
                assert_eq!(arg_exprs.len(), 1);

                let (in_type, out_type) = (*substs.types.get(FnSpace, 0),
                                           *substs.types.get(FnSpace, 1));
                let llintype = type_of::type_of(ccx, in_type);
                let llouttype = type_of::type_of(ccx, out_type);

                let in_type_size = machine::llbitsize_of_real(ccx, llintype);
                let out_type_size = machine::llbitsize_of_real(ccx, llouttype);

                // This should be caught by the intrinsicck pass
                assert_eq!(in_type_size, out_type_size);

                let nonpointer_nonaggregate = |&: llkind: TypeKind| -> bool {
                    use llvm::TypeKind::*;
                    match llkind {
                        Half | Float | Double | X86_FP80 | FP128 |
                            PPC_FP128 | Integer | Vector | X86_MMX => true,
                        _ => false
                    }
                };

                // An approximation to which types can be directly cast via
                // LLVM's bitcast.  This doesn't cover pointer -> pointer casts,
                // but does, importantly, cover SIMD types.
                let in_kind = llintype.kind();
                let ret_kind = llret_ty.kind();
                let bitcast_compatible =
                    (nonpointer_nonaggregate(in_kind) && nonpointer_nonaggregate(ret_kind)) || {
                        in_kind == TypeKind::Pointer && ret_kind == TypeKind::Pointer
                    };

                let dest = if bitcast_compatible {
                    // If we're here, the type is scalar-like (a primitive, a
                    // SIMD type or a pointer), so it can be handled as a
                    // by-value ValueRef and can be directly bitcast to the
                    // target type. Special-casing this makes conversions like
                    // `u32x4` -> `u64x2` much nicer for LLVM and so more
                    // efficient (C gets this implicitly through the `__m128i`
                    // type, so this keeps Rust from losing out there).
                    let expr = &*arg_exprs[0];
                    let datum = unpack_datum!(bcx, expr::trans(bcx, expr));
                    let datum = unpack_datum!(bcx, datum.to_rvalue_datum(bcx, "transmute_temp"));
                    let val = if datum.kind.is_by_ref() {
                        load_ty(bcx, datum.val, datum.ty)
                    } else {
                        datum.val
                    };

                    let cast_val = BitCast(bcx, val, llret_ty);

                    match dest {
                        expr::SaveIn(d) => {
                            // This often occurs in a sequence like `Store(val,
                            // d); val2 = Load(d)`, so it disappears easily.
                            Store(bcx, cast_val, d);
                        }
                        expr::Ignore => {}
                    }
                    dest
                } else {
                    // The types are too complicated to handle with a by-value
                    // bitcast, so use a pointer cast instead. We need to cast
                    // the dest so the types work out.
                    let dest = match dest {
                        expr::SaveIn(d) => expr::SaveIn(PointerCast(bcx, d, llintype.ptr_to())),
                        expr::Ignore => expr::Ignore
                    };
                    bcx = expr::trans_into(bcx, &*arg_exprs[0], dest);
                    dest
                };

                fcx.pop_custom_cleanup_scope(cleanup_scope);

                return match dest {
                    expr::SaveIn(d) => Result::new(bcx, d),
                    expr::Ignore => Result::new(bcx, C_undef(llret_ty.ptr_to()))
                };

            }

            _ => {
                ccx.sess().bug("expected expr as argument for transmute");
            }
        }
    }

    // Push the arguments.
    let mut llargs = Vec::new();
    bcx = callee::trans_args(bcx,
                             args,
                             callee_ty,
                             &mut llargs,
                             cleanup::CustomScope(cleanup_scope),
                             false,
                             RustIntrinsic);

    fcx.pop_custom_cleanup_scope(cleanup_scope);

    // These are the only intrinsic functions that diverge.
    if name.get() == "abort" {
        let llfn = ccx.get_intrinsic(&("llvm.trap"));
        Call(bcx, llfn, &[], None);
        Unreachable(bcx);
        return Result::new(bcx, C_undef(Type::nil(ccx).ptr_to()));
    } else if name.get() == "unreachable" {
        Unreachable(bcx);
        return Result::new(bcx, C_nil(ccx));
    }

    let ret_ty = match ret_ty {
        ty::FnConverging(ret_ty) => ret_ty,
        ty::FnDiverging => unreachable!()
    };

    let llret_ty = type_of::type_of(ccx, ret_ty);

    // Get a location to store the result. If the user does not care about
    // the result, just make a temporary stack slot.
    let llresult = match dest {
        expr::SaveIn(d) => d,
        expr::Ignore => {
            if !type_is_zero_size(ccx, ret_ty) {
                alloc_ty(bcx, ret_ty, "intrinsic_result")
            } else {
                C_undef(llret_ty.ptr_to())
            }
        }
    };

    let simple = get_simple_intrinsic(ccx, &*foreign_item);
    let llval = match (simple, name.get()) {
        (Some(llfn), _) => {
            Call(bcx, llfn, llargs.as_slice(), None)
        }
        (_, "breakpoint") => {
            let llfn = ccx.get_intrinsic(&("llvm.debugtrap"));
            Call(bcx, llfn, &[], None)
        }
        (_, "size_of") => {
            let tp_ty = *substs.types.get(FnSpace, 0);
            let lltp_ty = type_of::type_of(ccx, tp_ty);
            C_uint(ccx, machine::llsize_of_alloc(ccx, lltp_ty))
        }
        (_, "min_align_of") => {
            let tp_ty = *substs.types.get(FnSpace, 0);
            C_uint(ccx, type_of::align_of(ccx, tp_ty))
        }
        (_, "pref_align_of") => {
            let tp_ty = *substs.types.get(FnSpace, 0);
            let lltp_ty = type_of::type_of(ccx, tp_ty);
            C_uint(ccx, machine::llalign_of_pref(ccx, lltp_ty))
        }
        (_, "move_val_init") => {
            // Create a datum reflecting the value being moved.
            // Use `appropriate_rvalue_mode` so that the datum is by ref
            // if the value is non-immediate. Note that, with
            // intrinsics, there are no argument cleanups to
            // concern ourselves with, so we can use an rvalue datum.
            let tp_ty = *substs.types.get(FnSpace, 0);
            let mode = appropriate_rvalue_mode(ccx, tp_ty);
            let src = Datum {
                val: llargs[1],
                ty: tp_ty,
                kind: Rvalue::new(mode)
            };
            bcx = src.store_to(bcx, llargs[0]);
            C_nil(ccx)
        }
        (_, "get_tydesc") => {
            let tp_ty = *substs.types.get(FnSpace, 0);
            let static_ti = get_tydesc(ccx, tp_ty);

            // FIXME (#3730): ideally this shouldn't need a cast, but there's
            // a circularity between translating Rust types to LLVM types and
            // having a tydesc type available, so we can't directly access the
            // LLVM type of the intrinsic::TyDesc struct.
            PointerCast(bcx, static_ti.tydesc, llret_ty)
        }
        (_, "type_id") => {
            let hash = ty::hash_crate_independent(
                ccx.tcx(),
                *substs.types.get(FnSpace, 0),
                &ccx.link_meta().crate_hash);
            // NB: This needs to be kept in lockstep with the TypeId struct in
            //     the intrinsic module
            C_named_struct(llret_ty, &[C_u64(ccx, hash)])
        }
        (_, "init") => {
            let tp_ty = *substs.types.get(FnSpace, 0);
            let lltp_ty = type_of::type_of(ccx, tp_ty);
            if return_type_is_void(ccx, tp_ty) {
                C_nil(ccx)
            } else {
                C_null(lltp_ty)
            }
        }
        // Effectively no-ops
        (_, "uninit") | (_, "forget") => {
            C_nil(ccx)
        }
        (_, "needs_drop") => {
            let tp_ty = *substs.types.get(FnSpace, 0);
            C_bool(ccx, type_needs_drop(ccx.tcx(), tp_ty))
        }
        (_, "owns_managed") => {
            let tp_ty = *substs.types.get(FnSpace, 0);
            C_bool(ccx, ty::type_contents(ccx.tcx(), tp_ty).owns_managed())
        }
        (_, "offset") => {
            let ptr = llargs[0];
            let offset = llargs[1];
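            // `offset` promises to stay within the bounds of the original
            // allocation, which is exactly what an inbounds GEP tells LLVM.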
            InBoundsGEP(bcx, ptr, &[offset])
        }

        (_, "copy_nonoverlapping_memory") => {
            copy_intrinsic(bcx, false, false, *substs.types.get(FnSpace, 0),
                           llargs[0], llargs[1], llargs[2])
        }
        (_, "copy_memory") => {
            copy_intrinsic(bcx, true, false, *substs.types.get(FnSpace, 0),
                           llargs[0], llargs[1], llargs[2])
        }
        (_, "set_memory") => {
            memset_intrinsic(bcx, false, *substs.types.get(FnSpace, 0),
                             llargs[0], llargs[1], llargs[2])
        }

        (_, "volatile_copy_nonoverlapping_memory") => {
            copy_intrinsic(bcx, false, true, *substs.types.get(FnSpace, 0),
                           llargs[0], llargs[1], llargs[2])
        }
        (_, "volatile_copy_memory") => {
            copy_intrinsic(bcx, true, true, *substs.types.get(FnSpace, 0),
                           llargs[0], llargs[1], llargs[2])
        }
        (_, "volatile_set_memory") => {
            memset_intrinsic(bcx, true, *substs.types.get(FnSpace, 0),
                             llargs[0], llargs[1], llargs[2])
        }
        (_, "volatile_load") => {
            VolatileLoad(bcx, llargs[0])
        },
        (_, "volatile_store") => {
            VolatileStore(bcx, llargs[1], llargs[0]);
            C_nil(ccx)
        },

        (_, "ctlz8") => count_zeros_intrinsic(bcx, "llvm.ctlz.i8", llargs[0]),
        (_, "ctlz16") => count_zeros_intrinsic(bcx, "llvm.ctlz.i16", llargs[0]),
        (_, "ctlz32") => count_zeros_intrinsic(bcx, "llvm.ctlz.i32", llargs[0]),
        (_, "ctlz64") => count_zeros_intrinsic(bcx, "llvm.ctlz.i64", llargs[0]),
        (_, "cttz8") => count_zeros_intrinsic(bcx, "llvm.cttz.i8", llargs[0]),
        (_, "cttz16") => count_zeros_intrinsic(bcx, "llvm.cttz.i16", llargs[0]),
        (_, "cttz32") => count_zeros_intrinsic(bcx, "llvm.cttz.i32", llargs[0]),
        (_, "cttz64") => count_zeros_intrinsic(bcx, "llvm.cttz.i64", llargs[0]),

        (_, "i8_add_with_overflow") =>
            with_overflow_intrinsic(bcx, "llvm.sadd.with.overflow.i8", ret_ty,
                                    llargs[0], llargs[1]),
        (_, "i16_add_with_overflow") =>
            with_overflow_intrinsic(bcx, "llvm.sadd.with.overflow.i16", ret_ty,
                                    llargs[0], llargs[1]),
        (_, "i32_add_with_overflow") =>
            with_overflow_intrinsic(bcx, "llvm.sadd.with.overflow.i32", ret_ty,
                                    llargs[0], llargs[1]),
        (_, "i64_add_with_overflow") =>
            with_overflow_intrinsic(bcx, "llvm.sadd.with.overflow.i64", ret_ty,
                                    llargs[0], llargs[1]),

        (_, "u8_add_with_overflow") =>
            with_overflow_intrinsic(bcx, "llvm.uadd.with.overflow.i8", ret_ty,
                                    llargs[0], llargs[1]),
        (_, "u16_add_with_overflow") =>
            with_overflow_intrinsic(bcx, "llvm.uadd.with.overflow.i16", ret_ty,
                                    llargs[0], llargs[1]),
        (_, "u32_add_with_overflow") =>
            with_overflow_intrinsic(bcx, "llvm.uadd.with.overflow.i32", ret_ty,
                                    llargs[0], llargs[1]),
        (_, "u64_add_with_overflow") =>
            with_overflow_intrinsic(bcx, "llvm.uadd.with.overflow.i64", ret_ty,
                                    llargs[0], llargs[1]),

        (_, "i8_sub_with_overflow") =>
            with_overflow_intrinsic(bcx, "llvm.ssub.with.overflow.i8", ret_ty,
                                    llargs[0], llargs[1]),
        (_, "i16_sub_with_overflow") =>
            with_overflow_intrinsic(bcx, "llvm.ssub.with.overflow.i16", ret_ty,
                                    llargs[0], llargs[1]),
        (_, "i32_sub_with_overflow") =>
            with_overflow_intrinsic(bcx, "llvm.ssub.with.overflow.i32", ret_ty,
                                    llargs[0], llargs[1]),
        (_, "i64_sub_with_overflow") =>
            with_overflow_intrinsic(bcx, "llvm.ssub.with.overflow.i64", ret_ty,
                                    llargs[0], llargs[1]),

        (_, "u8_sub_with_overflow") =>
            with_overflow_intrinsic(bcx, "llvm.usub.with.overflow.i8", ret_ty,
                                    llargs[0], llargs[1]),
        (_, "u16_sub_with_overflow") =>
            with_overflow_intrinsic(bcx, "llvm.usub.with.overflow.i16", ret_ty,
                                    llargs[0], llargs[1]),
        (_, "u32_sub_with_overflow") =>
            with_overflow_intrinsic(bcx, "llvm.usub.with.overflow.i32", ret_ty,
                                    llargs[0], llargs[1]),
        (_, "u64_sub_with_overflow") =>
            with_overflow_intrinsic(bcx, "llvm.usub.with.overflow.i64", ret_ty,
                                    llargs[0], llargs[1]),

        (_, "i8_mul_with_overflow") =>
            with_overflow_intrinsic(bcx, "llvm.smul.with.overflow.i8", ret_ty,
                                    llargs[0], llargs[1]),
        (_, "i16_mul_with_overflow") =>
            with_overflow_intrinsic(bcx, "llvm.smul.with.overflow.i16", ret_ty,
                                    llargs[0], llargs[1]),
        (_, "i32_mul_with_overflow") =>
            with_overflow_intrinsic(bcx, "llvm.smul.with.overflow.i32", ret_ty,
                                    llargs[0], llargs[1]),
        (_, "i64_mul_with_overflow") =>
            with_overflow_intrinsic(bcx, "llvm.smul.with.overflow.i64", ret_ty,
                                    llargs[0], llargs[1]),

        (_, "u8_mul_with_overflow") =>
            with_overflow_intrinsic(bcx, "llvm.umul.with.overflow.i8", ret_ty,
                                    llargs[0], llargs[1]),
        (_, "u16_mul_with_overflow") =>
            with_overflow_intrinsic(bcx, "llvm.umul.with.overflow.i16", ret_ty,
                                    llargs[0], llargs[1]),
        (_, "u32_mul_with_overflow") =>
            with_overflow_intrinsic(bcx, "llvm.umul.with.overflow.i32", ret_ty,
                                    llargs[0], llargs[1]),
        (_, "u64_mul_with_overflow") =>
            with_overflow_intrinsic(bcx, "llvm.umul.with.overflow.i64", ret_ty,
                                    llargs[0], llargs[1]),

        (_, "return_address") => {
            if !fcx.caller_expects_out_pointer {
                tcx.sess.span_err(call_info.span,
                                  "invalid use of `return_address` intrinsic: function \
                                   does not use out pointer");
                C_null(Type::i8p(ccx))
            } else {
                PointerCast(bcx, llvm::get_param(fcx.llfn, 0), Type::i8p(ccx))
            }
        }

        // This requires that atomic intrinsics follow a specific naming pattern:
        // "atomic_<operation>[_<ordering>]", and no ordering means SeqCst
        (_, name) if name.starts_with("atomic_") => {
            let split: Vec<&str> = name.split('_').collect();
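            // e.g. "atomic_xadd_relaxed" splits into ["atomic", "xadd", "relaxed"],
            // while "atomic_load" has no ordering suffix and defaults to SeqCst.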
            assert!(split.len() >= 2, "Atomic intrinsic not in correct format");

            let order = if split.len() == 2 {
                llvm::SequentiallyConsistent
            } else {
                match split[2] {
                    "unordered" => llvm::Unordered,
                    "relaxed" => llvm::Monotonic,
                    "acq"     => llvm::Acquire,
                    "rel"     => llvm::Release,
                    "acqrel"  => llvm::AcquireRelease,
                    _ => ccx.sess().fatal("unknown ordering in atomic intrinsic")
                }
            };

            match split[1] {
                "cxchg" => {
                    // See include/llvm/IR/Instructions.h for LLVM's own
                    // derivation of this; it is assumed to be good enough for
                    // us to use for now.
                    let strongest_failure_ordering = match order {
                        llvm::NotAtomic | llvm::Unordered =>
                            ccx.sess().fatal("cmpxchg must be atomic"),

                        llvm::Monotonic | llvm::Release =>
                            llvm::Monotonic,

                        llvm::Acquire | llvm::AcquireRelease =>
                            llvm::Acquire,

                        llvm::SequentiallyConsistent =>
                            llvm::SequentiallyConsistent
                    };

                    let res = AtomicCmpXchg(bcx, llargs[0], llargs[1],
                                            llargs[2], order,
                                            strongest_failure_ordering);
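                    // LLVM 3.5 changed cmpxchg to return a {value, success}
                    // pair, so on newer versions extract just the old value.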
                    if unsafe { llvm::LLVMVersionMinor() >= 5 } {
                        ExtractValue(bcx, res, 0)
                    } else {
                        res
                    }
                }

                "load" => {
                    AtomicLoad(bcx, llargs[0], order)
                }
                "store" => {
                    AtomicStore(bcx, llargs[1], llargs[0], order);
                    C_nil(ccx)
                }

                "fence" => {
                    AtomicFence(bcx, order);
                    C_nil(ccx)
                }

                // These are all AtomicRMW ops
                op => {
                    let atom_op = match op {
                        "xchg"  => llvm::AtomicXchg,
                        "xadd"  => llvm::AtomicAdd,
                        "xsub"  => llvm::AtomicSub,
                        "and"   => llvm::AtomicAnd,
                        "nand"  => llvm::AtomicNand,
                        "or"    => llvm::AtomicOr,
                        "xor"   => llvm::AtomicXor,
                        "max"   => llvm::AtomicMax,
                        "min"   => llvm::AtomicMin,
                        "umax"  => llvm::AtomicUMax,
                        "umin"  => llvm::AtomicUMin,
                        _ => ccx.sess().fatal("unknown atomic operation")
                    };

                    AtomicRMW(bcx, atom_op, llargs[0], llargs[1], order)
                }
            }

        }

        (_, _) => ccx.sess().span_bug(foreign_item.span, "unknown intrinsic")
    };

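    // Only write the intrinsic's result back if it actually occupies storage;
    // void and zero-sized results have nothing to store.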
    if val_ty(llval) != Type::void(ccx) &&
       machine::llsize_of_alloc(ccx, val_ty(llval)) != 0 {
        store_ty(bcx, llval, llresult, ret_ty);
    }

    // If we made a temporary stack slot, let's clean it up
    match dest {
        expr::Ignore => {
            bcx = glue::drop_ty(bcx, llresult, ret_ty, Some(call_info));
        }
        expr::SaveIn(_) => {}
    }

    Result::new(bcx, llresult)
}

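// Lowers the copy intrinsics to llvm.memcpy (or llvm.memmove when overlap is
// allowed). `count` is a number of `tp_ty` elements, so it is multiplied by
// the element size before being passed to LLVM.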
fn copy_intrinsic<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
                              allow_overlap: bool, volatile: bool, tp_ty: Ty<'tcx>,
                              dst: ValueRef, src: ValueRef, count: ValueRef) -> ValueRef {
    let ccx = bcx.ccx();
    let lltp_ty = type_of::type_of(ccx, tp_ty);
    let align = C_i32(ccx, type_of::align_of(ccx, tp_ty) as i32);
    let size = machine::llsize_of(ccx, lltp_ty);
    let int_size = machine::llbitsize_of_real(ccx, ccx.int_type());
    let name = if allow_overlap {
        if int_size == 32 {
            "llvm.memmove.p0i8.p0i8.i32"
        } else {
            "llvm.memmove.p0i8.p0i8.i64"
        }
    } else {
        if int_size == 32 {
            "llvm.memcpy.p0i8.p0i8.i32"
        } else {
            "llvm.memcpy.p0i8.p0i8.i64"
        }
    };

    let dst_ptr = PointerCast(bcx, dst, Type::i8p(ccx));
    let src_ptr = PointerCast(bcx, src, Type::i8p(ccx));
    let llfn = ccx.get_intrinsic(&name);

    Call(bcx, llfn, &[dst_ptr, src_ptr, Mul(bcx, size, count), align,
                      C_bool(ccx, volatile)], None)
}

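// Lowers set_memory/volatile_set_memory to llvm.memset; as with the copy
// intrinsics, `count` is in elements of `tp_ty`, not bytes.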
fn memset_intrinsic<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, volatile: bool, tp_ty: Ty<'tcx>,
                                dst: ValueRef, val: ValueRef, count: ValueRef) -> ValueRef {
    let ccx = bcx.ccx();
    let lltp_ty = type_of::type_of(ccx, tp_ty);
    let align = C_i32(ccx, type_of::align_of(ccx, tp_ty) as i32);
    let size = machine::llsize_of(ccx, lltp_ty);
    let name = if machine::llbitsize_of_real(ccx, ccx.int_type()) == 32 {
        "llvm.memset.p0i8.i32"
    } else {
        "llvm.memset.p0i8.i64"
    };

    let dst_ptr = PointerCast(bcx, dst, Type::i8p(ccx));
    let llfn = ccx.get_intrinsic(&name);

    Call(bcx, llfn, &[dst_ptr, val, Mul(bcx, size, count), align,
                      C_bool(ccx, volatile)], None)
}

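// The ctlz/cttz intrinsics take a second `i1` argument; passing `false` here
// means the result is well-defined (the bit width) even for a zero input.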
fn count_zeros_intrinsic(bcx: Block, name: &'static str, val: ValueRef) -> ValueRef {
    let y = C_bool(bcx.ccx(), false);
    let llfn = bcx.ccx().get_intrinsic(&name);
    Call(bcx, llfn, &[val, y], None)
}

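// Wraps the llvm.*.with.overflow family: each of those intrinsics returns a
// {result, i1 overflowed} pair rather than writing a flag anywhere.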
fn with_overflow_intrinsic<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, name: &'static str,
                                       t: Ty<'tcx>, a: ValueRef, b: ValueRef) -> ValueRef {
    let llfn = bcx.ccx().get_intrinsic(&name);

    // Convert the `i1` overflow flag to a `bool` and pack it, together with
    // the result, into the (value, overflowed) pair that the caller stores.
    let val = Call(bcx, llfn, &[a, b], None);
    let result = ExtractValue(bcx, val, 0);
    let overflow = ZExt(bcx, ExtractValue(bcx, val, 1), Type::bool(bcx.ccx()));
    let ret = C_undef(type_of::type_of(bcx.ccx(), t));
    let ret = InsertValue(bcx, ret, result, 0);
    let ret = InsertValue(bcx, ret, overflow, 1);

    ret
}