// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.

#![allow(non_uppercase_pattern_statics)]

use llvm;
use llvm::{SequentiallyConsistent, Acquire, Release, Xchg, ValueRef};
use middle::subst;
use middle::subst::FnSpace;
use middle::trans::base::*;
use middle::trans::build::*;
use middle::trans::callee;
use middle::trans::cleanup;
use middle::trans::cleanup::CleanupMethods;
use middle::trans::common::*;
use middle::trans::datum::*;
use middle::trans::expr;
use middle::trans::glue;
use middle::trans::type_of::*;
use middle::trans::type_of;
use middle::trans::machine;
use middle::trans::machine::llsize_of;
use middle::trans::type_::Type;
use middle::ty;
use syntax::abi::RustIntrinsic;
use syntax::ast;
use syntax::parse::token;
use util::ppaux::ty_to_string;

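/// Maps a Rust intrinsic to the LLVM intrinsic that implements it directly,
/// e.g. `sqrtf32` to `llvm.sqrt.f32`. Returns `None` for intrinsics that need
/// the custom translation in `trans_intrinsic_call` below.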
pub fn get_simple_intrinsic(ccx: &CrateContext, item: &ast::ForeignItem) -> Option<ValueRef> {
    let name = match token::get_ident(item.ident).get() {
        "sqrtf32" => "llvm.sqrt.f32",
        "sqrtf64" => "llvm.sqrt.f64",
        "powif32" => "llvm.powi.f32",
        "powif64" => "llvm.powi.f64",
        "sinf32" => "llvm.sin.f32",
        "sinf64" => "llvm.sin.f64",
        "cosf32" => "llvm.cos.f32",
        "cosf64" => "llvm.cos.f64",
        "powf32" => "llvm.pow.f32",
        "powf64" => "llvm.pow.f64",
        "expf32" => "llvm.exp.f32",
        "expf64" => "llvm.exp.f64",
        "exp2f32" => "llvm.exp2.f32",
        "exp2f64" => "llvm.exp2.f64",
        "logf32" => "llvm.log.f32",
        "logf64" => "llvm.log.f64",
        "log10f32" => "llvm.log10.f32",
        "log10f64" => "llvm.log10.f64",
        "log2f32" => "llvm.log2.f32",
        "log2f64" => "llvm.log2.f64",
        "fmaf32" => "llvm.fma.f32",
        "fmaf64" => "llvm.fma.f64",
        "fabsf32" => "llvm.fabs.f32",
        "fabsf64" => "llvm.fabs.f64",
        "copysignf32" => "llvm.copysign.f32",
        "copysignf64" => "llvm.copysign.f64",
        "floorf32" => "llvm.floor.f32",
        "floorf64" => "llvm.floor.f64",
        "ceilf32" => "llvm.ceil.f32",
        "ceilf64" => "llvm.ceil.f64",
        "truncf32" => "llvm.trunc.f32",
        "truncf64" => "llvm.trunc.f64",
        "rintf32" => "llvm.rint.f32",
        "rintf64" => "llvm.rint.f64",
        "nearbyintf32" => "llvm.nearbyint.f32",
        "nearbyintf64" => "llvm.nearbyint.f64",
        "roundf32" => "llvm.round.f32",
        "roundf64" => "llvm.round.f64",
        "ctpop8" => "llvm.ctpop.i8",
        "ctpop16" => "llvm.ctpop.i16",
        "ctpop32" => "llvm.ctpop.i32",
        "ctpop64" => "llvm.ctpop.i64",
        "bswap16" => "llvm.bswap.i16",
        "bswap32" => "llvm.bswap.i32",
        "bswap64" => "llvm.bswap.i64",
        _ => return None
    };
    Some(ccx.get_intrinsic(&name))
}

/// Performs late verification that intrinsics are used correctly. At present,
/// the only intrinsic that needs such verification is `transmute`.
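///
/// For example, `transmute::<u32, u64>` is rejected here with an error like
/// "transmute called on types with different sizes: u32 (32 bits) to u64 (64 bits)".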
pub fn check_intrinsics(ccx: &CrateContext) {
    for transmute_restriction in ccx.tcx
                                    .transmute_restrictions
                                    .borrow()
                                    .iter() {
        let llfromtype = type_of::sizing_type_of(ccx,
                                                 transmute_restriction.from);
        let lltotype = type_of::sizing_type_of(ccx,
                                               transmute_restriction.to);
        let from_type_size = machine::llbitsize_of_real(ccx, llfromtype);
        let to_type_size = machine::llbitsize_of_real(ccx, lltotype);
        if from_type_size != to_type_size {
            ccx.sess()
               .span_err(transmute_restriction.span,
                format!("transmute called on types with different sizes: \
                         {} ({} bit{}) to {} ({} bit{})",
                        ty_to_string(ccx.tcx(), transmute_restriction.from),
                        from_type_size as uint,
                        if from_type_size == 1 {
                            ""
                        } else {
                            "s"
                        },
                        ty_to_string(ccx.tcx(), transmute_restriction.to),
                        to_type_size as uint,
                        if to_type_size == 1 {
                            ""
                        } else {
                            "s"
                        }).as_slice());
        }
    }
    ccx.sess().abort_if_errors();
}

pub fn trans_intrinsic_call<'a>(mut bcx: &'a Block<'a>, node: ast::NodeId,
                                callee_ty: ty::t, cleanup_scope: cleanup::CustomScopeIndex,
                                args: callee::CallArgs, dest: expr::Dest,
                                substs: subst::Substs) -> Result<'a> {

    let fcx = bcx.fcx;
    let ccx = fcx.ccx;
    let tcx = bcx.tcx();

    let ret_ty = match ty::get(callee_ty).sty {
        ty::ty_bare_fn(ref f) => f.sig.output,
        _ => fail!("expected bare_fn in trans_intrinsic_call")
    };
    let llret_ty = type_of::type_of(ccx, ret_ty);
    let foreign_item = tcx.map.expect_foreign_item(node);
    let name = token::get_ident(foreign_item.ident);

    // For `transmute` we can just trans the input expr directly into dest
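    // (e.g. `transmute::<f32, u32>(x)` simply writes the bits of `x` into the
    // destination slot; intrinsicck has already checked that the sizes match).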
    if name.get() == "transmute" {
        match args {
            callee::ArgExprs(arg_exprs) => {
                assert_eq!(arg_exprs.len(), 1);

                let (in_type, out_type) = (*substs.types.get(FnSpace, 0),
                                           *substs.types.get(FnSpace, 1));
                let llintype = type_of::type_of(ccx, in_type);
                let llouttype = type_of::type_of(ccx, out_type);

                let in_type_size = machine::llbitsize_of_real(ccx, llintype);
                let out_type_size = machine::llbitsize_of_real(ccx, llouttype);

                // This should be caught by the intrinsicck pass
                assert_eq!(in_type_size, out_type_size);

                // We need to cast the dest so the types work out
                let dest = match dest {
                    expr::SaveIn(d) => expr::SaveIn(PointerCast(bcx, d, llintype.ptr_to())),
                    expr::Ignore => expr::Ignore
                };
                bcx = expr::trans_into(bcx, &*arg_exprs[0], dest);

                fcx.pop_custom_cleanup_scope(cleanup_scope);

                return match dest {
                    expr::SaveIn(d) => Result::new(bcx, d),
                    expr::Ignore => Result::new(bcx, C_undef(llret_ty.ptr_to()))
                };
            }

            _ => {
                ccx.sess().bug("expected expr as argument for transmute");
            }
        }
    }

    // Get the location in which to store the result. If the caller does not
    // care about the result, just make a stack slot.
    let llresult = match dest {
        expr::SaveIn(d) => d,
        expr::Ignore => {
            if !type_is_zero_size(ccx, ret_ty) {
                alloc_ty(bcx, ret_ty, "intrinsic_result")
            } else {
                C_undef(llret_ty.ptr_to())
            }
        }
    };

    // Push the arguments.
    let mut llargs = Vec::new();
    bcx = callee::trans_args(bcx,
                             args,
                             callee_ty,
                             &mut llargs,
                             cleanup::CustomScope(cleanup_scope),
                             false,
                             RustIntrinsic);

    fcx.pop_custom_cleanup_scope(cleanup_scope);

    let simple = get_simple_intrinsic(ccx, &*foreign_item);

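    // Intrinsics with a direct LLVM equivalent become a plain call to that
    // intrinsic; everything else is special-cased below.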
    let llval = match (simple, name.get()) {
        (Some(llfn), _) => {
            Call(bcx, llfn, llargs.as_slice(), None)
        }
        (_, "abort") => {
            let llfn = ccx.get_intrinsic(&("llvm.trap"));
            let v = Call(bcx, llfn, [], None);
            Unreachable(bcx);
            v
        }
        (_, "breakpoint") => {
            let llfn = ccx.get_intrinsic(&("llvm.debugtrap"));
            Call(bcx, llfn, [], None)
        }
        (_, "size_of") => {
            let tp_ty = *substs.types.get(FnSpace, 0);
            let lltp_ty = type_of::type_of(ccx, tp_ty);
            C_uint(ccx, machine::llsize_of_real(ccx, lltp_ty) as uint)
        }
        (_, "min_align_of") => {
            let tp_ty = *substs.types.get(FnSpace, 0);
            let lltp_ty = type_of::type_of(ccx, tp_ty);
            C_uint(ccx, machine::llalign_of_min(ccx, lltp_ty) as uint)
        }
        (_, "pref_align_of") => {
            let tp_ty = *substs.types.get(FnSpace, 0);
            let lltp_ty = type_of::type_of(ccx, tp_ty);
            C_uint(ccx, machine::llalign_of_pref(ccx, lltp_ty) as uint)
        }
        (_, "move_val_init") => {
            // Create a datum reflecting the value being moved.
            // Use `appropriate_rvalue_mode` so that the datum is by-ref
            // if the value is non-immediate. Note that, with
            // intrinsics, there are no argument cleanups to
            // concern ourselves with, so we can use an rvalue datum.
            let tp_ty = *substs.types.get(FnSpace, 0);
            let mode = appropriate_rvalue_mode(ccx, tp_ty);
            let src = Datum {
                val: *llargs.get(1),
                ty: tp_ty,
                kind: Rvalue::new(mode)
            };
            bcx = src.store_to(bcx, *llargs.get(0));
            C_nil(ccx)
        }
        (_, "get_tydesc") => {
            let tp_ty = *substs.types.get(FnSpace, 0);
            let static_ti = get_tydesc(ccx, tp_ty);
            glue::lazily_emit_visit_glue(ccx, &*static_ti);

            // FIXME (#3730): ideally this shouldn't need a cast,
            // but there's a circularity between translating Rust types to LLVM
            // types and having a tydesc type available, so we can't directly
            // access the LLVM type of the intrinsic::TyDesc struct.
            PointerCast(bcx, static_ti.tydesc, llret_ty)
        }
        (_, "type_id") => {
            let hash = ty::hash_crate_independent(
                ccx.tcx(),
                *substs.types.get(FnSpace, 0),
                &ccx.link_meta.crate_hash);
            // NB: This needs to be kept in lockstep with the TypeId struct in
            //     the intrinsic module
            C_named_struct(llret_ty, [C_u64(ccx, hash)])
        }
        (_, "init") => {
            let tp_ty = *substs.types.get(FnSpace, 0);
            let lltp_ty = type_of::type_of(ccx, tp_ty);
            if return_type_is_void(ccx, tp_ty) {
                C_nil(ccx)
            } else {
                C_null(lltp_ty)
            }
        }
        // Effectively no-ops
        (_, "uninit") | (_, "forget") => {
            C_nil(ccx)
        }
        (_, "needs_drop") => {
            let tp_ty = *substs.types.get(FnSpace, 0);
            C_bool(ccx, ty::type_needs_drop(ccx.tcx(), tp_ty))
        }
        (_, "owns_managed") => {
            let tp_ty = *substs.types.get(FnSpace, 0);
            C_bool(ccx, ty::type_contents(ccx.tcx(), tp_ty).owns_managed())
        }
        (_, "visit_tydesc") => {
            let td = *llargs.get(0);
            let visitor = *llargs.get(1);
            let td = PointerCast(bcx, td, ccx.tydesc_type().ptr_to());
            glue::call_visit_glue(bcx, visitor, td);
            C_nil(ccx)
        }
        (_, "offset") => {
            let ptr = *llargs.get(0);
            let offset = *llargs.get(1);
            InBoundsGEP(bcx, ptr, [offset])
        }

        (_, "copy_nonoverlapping_memory") => {
            copy_intrinsic(bcx, false, false, *substs.types.get(FnSpace, 0),
                           *llargs.get(0), *llargs.get(1), *llargs.get(2))
        }
        (_, "copy_memory") => {
            copy_intrinsic(bcx, true, false, *substs.types.get(FnSpace, 0),
                           *llargs.get(0), *llargs.get(1), *llargs.get(2))
        }
        (_, "set_memory") => {
            memset_intrinsic(bcx, false, *substs.types.get(FnSpace, 0),
                             *llargs.get(0), *llargs.get(1), *llargs.get(2))
        }

        (_, "volatile_copy_nonoverlapping_memory") => {
            copy_intrinsic(bcx, false, true, *substs.types.get(FnSpace, 0),
                           *llargs.get(0), *llargs.get(1), *llargs.get(2))
        }
        (_, "volatile_copy_memory") => {
            copy_intrinsic(bcx, true, true, *substs.types.get(FnSpace, 0),
                           *llargs.get(0), *llargs.get(1), *llargs.get(2))
        }
        (_, "volatile_set_memory") => {
            memset_intrinsic(bcx, true, *substs.types.get(FnSpace, 0),
                             *llargs.get(0), *llargs.get(1), *llargs.get(2))
        }
        (_, "volatile_load") => {
            VolatileLoad(bcx, *llargs.get(0))
        },
        (_, "volatile_store") => {
            VolatileStore(bcx, *llargs.get(1), *llargs.get(0), None);
            C_nil(ccx)
        },

        (_, "ctlz8") => count_zeros_intrinsic(bcx, "llvm.ctlz.i8", *llargs.get(0)),
        (_, "ctlz16") => count_zeros_intrinsic(bcx, "llvm.ctlz.i16", *llargs.get(0)),
        (_, "ctlz32") => count_zeros_intrinsic(bcx, "llvm.ctlz.i32", *llargs.get(0)),
        (_, "ctlz64") => count_zeros_intrinsic(bcx, "llvm.ctlz.i64", *llargs.get(0)),
        (_, "cttz8") => count_zeros_intrinsic(bcx, "llvm.cttz.i8", *llargs.get(0)),
        (_, "cttz16") => count_zeros_intrinsic(bcx, "llvm.cttz.i16", *llargs.get(0)),
        (_, "cttz32") => count_zeros_intrinsic(bcx, "llvm.cttz.i32", *llargs.get(0)),
        (_, "cttz64") => count_zeros_intrinsic(bcx, "llvm.cttz.i64", *llargs.get(0)),

        (_, "i8_add_with_overflow") =>
            with_overflow_intrinsic(bcx, "llvm.sadd.with.overflow.i8", ret_ty,
                                    *llargs.get(0), *llargs.get(1)),
        (_, "i16_add_with_overflow") =>
            with_overflow_intrinsic(bcx, "llvm.sadd.with.overflow.i16", ret_ty,
                                    *llargs.get(0), *llargs.get(1)),
        (_, "i32_add_with_overflow") =>
            with_overflow_intrinsic(bcx, "llvm.sadd.with.overflow.i32", ret_ty,
                                    *llargs.get(0), *llargs.get(1)),
        (_, "i64_add_with_overflow") =>
            with_overflow_intrinsic(bcx, "llvm.sadd.with.overflow.i64", ret_ty,
                                    *llargs.get(0), *llargs.get(1)),

        (_, "u8_add_with_overflow") =>
            with_overflow_intrinsic(bcx, "llvm.uadd.with.overflow.i8", ret_ty,
                                    *llargs.get(0), *llargs.get(1)),
        (_, "u16_add_with_overflow") =>
            with_overflow_intrinsic(bcx, "llvm.uadd.with.overflow.i16", ret_ty,
                                    *llargs.get(0), *llargs.get(1)),
        (_, "u32_add_with_overflow") =>
            with_overflow_intrinsic(bcx, "llvm.uadd.with.overflow.i32", ret_ty,
                                    *llargs.get(0), *llargs.get(1)),
        (_, "u64_add_with_overflow") =>
            with_overflow_intrinsic(bcx, "llvm.uadd.with.overflow.i64", ret_ty,
                                    *llargs.get(0), *llargs.get(1)),

        (_, "i8_sub_with_overflow") =>
            with_overflow_intrinsic(bcx, "llvm.ssub.with.overflow.i8", ret_ty,
                                    *llargs.get(0), *llargs.get(1)),
        (_, "i16_sub_with_overflow") =>
            with_overflow_intrinsic(bcx, "llvm.ssub.with.overflow.i16", ret_ty,
                                    *llargs.get(0), *llargs.get(1)),
        (_, "i32_sub_with_overflow") =>
            with_overflow_intrinsic(bcx, "llvm.ssub.with.overflow.i32", ret_ty,
                                    *llargs.get(0), *llargs.get(1)),
        (_, "i64_sub_with_overflow") =>
            with_overflow_intrinsic(bcx, "llvm.ssub.with.overflow.i64", ret_ty,
                                    *llargs.get(0), *llargs.get(1)),

        (_, "u8_sub_with_overflow") =>
            with_overflow_intrinsic(bcx, "llvm.usub.with.overflow.i8", ret_ty,
                                    *llargs.get(0), *llargs.get(1)),
        (_, "u16_sub_with_overflow") =>
            with_overflow_intrinsic(bcx, "llvm.usub.with.overflow.i16", ret_ty,
                                    *llargs.get(0), *llargs.get(1)),
        (_, "u32_sub_with_overflow") =>
            with_overflow_intrinsic(bcx, "llvm.usub.with.overflow.i32", ret_ty,
                                    *llargs.get(0), *llargs.get(1)),
        (_, "u64_sub_with_overflow") =>
            with_overflow_intrinsic(bcx, "llvm.usub.with.overflow.i64", ret_ty,
                                    *llargs.get(0), *llargs.get(1)),

        (_, "i8_mul_with_overflow") =>
            with_overflow_intrinsic(bcx, "llvm.smul.with.overflow.i8", ret_ty,
                                    *llargs.get(0), *llargs.get(1)),
        (_, "i16_mul_with_overflow") =>
            with_overflow_intrinsic(bcx, "llvm.smul.with.overflow.i16", ret_ty,
                                    *llargs.get(0), *llargs.get(1)),
        (_, "i32_mul_with_overflow") =>
            with_overflow_intrinsic(bcx, "llvm.smul.with.overflow.i32", ret_ty,
                                    *llargs.get(0), *llargs.get(1)),
        (_, "i64_mul_with_overflow") =>
            with_overflow_intrinsic(bcx, "llvm.smul.with.overflow.i64", ret_ty,
                                    *llargs.get(0), *llargs.get(1)),

        (_, "u8_mul_with_overflow") =>
            with_overflow_intrinsic(bcx, "llvm.umul.with.overflow.i8", ret_ty,
                                    *llargs.get(0), *llargs.get(1)),
        (_, "u16_mul_with_overflow") =>
            with_overflow_intrinsic(bcx, "llvm.umul.with.overflow.i16", ret_ty,
                                    *llargs.get(0), *llargs.get(1)),
        (_, "u32_mul_with_overflow") =>
            with_overflow_intrinsic(bcx, "llvm.umul.with.overflow.i32", ret_ty,
                                    *llargs.get(0), *llargs.get(1)),
        (_, "u64_mul_with_overflow") =>
            with_overflow_intrinsic(bcx, "llvm.umul.with.overflow.i64", ret_ty,
                                    *llargs.get(0), *llargs.get(1)),

        // This requires that atomic intrinsics follow a specific naming
        // pattern: "atomic_<operation>[_<ordering>]", where omitting the
        // ordering defaults to sequentially consistent (SeqCst).
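        // For example, "atomic_xadd_relaxed" is a monotonic (relaxed) atomic
        // add, and "atomic_load" is a sequentially consistent atomic load.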
        (_, name) if name.starts_with("atomic_") => {
            let split: Vec<&str> = name.split('_').collect();
            assert!(split.len() >= 2, "atomic intrinsic not in correct format");

            let order = if split.len() == 2 {
                llvm::SequentiallyConsistent
            } else {
                match *split.get(2) {
                    "relaxed" => llvm::Monotonic,
                    "acq"     => llvm::Acquire,
                    "rel"     => llvm::Release,
                    "acqrel"  => llvm::AcquireRelease,
                    _ => ccx.sess().fatal("unknown ordering in atomic intrinsic")
                }
            };

            match *split.get(1) {
                "cxchg" => {
                    // See include/llvm/IR/Instructions.h for LLVM's own
                    // mapping of success orderings to failure orderings; we
                    // assume it is good enough for us to use for now.
                    let strongest_failure_ordering = match order {
                        llvm::NotAtomic | llvm::Unordered =>
                            ccx.sess().fatal("cmpxchg must be atomic"),

                        llvm::Monotonic | llvm::Release =>
                            llvm::Monotonic,

                        llvm::Acquire | llvm::AcquireRelease =>
                            llvm::Acquire,

                        llvm::SequentiallyConsistent =>
                            llvm::SequentiallyConsistent
                    };

                    let res = AtomicCmpXchg(bcx, *llargs.get(0), *llargs.get(1),
                                            *llargs.get(2), order,
                                            strongest_failure_ordering);
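                    // LLVM 3.5 changed cmpxchg to return a { value, success }
                    // pair rather than just the old value, so on newer
                    // versions extract the old value from the pair.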
                    if unsafe { llvm::LLVMVersionMinor() >= 5 } {
                        ExtractValue(bcx, res, 0)
                    } else {
                        res
                    }
                }

                "load" => {
                    AtomicLoad(bcx, *llargs.get(0), order)
                }
                "store" => {
                    AtomicStore(bcx, *llargs.get(1), *llargs.get(0), order);
                    C_nil(ccx)
                }

                "fence" => {
                    AtomicFence(bcx, order);
                    C_nil(ccx)
                }

                // These are all AtomicRMW ops
                op => {
                    let atom_op = match op {
                        "xchg"  => llvm::Xchg,
                        "xadd"  => llvm::Add,
                        "xsub"  => llvm::Sub,
                        "and"   => llvm::And,
                        "nand"  => llvm::Nand,
                        "or"    => llvm::Or,
                        "xor"   => llvm::Xor,
                        "max"   => llvm::Max,
                        "min"   => llvm::Min,
                        "umax"  => llvm::UMax,
                        "umin"  => llvm::UMin,
                        _ => ccx.sess().fatal("unknown atomic operation")
                    };

                    AtomicRMW(bcx, atom_op, *llargs.get(0), *llargs.get(1), order)
                }
            }
        }

        (_, _) => ccx.sess().span_bug(foreign_item.span, "unknown intrinsic")
    };

    if val_ty(llval) != Type::void(ccx) &&
       machine::llsize_of_alloc(ccx, val_ty(llval)) != 0 {
        store_ty(bcx, llval, llresult, ret_ty);
    }

    // If we made a temporary stack slot, let's clean it up
    match dest {
        expr::Ignore => {
            bcx = glue::drop_ty(bcx, llresult, ret_ty);
        }
        expr::SaveIn(_) => {}
    }

    Result::new(bcx, llresult)
}

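/// Lowers the `copy_memory` family of intrinsics to LLVM's memcpy/memmove
/// intrinsics: `allow_overlap` selects memmove over memcpy, and the byte
/// count passed to LLVM is the element size times `count`.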
fn copy_intrinsic(bcx: &Block, allow_overlap: bool, volatile: bool,
                  tp_ty: ty::t, dst: ValueRef, src: ValueRef, count: ValueRef) -> ValueRef {
    let ccx = bcx.ccx();
    let lltp_ty = type_of::type_of(ccx, tp_ty);
    let align = C_i32(ccx, machine::llalign_of_min(ccx, lltp_ty) as i32);
    let size = machine::llsize_of(ccx, lltp_ty);
    let int_size = machine::llbitsize_of_real(ccx, ccx.int_type);
    let name = if allow_overlap {
        if int_size == 32 {
            "llvm.memmove.p0i8.p0i8.i32"
        } else {
            "llvm.memmove.p0i8.p0i8.i64"
        }
    } else {
        if int_size == 32 {
            "llvm.memcpy.p0i8.p0i8.i32"
        } else {
            "llvm.memcpy.p0i8.p0i8.i64"
        }
    };

    let dst_ptr = PointerCast(bcx, dst, Type::i8p(ccx));
    let src_ptr = PointerCast(bcx, src, Type::i8p(ccx));
    let llfn = ccx.get_intrinsic(&name);

    Call(bcx, llfn, [dst_ptr, src_ptr, Mul(bcx, size, count), align,
                     C_bool(ccx, volatile)], None)
}

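/// Lowers `set_memory` to LLVM's memset intrinsic; as with the copy
/// intrinsics above, the byte count is the element size times `count`.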
fn memset_intrinsic(bcx: &Block, volatile: bool, tp_ty: ty::t,
                    dst: ValueRef, val: ValueRef, count: ValueRef) -> ValueRef {
    let ccx = bcx.ccx();
    let lltp_ty = type_of::type_of(ccx, tp_ty);
    let align = C_i32(ccx, machine::llalign_of_min(ccx, lltp_ty) as i32);
    let size = machine::llsize_of(ccx, lltp_ty);
    let name = if machine::llbitsize_of_real(ccx, ccx.int_type) == 32 {
        "llvm.memset.p0i8.i32"
    } else {
        "llvm.memset.p0i8.i64"
    };

    let dst_ptr = PointerCast(bcx, dst, Type::i8p(ccx));
    let llfn = ccx.get_intrinsic(&name);

    Call(bcx, llfn, [dst_ptr, val, Mul(bcx, size, count), align,
                     C_bool(ccx, volatile)], None)
}

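/// Calls an `llvm.ctlz.*`/`llvm.cttz.*` intrinsic. The second argument is
/// LLVM's `is_zero_undef` flag; passing `false` keeps the result defined
/// (the operand's bit width) when the input is zero.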
fn count_zeros_intrinsic(bcx: &Block, name: &'static str, val: ValueRef) -> ValueRef {
    let y = C_bool(bcx.ccx(), false);
    let llfn = bcx.ccx().get_intrinsic(&name);
    Call(bcx, llfn, [val, y], None)
}

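/// Calls an `llvm.*.with.overflow.*` intrinsic, which yields an `{ iN, i1 }`
/// pair, and repackages that pair as the Rust-level `(T, bool)` return value.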
fn with_overflow_intrinsic(bcx: &Block, name: &'static str, t: ty::t,
                           a: ValueRef, b: ValueRef) -> ValueRef {
    let llfn = bcx.ccx().get_intrinsic(&name);

    // Extract the result and the overflow flag; zero-extend the `i1` flag to
    // a `bool` and pack both into the return value.
    let val = Call(bcx, llfn, [a, b], None);
    let result = ExtractValue(bcx, val, 0);
    let overflow = ZExt(bcx, ExtractValue(bcx, val, 1), Type::bool(bcx.ccx()));
    let ret = C_undef(type_of::type_of(bcx.ccx(), t));
    let ret = InsertValue(bcx, ret, result, 0);
    let ret = InsertValue(bcx, ret, overflow, 1);

    ret
}