// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.

#![allow(non_uppercase_statics)]

use llvm;
use llvm::{SequentiallyConsistent, Acquire, Release, AtomicXchg, ValueRef};
use middle::subst;
use middle::subst::FnSpace;
use middle::trans::base::*;
use middle::trans::build::*;
use middle::trans::callee;
use middle::trans::cleanup;
use middle::trans::cleanup::CleanupMethods;
use middle::trans::common::*;
use middle::trans::datum::*;
use middle::trans::expr;
use middle::trans::glue;
use middle::trans::type_of::*;
use middle::trans::type_of;
use middle::trans::machine;
use middle::trans::machine::llsize_of;
use middle::trans::type_::Type;
use middle::ty;
use syntax::abi::RustIntrinsic;
use syntax::ast;
use syntax::parse::token;
use util::ppaux::ty_to_string;

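/// Maps a "simple" intrinsic (one that is a pure, direct wrapper around a
/// single LLVM intrinsic) to the name of the LLVM intrinsic it lowers to.
///
/// A minimal sketch of the Rust-side declaration this corresponds to,
/// assuming the era's `extern "rust-intrinsic"` syntax (illustrative only,
/// not part of this file):
///
///     extern "rust-intrinsic" {
///         fn sqrtf64(x: f64) -> f64; // lowers to a call to llvm.sqrt.f64
///     }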
pub fn get_simple_intrinsic(ccx: &CrateContext, item: &ast::ForeignItem) -> Option<ValueRef> {
    let name = match token::get_ident(item.ident).get() {
        "sqrtf32" => "llvm.sqrt.f32",
        "sqrtf64" => "llvm.sqrt.f64",
        "powif32" => "llvm.powi.f32",
        "powif64" => "llvm.powi.f64",
        "sinf32" => "llvm.sin.f32",
        "sinf64" => "llvm.sin.f64",
        "cosf32" => "llvm.cos.f32",
        "cosf64" => "llvm.cos.f64",
        "powf32" => "llvm.pow.f32",
        "powf64" => "llvm.pow.f64",
        "expf32" => "llvm.exp.f32",
        "expf64" => "llvm.exp.f64",
        "exp2f32" => "llvm.exp2.f32",
        "exp2f64" => "llvm.exp2.f64",
        "logf32" => "llvm.log.f32",
        "logf64" => "llvm.log.f64",
        "log10f32" => "llvm.log10.f32",
        "log10f64" => "llvm.log10.f64",
        "log2f32" => "llvm.log2.f32",
        "log2f64" => "llvm.log2.f64",
        "fmaf32" => "llvm.fma.f32",
        "fmaf64" => "llvm.fma.f64",
        "fabsf32" => "llvm.fabs.f32",
        "fabsf64" => "llvm.fabs.f64",
        "copysignf32" => "llvm.copysign.f32",
        "copysignf64" => "llvm.copysign.f64",
        "floorf32" => "llvm.floor.f32",
        "floorf64" => "llvm.floor.f64",
        "ceilf32" => "llvm.ceil.f32",
        "ceilf64" => "llvm.ceil.f64",
        "truncf32" => "llvm.trunc.f32",
        "truncf64" => "llvm.trunc.f64",
        "rintf32" => "llvm.rint.f32",
        "rintf64" => "llvm.rint.f64",
        "nearbyintf32" => "llvm.nearbyint.f32",
        "nearbyintf64" => "llvm.nearbyint.f64",
        "roundf32" => "llvm.round.f32",
        "roundf64" => "llvm.round.f64",
        "ctpop8" => "llvm.ctpop.i8",
        "ctpop16" => "llvm.ctpop.i16",
        "ctpop32" => "llvm.ctpop.i32",
        "ctpop64" => "llvm.ctpop.i64",
        "bswap16" => "llvm.bswap.i16",
        "bswap32" => "llvm.bswap.i32",
        "bswap64" => "llvm.bswap.i64",
        "assume" => "llvm.assume",
        _ => return None
    };
    Some(ccx.get_intrinsic(&name))
}

/// Performs late verification that intrinsics are used correctly. At present,
/// the only intrinsic that needs such verification is `transmute`.
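///
/// For example, a call to `transmute::<u32, u64>` is rejected here with an
/// error along the lines of "transmute called on types with different
/// sizes: u32 (32 bits) to u64 (64 bits)".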
pub fn check_intrinsics(ccx: &CrateContext) {
    for transmute_restriction in ccx.tcx()
                                    .transmute_restrictions
                                    .borrow()
                                    .iter() {
        let llfromtype = type_of::sizing_type_of(ccx,
                                                 transmute_restriction.from);
        let lltotype = type_of::sizing_type_of(ccx,
                                               transmute_restriction.to);
        let from_type_size = machine::llbitsize_of_real(ccx, llfromtype);
        let to_type_size = machine::llbitsize_of_real(ccx, lltotype);
        if from_type_size != to_type_size {
            ccx.sess()
               .span_err(transmute_restriction.span,
                format!("transmute called on types with different sizes: \
                         {} ({} bit{}) to {} ({} bit{})",
                        ty_to_string(ccx.tcx(), transmute_restriction.from),
                        from_type_size as uint,
                        if from_type_size == 1 {
                            ""
                        } else {
                            "s"
                        },
                        ty_to_string(ccx.tcx(), transmute_restriction.to),
                        to_type_size as uint,
                        if to_type_size == 1 {
                            ""
                        } else {
                            "s"
                        }).as_slice());
        }
        if ty::type_is_fat_ptr(ccx.tcx(), transmute_restriction.to) ||
           ty::type_is_fat_ptr(ccx.tcx(), transmute_restriction.from) {
            ccx.sess()
               .add_lint(::lint::builtin::FAT_PTR_TRANSMUTES,
                         transmute_restriction.id,
                         transmute_restriction.span,
                         format!("transmuting fat pointer types; {} to {}. \
                                  Beware of relying on the compiler's representation",
                                 ty_to_string(ccx.tcx(), transmute_restriction.from),
                                 ty_to_string(ccx.tcx(), transmute_restriction.to)));
        }
    }
    ccx.sess().abort_if_errors();
}

pub fn trans_intrinsic_call<'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>, node: ast::NodeId,
                                        callee_ty: ty::t, cleanup_scope: cleanup::CustomScopeIndex,
                                        args: callee::CallArgs, dest: expr::Dest,
                                        substs: subst::Substs, call_info: NodeInfo)
                                        -> Result<'blk, 'tcx> {

    let fcx = bcx.fcx;
    let ccx = fcx.ccx;
    let tcx = bcx.tcx();

    let ret_ty = match ty::get(callee_ty).sty {
        ty::ty_bare_fn(ref f) => f.sig.output,
        _ => fail!("expected bare_fn in trans_intrinsic_call")
    };
    let llret_ty = type_of::type_of(ccx, ret_ty);
    let foreign_item = tcx.map.expect_foreign_item(node);
    let name = token::get_ident(foreign_item.ident);

    // For `transmute` we can just trans the input expr directly into dest
    if name.get() == "transmute" {
        match args {
            callee::ArgExprs(arg_exprs) => {
                assert_eq!(arg_exprs.len(), 1);

                let (in_type, out_type) = (*substs.types.get(FnSpace, 0),
                                           *substs.types.get(FnSpace, 1));
                let llintype = type_of::type_of(ccx, in_type);
                let llouttype = type_of::type_of(ccx, out_type);

                let in_type_size = machine::llbitsize_of_real(ccx, llintype);
                let out_type_size = machine::llbitsize_of_real(ccx, llouttype);

                // This should be caught by the intrinsicck pass
                assert_eq!(in_type_size, out_type_size);

                // We need to cast the dest so the types work out
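                // (the destination slot has the *out* type, while
                // `trans_into` below writes a value of the *in* type;
                // since the two sizes are equal, the cast just
                // reinterprets the same bytes)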
                let dest = match dest {
                    expr::SaveIn(d) => expr::SaveIn(PointerCast(bcx, d, llintype.ptr_to())),
                    expr::Ignore => expr::Ignore
                };
                bcx = expr::trans_into(bcx, &*arg_exprs[0], dest);

                fcx.pop_custom_cleanup_scope(cleanup_scope);

                return match dest {
                    expr::SaveIn(d) => Result::new(bcx, d),
                    expr::Ignore => Result::new(bcx, C_undef(llret_ty.ptr_to()))
                };
            }

            _ => {
                ccx.sess().bug("expected expr as argument for transmute");
            }
        }
    }

    // Get a location to store the result. If the caller ignores the
    // result, just make a stack slot (or an undef pointer if the return
    // type is zero-sized).
    let llresult = match dest {
        expr::SaveIn(d) => d,
        expr::Ignore => {
            if !type_is_zero_size(ccx, ret_ty) {
                alloc_ty(bcx, ret_ty, "intrinsic_result")
            } else {
                C_undef(llret_ty.ptr_to())
            }
        }
    };

    // Push the arguments.
    let mut llargs = Vec::new();
    bcx = callee::trans_args(bcx,
                             args,
                             callee_ty,
                             &mut llargs,
                             cleanup::CustomScope(cleanup_scope),
                             false,
                             RustIntrinsic);

    fcx.pop_custom_cleanup_scope(cleanup_scope);

    let simple = get_simple_intrinsic(ccx, &*foreign_item);

    let llval = match (simple, name.get()) {
        (Some(llfn), _) => {
            Call(bcx, llfn, llargs.as_slice(), None)
        }
        (_, "abort") => {
            let llfn = ccx.get_intrinsic(&("llvm.trap"));
            let v = Call(bcx, llfn, [], None);
            Unreachable(bcx);
            v
        }
        (_, "unreachable") => {
            Unreachable(bcx);
            C_nil(ccx)
        }
        (_, "breakpoint") => {
            let llfn = ccx.get_intrinsic(&("llvm.debugtrap"));
            Call(bcx, llfn, [], None)
        }
        (_, "size_of") => {
            let tp_ty = *substs.types.get(FnSpace, 0);
            let lltp_ty = type_of::type_of(ccx, tp_ty);
            C_uint(ccx, machine::llsize_of_real(ccx, lltp_ty))
        }
        (_, "min_align_of") => {
            let tp_ty = *substs.types.get(FnSpace, 0);
            C_uint(ccx, type_of::align_of(ccx, tp_ty))
        }
        (_, "pref_align_of") => {
            let tp_ty = *substs.types.get(FnSpace, 0);
            let lltp_ty = type_of::type_of(ccx, tp_ty);
            C_uint(ccx, machine::llalign_of_pref(ccx, lltp_ty))
        }
        (_, "move_val_init") => {
            // Create a datum reflecting the value being moved.
            // Use `appropriate_mode` so that the datum is by ref
            // if the value is non-immediate. Note that, with
            // intrinsics, there are no argument cleanups to
            // concern ourselves with, so we can use an rvalue datum.
            let tp_ty = *substs.types.get(FnSpace, 0);
            let mode = appropriate_rvalue_mode(ccx, tp_ty);
            let src = Datum {
                val: llargs[1],
                ty: tp_ty,
                kind: Rvalue::new(mode)
            };
            bcx = src.store_to(bcx, llargs[0]);
            C_nil(ccx)
        }
        (_, "get_tydesc") => {
            let tp_ty = *substs.types.get(FnSpace, 0);
            let static_ti = get_tydesc(ccx, tp_ty);

            // FIXME (#3730): ideally this shouldn't need a cast,
            // but there's a circularity between translating rust types to llvm
            // types and having a tydesc type available. So I can't directly access
            // the llvm type of intrinsic::TyDesc struct.
            PointerCast(bcx, static_ti.tydesc, llret_ty)
        }
        (_, "type_id") => {
            let hash = ty::hash_crate_independent(
                ccx.tcx(),
                *substs.types.get(FnSpace, 0),
                &ccx.link_meta().crate_hash);
            // NB: This needs to be kept in lockstep with the TypeId struct in
            //     the intrinsic module
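            //     (here: a single u64 field holding the crate-independent
            //     type hash)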
            C_named_struct(llret_ty, [C_u64(ccx, hash)])
        }
        (_, "init") => {
            let tp_ty = *substs.types.get(FnSpace, 0);
            let lltp_ty = type_of::type_of(ccx, tp_ty);
            if return_type_is_void(ccx, tp_ty) {
                C_nil(ccx)
            } else {
                C_null(lltp_ty)
            }
        }
        // Effectively no-ops
        (_, "uninit") | (_, "forget") => {
            C_nil(ccx)
        }
        (_, "needs_drop") => {
            let tp_ty = *substs.types.get(FnSpace, 0);
            C_bool(ccx, ty::type_needs_drop(ccx.tcx(), tp_ty))
        }
        (_, "owns_managed") => {
            let tp_ty = *substs.types.get(FnSpace, 0);
            C_bool(ccx, ty::type_contents(ccx.tcx(), tp_ty).owns_managed())
        }
        (_, "offset") => {
            let ptr = llargs[0];
            let offset = llargs[1];
            InBoundsGEP(bcx, ptr, [offset])
        }

        (_, "copy_nonoverlapping_memory") => {
            copy_intrinsic(bcx, false, false, *substs.types.get(FnSpace, 0),
                           llargs[0], llargs[1], llargs[2])
        }
        (_, "copy_memory") => {
            copy_intrinsic(bcx, true, false, *substs.types.get(FnSpace, 0),
                           llargs[0], llargs[1], llargs[2])
        }
        (_, "set_memory") => {
            memset_intrinsic(bcx, false, *substs.types.get(FnSpace, 0),
                             llargs[0], llargs[1], llargs[2])
        }

        (_, "volatile_copy_nonoverlapping_memory") => {
            copy_intrinsic(bcx, false, true, *substs.types.get(FnSpace, 0),
                           llargs[0], llargs[1], llargs[2])
        }
        (_, "volatile_copy_memory") => {
            copy_intrinsic(bcx, true, true, *substs.types.get(FnSpace, 0),
                           llargs[0], llargs[1], llargs[2])
        }
        (_, "volatile_set_memory") => {
            memset_intrinsic(bcx, true, *substs.types.get(FnSpace, 0),
                             llargs[0], llargs[1], llargs[2])
        }
        (_, "volatile_load") => {
            VolatileLoad(bcx, llargs[0])
        },
        (_, "volatile_store") => {
            VolatileStore(bcx, llargs[1], llargs[0]);
            C_nil(ccx)
        },

        (_, "ctlz8") => count_zeros_intrinsic(bcx, "llvm.ctlz.i8", llargs[0]),
        (_, "ctlz16") => count_zeros_intrinsic(bcx, "llvm.ctlz.i16", llargs[0]),
        (_, "ctlz32") => count_zeros_intrinsic(bcx, "llvm.ctlz.i32", llargs[0]),
        (_, "ctlz64") => count_zeros_intrinsic(bcx, "llvm.ctlz.i64", llargs[0]),
        (_, "cttz8") => count_zeros_intrinsic(bcx, "llvm.cttz.i8", llargs[0]),
        (_, "cttz16") => count_zeros_intrinsic(bcx, "llvm.cttz.i16", llargs[0]),
        (_, "cttz32") => count_zeros_intrinsic(bcx, "llvm.cttz.i32", llargs[0]),
        (_, "cttz64") => count_zeros_intrinsic(bcx, "llvm.cttz.i64", llargs[0]),

        (_, "i8_add_with_overflow") =>
            with_overflow_intrinsic(bcx, "llvm.sadd.with.overflow.i8", ret_ty,
                                    llargs[0], llargs[1]),
        (_, "i16_add_with_overflow") =>
            with_overflow_intrinsic(bcx, "llvm.sadd.with.overflow.i16", ret_ty,
                                    llargs[0], llargs[1]),
        (_, "i32_add_with_overflow") =>
            with_overflow_intrinsic(bcx, "llvm.sadd.with.overflow.i32", ret_ty,
                                    llargs[0], llargs[1]),
        (_, "i64_add_with_overflow") =>
            with_overflow_intrinsic(bcx, "llvm.sadd.with.overflow.i64", ret_ty,
                                    llargs[0], llargs[1]),

        (_, "u8_add_with_overflow") =>
            with_overflow_intrinsic(bcx, "llvm.uadd.with.overflow.i8", ret_ty,
                                    llargs[0], llargs[1]),
        (_, "u16_add_with_overflow") =>
            with_overflow_intrinsic(bcx, "llvm.uadd.with.overflow.i16", ret_ty,
                                    llargs[0], llargs[1]),
        (_, "u32_add_with_overflow") =>
            with_overflow_intrinsic(bcx, "llvm.uadd.with.overflow.i32", ret_ty,
                                    llargs[0], llargs[1]),
        (_, "u64_add_with_overflow") =>
            with_overflow_intrinsic(bcx, "llvm.uadd.with.overflow.i64", ret_ty,
                                    llargs[0], llargs[1]),

        (_, "i8_sub_with_overflow") =>
            with_overflow_intrinsic(bcx, "llvm.ssub.with.overflow.i8", ret_ty,
                                    llargs[0], llargs[1]),
        (_, "i16_sub_with_overflow") =>
            with_overflow_intrinsic(bcx, "llvm.ssub.with.overflow.i16", ret_ty,
                                    llargs[0], llargs[1]),
        (_, "i32_sub_with_overflow") =>
            with_overflow_intrinsic(bcx, "llvm.ssub.with.overflow.i32", ret_ty,
                                    llargs[0], llargs[1]),
        (_, "i64_sub_with_overflow") =>
            with_overflow_intrinsic(bcx, "llvm.ssub.with.overflow.i64", ret_ty,
                                    llargs[0], llargs[1]),

        (_, "u8_sub_with_overflow") =>
            with_overflow_intrinsic(bcx, "llvm.usub.with.overflow.i8", ret_ty,
                                    llargs[0], llargs[1]),
        (_, "u16_sub_with_overflow") =>
            with_overflow_intrinsic(bcx, "llvm.usub.with.overflow.i16", ret_ty,
                                    llargs[0], llargs[1]),
        (_, "u32_sub_with_overflow") =>
            with_overflow_intrinsic(bcx, "llvm.usub.with.overflow.i32", ret_ty,
                                    llargs[0], llargs[1]),
        (_, "u64_sub_with_overflow") =>
            with_overflow_intrinsic(bcx, "llvm.usub.with.overflow.i64", ret_ty,
                                    llargs[0], llargs[1]),

        (_, "i8_mul_with_overflow") =>
            with_overflow_intrinsic(bcx, "llvm.smul.with.overflow.i8", ret_ty,
                                    llargs[0], llargs[1]),
        (_, "i16_mul_with_overflow") =>
            with_overflow_intrinsic(bcx, "llvm.smul.with.overflow.i16", ret_ty,
                                    llargs[0], llargs[1]),
        (_, "i32_mul_with_overflow") =>
            with_overflow_intrinsic(bcx, "llvm.smul.with.overflow.i32", ret_ty,
                                    llargs[0], llargs[1]),
        (_, "i64_mul_with_overflow") =>
            with_overflow_intrinsic(bcx, "llvm.smul.with.overflow.i64", ret_ty,
                                    llargs[0], llargs[1]),

        (_, "u8_mul_with_overflow") =>
            with_overflow_intrinsic(bcx, "llvm.umul.with.overflow.i8", ret_ty,
                                    llargs[0], llargs[1]),
        (_, "u16_mul_with_overflow") =>
            with_overflow_intrinsic(bcx, "llvm.umul.with.overflow.i16", ret_ty,
                                    llargs[0], llargs[1]),
        (_, "u32_mul_with_overflow") =>
            with_overflow_intrinsic(bcx, "llvm.umul.with.overflow.i32", ret_ty,
                                    llargs[0], llargs[1]),
        (_, "u64_mul_with_overflow") =>
            with_overflow_intrinsic(bcx, "llvm.umul.with.overflow.i64", ret_ty,
                                    llargs[0], llargs[1]),

        (_, "return_address") => {
            if !fcx.caller_expects_out_pointer {
                tcx.sess.span_err(call_info.span,
                                  "invalid use of `return_address` intrinsic: function \
                                   does not use out pointer");
                C_null(Type::i8p(ccx))
            } else {
                PointerCast(bcx, llvm::get_param(fcx.llfn, 0), Type::i8p(ccx))
            }
        }

        // This requires that atomic intrinsics follow a specific naming pattern:
        // "atomic_<operation>[_<ordering>]", and no ordering means SeqCst
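        // (e.g. "atomic_xchg" is a SeqCst exchange, "atomic_load_acq" an
        // acquire load, and "atomic_fence_rel" a release fence)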
        (_, name) if name.starts_with("atomic_") => {
            let split: Vec<&str> = name.split('_').collect();
            assert!(split.len() >= 2, "atomic intrinsic name has the wrong format");

            let order = if split.len() == 2 {
                llvm::SequentiallyConsistent
            } else {
                match split[2] {
                    "relaxed" => llvm::Monotonic,
                    "acq"     => llvm::Acquire,
                    "rel"     => llvm::Release,
                    "acqrel"  => llvm::AcquireRelease,
                    _ => ccx.sess().fatal("unknown ordering in atomic intrinsic")
                }
            };

            match split[1] {
                "cxchg" => {
                    // See include/llvm/IR/Instructions.h for their
                    // implementation of this; I assume it's good enough
                    // for us to use for now.
                    let strongest_failure_ordering = match order {
                        llvm::NotAtomic | llvm::Unordered =>
                            ccx.sess().fatal("cmpxchg must be atomic"),

                        llvm::Monotonic | llvm::Release =>
                            llvm::Monotonic,

                        llvm::Acquire | llvm::AcquireRelease =>
                            llvm::Acquire,

                        llvm::SequentiallyConsistent =>
                            llvm::SequentiallyConsistent
                    };

                    let res = AtomicCmpXchg(bcx, llargs[0], llargs[1],
                                            llargs[2], order,
                                            strongest_failure_ordering);
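                    // LLVM 3.5 changed `cmpxchg` to return a { T, i1 }
                    // pair (old value plus success flag) rather than the
                    // bare old value, hence the version check below.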
                    if unsafe { llvm::LLVMVersionMinor() >= 5 } {
                        ExtractValue(bcx, res, 0)
                    } else {
                        res
                    }
                }

                "load" => {
                    AtomicLoad(bcx, llargs[0], order)
                }
                "store" => {
                    AtomicStore(bcx, llargs[1], llargs[0], order);
                    C_nil(ccx)
                }

                "fence" => {
                    AtomicFence(bcx, order);
                    C_nil(ccx)
                }

                // These are all AtomicRMW ops
                op => {
                    let atom_op = match op {
                        "xchg"  => llvm::AtomicXchg,
                        "xadd"  => llvm::AtomicAdd,
                        "xsub"  => llvm::AtomicSub,
                        "and"   => llvm::AtomicAnd,
                        "nand"  => llvm::AtomicNand,
                        "or"    => llvm::AtomicOr,
                        "xor"   => llvm::AtomicXor,
                        "max"   => llvm::AtomicMax,
                        "min"   => llvm::AtomicMin,
                        "umax"  => llvm::AtomicUMax,
                        "umin"  => llvm::AtomicUMin,
                        _ => ccx.sess().fatal("unknown atomic operation")
                    };

                    AtomicRMW(bcx, atom_op, llargs[0], llargs[1], order)
                }
            }
        }

        (_, _) => ccx.sess().span_bug(foreign_item.span, "unknown intrinsic")
    };

    if val_ty(llval) != Type::void(ccx) &&
       machine::llsize_of_alloc(ccx, val_ty(llval)) != 0 {
        store_ty(bcx, llval, llresult, ret_ty);
    }

    // If we made a temporary stack slot, let's clean it up
    match dest {
        expr::Ignore => {
            bcx = glue::drop_ty(bcx, llresult, ret_ty, Some(call_info));
        }
        expr::SaveIn(_) => {}
    }

    Result::new(bcx, llresult)
}

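// Lowers the copy intrinsics to `llvm.memcpy`/`llvm.memmove`. When
// `allow_overlap` is true, `memmove` (which tolerates overlapping buffers)
// is used instead of `memcpy`, and the suffix picks the variant matching
// the target's pointer width. Note that `count` counts *elements* of type
// `tp_ty`, so the byte length handed to LLVM is `size * count`.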
fn copy_intrinsic(bcx: Block, allow_overlap: bool, volatile: bool,
                  tp_ty: ty::t, dst: ValueRef, src: ValueRef, count: ValueRef) -> ValueRef {
    let ccx = bcx.ccx();
    let lltp_ty = type_of::type_of(ccx, tp_ty);
    let align = C_i32(ccx, type_of::align_of(ccx, tp_ty) as i32);
    let size = machine::llsize_of(ccx, lltp_ty);
    let int_size = machine::llbitsize_of_real(ccx, ccx.int_type());
    let name = if allow_overlap {
        if int_size == 32 {
            "llvm.memmove.p0i8.p0i8.i32"
        } else {
            "llvm.memmove.p0i8.p0i8.i64"
        }
    } else {
        if int_size == 32 {
            "llvm.memcpy.p0i8.p0i8.i32"
        } else {
            "llvm.memcpy.p0i8.p0i8.i64"
        }
    };

    let dst_ptr = PointerCast(bcx, dst, Type::i8p(ccx));
    let src_ptr = PointerCast(bcx, src, Type::i8p(ccx));
    let llfn = ccx.get_intrinsic(&name);

    Call(bcx, llfn, [dst_ptr, src_ptr, Mul(bcx, size, count), align,
                     C_bool(ccx, volatile)], None)
}

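// Lowers `set_memory`/`volatile_set_memory` to `llvm.memset.p0i8.{i32,i64}`;
// as with the copy intrinsics above, the byte length is `size * count`.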
fn memset_intrinsic(bcx: Block, volatile: bool, tp_ty: ty::t,
                    dst: ValueRef, val: ValueRef, count: ValueRef) -> ValueRef {
    let ccx = bcx.ccx();
    let lltp_ty = type_of::type_of(ccx, tp_ty);
    let align = C_i32(ccx, type_of::align_of(ccx, tp_ty) as i32);
    let size = machine::llsize_of(ccx, lltp_ty);
    let name = if machine::llbitsize_of_real(ccx, ccx.int_type()) == 32 {
        "llvm.memset.p0i8.i32"
    } else {
        "llvm.memset.p0i8.i64"
    };

    let dst_ptr = PointerCast(bcx, dst, Type::i8p(ccx));
    let llfn = ccx.get_intrinsic(&name);

    Call(bcx, llfn, [dst_ptr, val, Mul(bcx, size, count), align,
                     C_bool(ccx, volatile)], None)
}

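// The second operand of `llvm.ctlz`/`llvm.cttz` is the `is_zero_undef`
// flag; passing `false` requests a defined result (the operand's bit
// width) when the input is zero.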
fn count_zeros_intrinsic(bcx: Block, name: &'static str, val: ValueRef) -> ValueRef {
    let y = C_bool(bcx.ccx(), false);
    let llfn = bcx.ccx().get_intrinsic(&name);
    Call(bcx, llfn, [val, y], None)
}

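// The `llvm.*.with.overflow.*` intrinsics return a `{ iN, i1 }` pair: the
// wrapped result and an overflow flag. Rust's `*_with_overflow` intrinsics
// return `(T, bool)`, so the `i1` is zero-extended to `bool` and the pair
// is rebuilt as a struct value below.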
fn with_overflow_intrinsic(bcx: Block, name: &'static str, t: ty::t,
                           a: ValueRef, b: ValueRef) -> ValueRef {
    let llfn = bcx.ccx().get_intrinsic(&name);

    // Convert the `i1` overflow flag to a `bool` and pack it, together
    // with the result, into the `(T, bool)` return value
    let val = Call(bcx, llfn, [a, b], None);
    let result = ExtractValue(bcx, val, 0);
    let overflow = ZExt(bcx, ExtractValue(bcx, val, 1), Type::bool(bcx.ccx()));
    let ret = C_undef(type_of::type_of(bcx.ccx(), t));
    let ret = InsertValue(bcx, ret, result, 0);
    let ret = InsertValue(bcx, ret, overflow, 1);

    ret
}