]> git.lizzy.rs Git - rust.git/blob - src/librustc/middle/trans/intrinsic.rs
doc/guide-ffi: A few minor typo/language fixes
[rust.git] / src / librustc / middle / trans / intrinsic.rs
1 // Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
2 // file at the top-level directory of this distribution and at
3 // http://rust-lang.org/COPYRIGHT.
4 //
5 // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
6 // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
7 // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
8 // option. This file may not be copied, modified, or distributed
9 // except according to those terms.
10
11 #![allow(non_uppercase_pattern_statics)]
12
13 use arena::TypedArena;
14 use lib::llvm::{SequentiallyConsistent, Acquire, Release, Xchg};
15 use lib::llvm::{ValueRef, Pointer, Array, Struct};
16 use lib;
17 use middle::subst::FnSpace;
18 use middle::trans::base::*;
19 use middle::trans::build::*;
20 use middle::trans::common::*;
21 use middle::trans::datum::*;
22 use middle::trans::glue;
23 use middle::trans::type_of::*;
24 use middle::trans::type_of;
25 use middle::trans::machine;
26 use middle::trans::machine::llsize_of;
27 use middle::trans::type_::Type;
28 use middle::ty;
29 use syntax::ast;
30 use syntax::ast_map;
31 use syntax::parse::token;
32 use util::ppaux::ty_to_str;
33
/// Maps a Rust "simple" intrinsic to the LLVM intrinsic it lowers to.
///
/// A simple intrinsic is one that translates directly to a single LLVM
/// intrinsic call with an identical signature, so no custom codegen is
/// needed. Returns `None` when `item` is not one of these; such items
/// may still be handled by `trans_intrinsic` below.
pub fn get_simple_intrinsic(ccx: &CrateContext, item: &ast::ForeignItem) -> Option<ValueRef> {
    let name = match token::get_ident(item.ident).get() {
        "sqrtf32" => "llvm.sqrt.f32",
        "sqrtf64" => "llvm.sqrt.f64",
        "powif32" => "llvm.powi.f32",
        "powif64" => "llvm.powi.f64",
        "sinf32" => "llvm.sin.f32",
        "sinf64" => "llvm.sin.f64",
        "cosf32" => "llvm.cos.f32",
        "cosf64" => "llvm.cos.f64",
        "powf32" => "llvm.pow.f32",
        "powf64" => "llvm.pow.f64",
        "expf32" => "llvm.exp.f32",
        "expf64" => "llvm.exp.f64",
        "exp2f32" => "llvm.exp2.f32",
        "exp2f64" => "llvm.exp2.f64",
        "logf32" => "llvm.log.f32",
        "logf64" => "llvm.log.f64",
        "log10f32" => "llvm.log10.f32",
        "log10f64" => "llvm.log10.f64",
        "log2f32" => "llvm.log2.f32",
        "log2f64" => "llvm.log2.f64",
        "fmaf32" => "llvm.fma.f32",
        "fmaf64" => "llvm.fma.f64",
        "fabsf32" => "llvm.fabs.f32",
        "fabsf64" => "llvm.fabs.f64",
        "copysignf32" => "llvm.copysign.f32",
        "copysignf64" => "llvm.copysign.f64",
        "floorf32" => "llvm.floor.f32",
        "floorf64" => "llvm.floor.f64",
        "ceilf32" => "llvm.ceil.f32",
        "ceilf64" => "llvm.ceil.f64",
        "truncf32" => "llvm.trunc.f32",
        "truncf64" => "llvm.trunc.f64",
        "rintf32" => "llvm.rint.f32",
        "rintf64" => "llvm.rint.f64",
        "nearbyintf32" => "llvm.nearbyint.f32",
        "nearbyintf64" => "llvm.nearbyint.f64",
        "roundf32" => "llvm.round.f32",
        "roundf64" => "llvm.round.f64",
        "ctpop8" => "llvm.ctpop.i8",
        "ctpop16" => "llvm.ctpop.i16",
        "ctpop32" => "llvm.ctpop.i32",
        "ctpop64" => "llvm.ctpop.i64",
        "bswap16" => "llvm.bswap.i16",
        "bswap32" => "llvm.bswap.i32",
        "bswap64" => "llvm.bswap.i64",
        // Not a simple intrinsic; caller falls back to trans_intrinsic.
        _ => return None
    };
    Some(ccx.get_intrinsic(&name))
}
85
86 pub fn trans_intrinsic(ccx: &CrateContext,
87                        decl: ValueRef,
88                        item: &ast::ForeignItem,
89                        substs: &param_substs,
90                        ref_id: Option<ast::NodeId>) {
91     debug!("trans_intrinsic(item.ident={})", token::get_ident(item.ident));
92
93     fn with_overflow_instrinsic(bcx: &Block, name: &'static str, t: ty::t) {
94         let first_real_arg = bcx.fcx.arg_pos(0u);
95         let a = get_param(bcx.fcx.llfn, first_real_arg);
96         let b = get_param(bcx.fcx.llfn, first_real_arg + 1);
97         let llfn = bcx.ccx().get_intrinsic(&name);
98
99         let val = Call(bcx, llfn, [a, b], []);
100
101         if type_is_immediate(bcx.ccx(), t) {
102             Ret(bcx, val);
103         } else {
104             let retptr = get_param(bcx.fcx.llfn, bcx.fcx.out_arg_pos());
105             Store(bcx, val, retptr);
106             RetVoid(bcx);
107         }
108     }
109
110     fn volatile_load_intrinsic(bcx: &Block) {
111         let first_real_arg = bcx.fcx.arg_pos(0u);
112         let src = get_param(bcx.fcx.llfn, first_real_arg);
113
114         let val = VolatileLoad(bcx, src);
115         Ret(bcx, val);
116     }
117
118     fn volatile_store_intrinsic(bcx: &Block) {
119         let first_real_arg = bcx.fcx.arg_pos(0u);
120         let dst = get_param(bcx.fcx.llfn, first_real_arg);
121         let val = get_param(bcx.fcx.llfn, first_real_arg + 1);
122
123         VolatileStore(bcx, val, dst);
124         RetVoid(bcx);
125     }
126
127     fn copy_intrinsic(bcx: &Block, allow_overlap: bool, volatile: bool, tp_ty: ty::t) {
128         let ccx = bcx.ccx();
129         let lltp_ty = type_of::type_of(ccx, tp_ty);
130         let align = C_i32(ccx, machine::llalign_of_min(ccx, lltp_ty) as i32);
131         let size = machine::llsize_of(ccx, lltp_ty);
132         let int_size = machine::llbitsize_of_real(ccx, ccx.int_type);
133         let name = if allow_overlap {
134             if int_size == 32 {
135                 "llvm.memmove.p0i8.p0i8.i32"
136             } else {
137                 "llvm.memmove.p0i8.p0i8.i64"
138             }
139         } else {
140             if int_size == 32 {
141                 "llvm.memcpy.p0i8.p0i8.i32"
142             } else {
143                 "llvm.memcpy.p0i8.p0i8.i64"
144             }
145         };
146
147         let decl = bcx.fcx.llfn;
148         let first_real_arg = bcx.fcx.arg_pos(0u);
149         let dst_ptr = PointerCast(bcx, get_param(decl, first_real_arg), Type::i8p(ccx));
150         let src_ptr = PointerCast(bcx, get_param(decl, first_real_arg + 1), Type::i8p(ccx));
151         let count = get_param(decl, first_real_arg + 2);
152         let llfn = ccx.get_intrinsic(&name);
153         Call(bcx, llfn, [dst_ptr, src_ptr, Mul(bcx, size, count), align, C_i1(ccx, volatile)], []);
154         RetVoid(bcx);
155     }
156
157     fn memset_intrinsic(bcx: &Block, volatile: bool, tp_ty: ty::t) {
158         let ccx = bcx.ccx();
159         let lltp_ty = type_of::type_of(ccx, tp_ty);
160         let align = C_i32(ccx, machine::llalign_of_min(ccx, lltp_ty) as i32);
161         let size = machine::llsize_of(ccx, lltp_ty);
162         let name = if machine::llbitsize_of_real(ccx, ccx.int_type) == 32 {
163             "llvm.memset.p0i8.i32"
164         } else {
165             "llvm.memset.p0i8.i64"
166         };
167
168         let decl = bcx.fcx.llfn;
169         let first_real_arg = bcx.fcx.arg_pos(0u);
170         let dst_ptr = PointerCast(bcx, get_param(decl, first_real_arg), Type::i8p(ccx));
171         let val = get_param(decl, first_real_arg + 1);
172         let count = get_param(decl, first_real_arg + 2);
173         let llfn = ccx.get_intrinsic(&name);
174         Call(bcx, llfn, [dst_ptr, val, Mul(bcx, size, count), align, C_i1(ccx, volatile)], []);
175         RetVoid(bcx);
176     }
177
178     fn count_zeros_intrinsic(bcx: &Block, name: &'static str) {
179         let x = get_param(bcx.fcx.llfn, bcx.fcx.arg_pos(0u));
180         let y = C_i1(bcx.ccx(), false);
181         let llfn = bcx.ccx().get_intrinsic(&name);
182         let llcall = Call(bcx, llfn, [x, y], []);
183         Ret(bcx, llcall);
184     }
185
186     let output_type = ty::ty_fn_ret(ty::node_id_to_type(ccx.tcx(), item.id));
187
188     let arena = TypedArena::new();
189     let fcx = new_fn_ctxt(ccx, decl, item.id, false, output_type,
190                           substs, Some(item.span), &arena);
191     init_function(&fcx, true, output_type);
192
193     set_always_inline(fcx.llfn);
194
195     let mut bcx = fcx.entry_bcx.borrow().clone().unwrap();
196     let first_real_arg = fcx.arg_pos(0u);
197
198     let name = token::get_ident(item.ident);
199
200     // This requires that atomic intrinsics follow a specific naming pattern:
201     // "atomic_<operation>[_<ordering>], and no ordering means SeqCst
202     if name.get().starts_with("atomic_") {
203         let split: Vec<&str> = name.get().split('_').collect();
204         assert!(split.len() >= 2, "Atomic intrinsic not correct format");
205         let order = if split.len() == 2 {
206             lib::llvm::SequentiallyConsistent
207         } else {
208             match *split.get(2) {
209                 "relaxed" => lib::llvm::Monotonic,
210                 "acq"     => lib::llvm::Acquire,
211                 "rel"     => lib::llvm::Release,
212                 "acqrel"  => lib::llvm::AcquireRelease,
213                 _ => ccx.sess().fatal("unknown ordering in atomic intrinsic")
214             }
215         };
216
217         match *split.get(1) {
218             "cxchg" => {
219                 // See include/llvm/IR/Instructions.h for their implementation
220                 // of this, I assume that it's good enough for us to use for
221                 // now.
222                 let strongest_failure_ordering = match order {
223                     lib::llvm::NotAtomic | lib::llvm::Unordered =>
224                         ccx.sess().fatal("cmpxchg must be atomic"),
225                     lib::llvm::Monotonic | lib::llvm::Release =>
226                         lib::llvm::Monotonic,
227                     lib::llvm::Acquire | lib::llvm::AcquireRelease =>
228                         lib::llvm::Acquire,
229                     lib::llvm::SequentiallyConsistent =>
230                         lib::llvm::SequentiallyConsistent,
231                 };
232                 let res = AtomicCmpXchg(bcx, get_param(decl, first_real_arg),
233                                         get_param(decl, first_real_arg + 1u),
234                                         get_param(decl, first_real_arg + 2u),
235                                         order, strongest_failure_ordering);
236                 if unsafe { lib::llvm::llvm::LLVMVersionMinor() >= 5 } {
237                     Ret(bcx, ExtractValue(bcx, res, 0));
238                 } else {
239                     Ret(bcx, res);
240                 }
241             }
242             "load" => {
243                 let old = AtomicLoad(bcx, get_param(decl, first_real_arg),
244                                      order);
245                 Ret(bcx, old);
246             }
247             "store" => {
248                 AtomicStore(bcx, get_param(decl, first_real_arg + 1u),
249                             get_param(decl, first_real_arg),
250                             order);
251                 RetVoid(bcx);
252             }
253             "fence" => {
254                 AtomicFence(bcx, order);
255                 RetVoid(bcx);
256             }
257             op => {
258                 // These are all AtomicRMW ops
259                 let atom_op = match op {
260                     "xchg"  => lib::llvm::Xchg,
261                     "xadd"  => lib::llvm::Add,
262                     "xsub"  => lib::llvm::Sub,
263                     "and"   => lib::llvm::And,
264                     "nand"  => lib::llvm::Nand,
265                     "or"    => lib::llvm::Or,
266                     "xor"   => lib::llvm::Xor,
267                     "max"   => lib::llvm::Max,
268                     "min"   => lib::llvm::Min,
269                     "umax"  => lib::llvm::UMax,
270                     "umin"  => lib::llvm::UMin,
271                     _ => ccx.sess().fatal("unknown atomic operation")
272                 };
273
274                 let old = AtomicRMW(bcx, atom_op, get_param(decl, first_real_arg),
275                                     get_param(decl, first_real_arg + 1u),
276                                     order);
277                 Ret(bcx, old);
278             }
279         }
280
281         fcx.cleanup();
282         return;
283     }
284
285     match name.get() {
286         "abort" => {
287             let llfn = bcx.ccx().get_intrinsic(&("llvm.trap"));
288             Call(bcx, llfn, [], []);
289             Unreachable(bcx);
290         }
291         "breakpoint" => {
292             let llfn = bcx.ccx().get_intrinsic(&("llvm.debugtrap"));
293             Call(bcx, llfn, [], []);
294             RetVoid(bcx);
295         }
296         "size_of" => {
297             let tp_ty = *substs.substs.types.get(FnSpace, 0);
298             let lltp_ty = type_of::type_of(ccx, tp_ty);
299             Ret(bcx, C_uint(ccx, machine::llsize_of_real(ccx, lltp_ty) as uint));
300         }
301         "move_val_init" => {
302             // Create a datum reflecting the value being moved.
303             // Use `appropriate_mode` so that the datum is by ref
304             // if the value is non-immediate. Note that, with
305             // intrinsics, there are no argument cleanups to
306             // concern ourselves with, so we can use an rvalue datum.
307             let tp_ty = *substs.substs.types.get(FnSpace, 0);
308             let mode = appropriate_rvalue_mode(ccx, tp_ty);
309             let src = Datum {val: get_param(decl, first_real_arg + 1u),
310                              ty: tp_ty,
311                              kind: Rvalue::new(mode)};
312             bcx = src.store_to(bcx, get_param(decl, first_real_arg));
313             RetVoid(bcx);
314         }
315         "min_align_of" => {
316             let tp_ty = *substs.substs.types.get(FnSpace, 0);
317             let lltp_ty = type_of::type_of(ccx, tp_ty);
318             Ret(bcx, C_uint(ccx, machine::llalign_of_min(ccx, lltp_ty) as uint));
319         }
320         "pref_align_of"=> {
321             let tp_ty = *substs.substs.types.get(FnSpace, 0);
322             let lltp_ty = type_of::type_of(ccx, tp_ty);
323             Ret(bcx, C_uint(ccx, machine::llalign_of_pref(ccx, lltp_ty) as uint));
324         }
325         "get_tydesc" => {
326             let tp_ty = *substs.substs.types.get(FnSpace, 0);
327             let static_ti = get_tydesc(ccx, tp_ty);
328             glue::lazily_emit_visit_glue(ccx, &*static_ti);
329
330             // FIXME (#3730): ideally this shouldn't need a cast,
331             // but there's a circularity between translating rust types to llvm
332             // types and having a tydesc type available. So I can't directly access
333             // the llvm type of intrinsic::TyDesc struct.
334             let userland_tydesc_ty = type_of::type_of(ccx, output_type);
335             let td = PointerCast(bcx, static_ti.tydesc, userland_tydesc_ty);
336             Ret(bcx, td);
337         }
338         "type_id" => {
339             let hash = ty::hash_crate_independent(
340                 ccx.tcx(),
341                 *substs.substs.types.get(FnSpace, 0),
342                 &ccx.link_meta.crate_hash);
343             // NB: This needs to be kept in lockstep with the TypeId struct in
344             //     libstd/unstable/intrinsics.rs
345             let val = C_named_struct(type_of::type_of(ccx, output_type),
346                                      [C_u64(ccx, hash)]);
347             match bcx.fcx.llretptr.get() {
348                 Some(ptr) => {
349                     Store(bcx, val, ptr);
350                     RetVoid(bcx);
351                 },
352                 None => Ret(bcx, val)
353             }
354         }
355         "init" => {
356             let tp_ty = *substs.substs.types.get(FnSpace, 0);
357             let lltp_ty = type_of::type_of(ccx, tp_ty);
358             match bcx.fcx.llretptr.get() {
359                 Some(ptr) => { Store(bcx, C_null(lltp_ty), ptr); RetVoid(bcx); }
360                 None if ty::type_is_nil(tp_ty) => RetVoid(bcx),
361                 None => Ret(bcx, C_null(lltp_ty)),
362             }
363         }
364         "uninit" => {
365             // Do nothing, this is effectively a no-op
366             let retty = *substs.substs.types.get(FnSpace, 0);
367             if type_is_immediate(ccx, retty) && !return_type_is_void(ccx, retty) {
368                 unsafe {
369                     Ret(bcx, lib::llvm::llvm::LLVMGetUndef(type_of(ccx, retty).to_ref()));
370                 }
371             } else {
372                 RetVoid(bcx)
373             }
374         }
375         "forget" => {
376             RetVoid(bcx);
377         }
378         "transmute" => {
379             let (in_type, out_type) = (*substs.substs.types.get(FnSpace, 0),
380                                        *substs.substs.types.get(FnSpace, 1));
381             let llintype = type_of::type_of(ccx, in_type);
382             let llouttype = type_of::type_of(ccx, out_type);
383
384             let in_type_size = machine::llbitsize_of_real(ccx, llintype);
385             let out_type_size = machine::llbitsize_of_real(ccx, llouttype);
386             if in_type_size != out_type_size {
387                 let sp = match ccx.tcx.map.get(ref_id.unwrap()) {
388                     ast_map::NodeExpr(e) => e.span,
389                     _ => fail!("transmute has non-expr arg"),
390                 };
391                 ccx.sess().span_bug(sp,
392                     format!("transmute called on types with different sizes: \
393                              {} ({} bit{}) to \
394                              {} ({} bit{})",
395                             ty_to_str(ccx.tcx(), in_type),
396                             in_type_size,
397                             if in_type_size == 1 {""} else {"s"},
398                             ty_to_str(ccx.tcx(), out_type),
399                             out_type_size,
400                             if out_type_size == 1 {""} else {"s"}).as_slice());
401             }
402
403             if !return_type_is_void(ccx, out_type) {
404                 let llsrcval = get_param(decl, first_real_arg);
405                 if type_is_immediate(ccx, in_type) {
406                     match fcx.llretptr.get() {
407                         Some(llretptr) => {
408                             Store(bcx, llsrcval, PointerCast(bcx, llretptr, llintype.ptr_to()));
409                             RetVoid(bcx);
410                         }
411                         None => match (llintype.kind(), llouttype.kind()) {
412                             (Pointer, other) | (other, Pointer) if other != Pointer => {
413                                 let tmp = Alloca(bcx, llouttype, "");
414                                 Store(bcx, llsrcval, PointerCast(bcx, tmp, llintype.ptr_to()));
415                                 Ret(bcx, Load(bcx, tmp));
416                             }
417                             (Array, _) | (_, Array) | (Struct, _) | (_, Struct) => {
418                                 let tmp = Alloca(bcx, llouttype, "");
419                                 Store(bcx, llsrcval, PointerCast(bcx, tmp, llintype.ptr_to()));
420                                 Ret(bcx, Load(bcx, tmp));
421                             }
422                             _ => {
423                                 let llbitcast = BitCast(bcx, llsrcval, llouttype);
424                                 Ret(bcx, llbitcast)
425                             }
426                         }
427                     }
428                 } else if type_is_immediate(ccx, out_type) {
429                     let llsrcptr = PointerCast(bcx, llsrcval, llouttype.ptr_to());
430                     let ll_load = Load(bcx, llsrcptr);
431                     Ret(bcx, ll_load);
432                 } else {
433                     // NB: Do not use a Load and Store here. This causes massive
434                     // code bloat when `transmute` is used on large structural
435                     // types.
436                     let lldestptr = fcx.llretptr.get().unwrap();
437                     let lldestptr = PointerCast(bcx, lldestptr, Type::i8p(ccx));
438                     let llsrcptr = PointerCast(bcx, llsrcval, Type::i8p(ccx));
439
440                     let llsize = llsize_of(ccx, llintype);
441                     call_memcpy(bcx, lldestptr, llsrcptr, llsize, 1);
442                     RetVoid(bcx);
443                 };
444             } else {
445                 RetVoid(bcx);
446             }
447         }
448         "needs_drop" => {
449             let tp_ty = *substs.substs.types.get(FnSpace, 0);
450             Ret(bcx, C_bool(ccx, ty::type_needs_drop(ccx.tcx(), tp_ty)));
451         }
452         "owns_managed" => {
453             let tp_ty = *substs.substs.types.get(FnSpace, 0);
454             Ret(bcx, C_bool(ccx, ty::type_contents(ccx.tcx(), tp_ty).owns_managed()));
455         }
456         "visit_tydesc" => {
457             let td = get_param(decl, first_real_arg);
458             let visitor = get_param(decl, first_real_arg + 1u);
459             let td = PointerCast(bcx, td, ccx.tydesc_type().ptr_to());
460             glue::call_visit_glue(bcx, visitor, td, None);
461             RetVoid(bcx);
462         }
463         "offset" => {
464             let ptr = get_param(decl, first_real_arg);
465             let offset = get_param(decl, first_real_arg + 1);
466             let lladdr = InBoundsGEP(bcx, ptr, [offset]);
467             Ret(bcx, lladdr);
468         }
469         "copy_nonoverlapping_memory" => {
470             copy_intrinsic(bcx, false, false, *substs.substs.types.get(FnSpace, 0))
471         }
472         "copy_memory" => {
473             copy_intrinsic(bcx, true, false, *substs.substs.types.get(FnSpace, 0))
474         }
475         "set_memory" => {
476             memset_intrinsic(bcx, false, *substs.substs.types.get(FnSpace, 0))
477         }
478
479         "volatile_copy_nonoverlapping_memory" => {
480             copy_intrinsic(bcx, false, true, *substs.substs.types.get(FnSpace, 0))
481         }
482
483         "volatile_copy_memory" => {
484             copy_intrinsic(bcx, true, true, *substs.substs.types.get(FnSpace, 0))
485         }
486
487         "volatile_set_memory" => {
488             memset_intrinsic(bcx, true, *substs.substs.types.get(FnSpace, 0))
489         }
490
491         "ctlz8" => count_zeros_intrinsic(bcx, "llvm.ctlz.i8"),
492         "ctlz16" => count_zeros_intrinsic(bcx, "llvm.ctlz.i16"),
493         "ctlz32" => count_zeros_intrinsic(bcx, "llvm.ctlz.i32"),
494         "ctlz64" => count_zeros_intrinsic(bcx, "llvm.ctlz.i64"),
495         "cttz8" => count_zeros_intrinsic(bcx, "llvm.cttz.i8"),
496         "cttz16" => count_zeros_intrinsic(bcx, "llvm.cttz.i16"),
497         "cttz32" => count_zeros_intrinsic(bcx, "llvm.cttz.i32"),
498         "cttz64" => count_zeros_intrinsic(bcx, "llvm.cttz.i64"),
499
500         "volatile_load" => volatile_load_intrinsic(bcx),
501         "volatile_store" => volatile_store_intrinsic(bcx),
502
503         "i8_add_with_overflow" =>
504             with_overflow_instrinsic(bcx, "llvm.sadd.with.overflow.i8", output_type),
505         "i16_add_with_overflow" =>
506             with_overflow_instrinsic(bcx, "llvm.sadd.with.overflow.i16", output_type),
507         "i32_add_with_overflow" =>
508             with_overflow_instrinsic(bcx, "llvm.sadd.with.overflow.i32", output_type),
509         "i64_add_with_overflow" =>
510             with_overflow_instrinsic(bcx, "llvm.sadd.with.overflow.i64", output_type),
511
512         "u8_add_with_overflow" =>
513             with_overflow_instrinsic(bcx, "llvm.uadd.with.overflow.i8", output_type),
514         "u16_add_with_overflow" =>
515             with_overflow_instrinsic(bcx, "llvm.uadd.with.overflow.i16", output_type),
516         "u32_add_with_overflow" =>
517             with_overflow_instrinsic(bcx, "llvm.uadd.with.overflow.i32", output_type),
518         "u64_add_with_overflow" =>
519             with_overflow_instrinsic(bcx, "llvm.uadd.with.overflow.i64", output_type),
520
521         "i8_sub_with_overflow" =>
522             with_overflow_instrinsic(bcx, "llvm.ssub.with.overflow.i8", output_type),
523         "i16_sub_with_overflow" =>
524             with_overflow_instrinsic(bcx, "llvm.ssub.with.overflow.i16", output_type),
525         "i32_sub_with_overflow" =>
526             with_overflow_instrinsic(bcx, "llvm.ssub.with.overflow.i32", output_type),
527         "i64_sub_with_overflow" =>
528             with_overflow_instrinsic(bcx, "llvm.ssub.with.overflow.i64", output_type),
529
530         "u8_sub_with_overflow" =>
531             with_overflow_instrinsic(bcx, "llvm.usub.with.overflow.i8", output_type),
532         "u16_sub_with_overflow" =>
533             with_overflow_instrinsic(bcx, "llvm.usub.with.overflow.i16", output_type),
534         "u32_sub_with_overflow" =>
535             with_overflow_instrinsic(bcx, "llvm.usub.with.overflow.i32", output_type),
536         "u64_sub_with_overflow" =>
537             with_overflow_instrinsic(bcx, "llvm.usub.with.overflow.i64", output_type),
538
539         "i8_mul_with_overflow" =>
540             with_overflow_instrinsic(bcx, "llvm.smul.with.overflow.i8", output_type),
541         "i16_mul_with_overflow" =>
542             with_overflow_instrinsic(bcx, "llvm.smul.with.overflow.i16", output_type),
543         "i32_mul_with_overflow" =>
544             with_overflow_instrinsic(bcx, "llvm.smul.with.overflow.i32", output_type),
545         "i64_mul_with_overflow" =>
546             with_overflow_instrinsic(bcx, "llvm.smul.with.overflow.i64", output_type),
547
548         "u8_mul_with_overflow" =>
549             with_overflow_instrinsic(bcx, "llvm.umul.with.overflow.i8", output_type),
550         "u16_mul_with_overflow" =>
551             with_overflow_instrinsic(bcx, "llvm.umul.with.overflow.i16", output_type),
552         "u32_mul_with_overflow" =>
553             with_overflow_instrinsic(bcx, "llvm.umul.with.overflow.i32", output_type),
554         "u64_mul_with_overflow" =>
555             with_overflow_instrinsic(bcx, "llvm.umul.with.overflow.i64", output_type),
556
557         _ => {
558             // Could we make this an enum rather than a string? does it get
559             // checked earlier?
560             ccx.sess().span_bug(item.span, "unknown intrinsic");
561         }
562     }
563     fcx.cleanup();
564 }
565
566 /// Performs late verification that intrinsics are used correctly. At present,
567 /// the only intrinsic that needs such verification is `transmute`.
568 pub fn check_intrinsics(ccx: &CrateContext) {
569     for transmute_restriction in ccx.tcx
570                                     .transmute_restrictions
571                                     .borrow()
572                                     .iter() {
573         let llfromtype = type_of::sizing_type_of(ccx,
574                                                  transmute_restriction.from);
575         let lltotype = type_of::sizing_type_of(ccx,
576                                                transmute_restriction.to);
577         let from_type_size = machine::llbitsize_of_real(ccx, llfromtype);
578         let to_type_size = machine::llbitsize_of_real(ccx, lltotype);
579         if from_type_size != to_type_size {
580             ccx.sess()
581                .span_err(transmute_restriction.span,
582                 format!("transmute called on types with different sizes: \
583                          {} ({} bit{}) to {} ({} bit{})",
584                         ty_to_str(ccx.tcx(), transmute_restriction.from),
585                         from_type_size as uint,
586                         if from_type_size == 1 {
587                             ""
588                         } else {
589                             "s"
590                         },
591                         ty_to_str(ccx.tcx(), transmute_restriction.to),
592                         to_type_size as uint,
593                         if to_type_size == 1 {
594                             ""
595                         } else {
596                             "s"
597                         }).as_slice());
598         }
599     }
600     ccx.sess().abort_if_errors();
601 }
602