]> git.lizzy.rs Git - rust.git/blob - src/librustc_codegen_llvm/intrinsic.rs
rustc_codegen_llvm: use safe references for Type.
[rust.git] / src / librustc_codegen_llvm / intrinsic.rs
1 // Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
2 // file at the top-level directory of this distribution and at
3 // http://rust-lang.org/COPYRIGHT.
4 //
5 // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
6 // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
7 // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
8 // option. This file may not be copied, modified, or distributed
9 // except according to those terms.
10
11 #![allow(non_upper_case_globals)]
12
13 use intrinsics::{self, Intrinsic};
14 use llvm;
15 use llvm::{ValueRef};
16 use abi::{Abi, FnType, LlvmType, PassMode};
17 use mir::place::PlaceRef;
18 use mir::operand::{OperandRef, OperandValue};
19 use base::*;
20 use common::*;
21 use declare;
22 use glue;
23 use type_::Type;
24 use type_of::LayoutLlvmExt;
25 use rustc::ty::{self, Ty};
26 use rustc::ty::layout::{HasDataLayout, LayoutOf};
27 use rustc::hir;
28 use syntax::ast;
29 use syntax::symbol::Symbol;
30 use builder::Builder;
31
32 use rustc::session::Session;
33 use syntax_pos::Span;
34
35 use std::cmp::Ordering;
36 use std::iter;
37
38 fn get_simple_intrinsic(cx: &CodegenCx, name: &str) -> Option<ValueRef> {
39     let llvm_name = match name {
40         "sqrtf32" => "llvm.sqrt.f32",
41         "sqrtf64" => "llvm.sqrt.f64",
42         "powif32" => "llvm.powi.f32",
43         "powif64" => "llvm.powi.f64",
44         "sinf32" => "llvm.sin.f32",
45         "sinf64" => "llvm.sin.f64",
46         "cosf32" => "llvm.cos.f32",
47         "cosf64" => "llvm.cos.f64",
48         "powf32" => "llvm.pow.f32",
49         "powf64" => "llvm.pow.f64",
50         "expf32" => "llvm.exp.f32",
51         "expf64" => "llvm.exp.f64",
52         "exp2f32" => "llvm.exp2.f32",
53         "exp2f64" => "llvm.exp2.f64",
54         "logf32" => "llvm.log.f32",
55         "logf64" => "llvm.log.f64",
56         "log10f32" => "llvm.log10.f32",
57         "log10f64" => "llvm.log10.f64",
58         "log2f32" => "llvm.log2.f32",
59         "log2f64" => "llvm.log2.f64",
60         "fmaf32" => "llvm.fma.f32",
61         "fmaf64" => "llvm.fma.f64",
62         "fabsf32" => "llvm.fabs.f32",
63         "fabsf64" => "llvm.fabs.f64",
64         "copysignf32" => "llvm.copysign.f32",
65         "copysignf64" => "llvm.copysign.f64",
66         "floorf32" => "llvm.floor.f32",
67         "floorf64" => "llvm.floor.f64",
68         "ceilf32" => "llvm.ceil.f32",
69         "ceilf64" => "llvm.ceil.f64",
70         "truncf32" => "llvm.trunc.f32",
71         "truncf64" => "llvm.trunc.f64",
72         "rintf32" => "llvm.rint.f32",
73         "rintf64" => "llvm.rint.f64",
74         "nearbyintf32" => "llvm.nearbyint.f32",
75         "nearbyintf64" => "llvm.nearbyint.f64",
76         "roundf32" => "llvm.round.f32",
77         "roundf64" => "llvm.round.f64",
78         "assume" => "llvm.assume",
79         "abort" => "llvm.trap",
80         _ => return None
81     };
82     Some(cx.get_intrinsic(&llvm_name))
83 }
84
/// Remember to add all intrinsics here, in librustc_typeck/check/mod.rs,
/// and in libcore/intrinsics.rs; if you need access to any llvm intrinsics,
/// add them to librustc_codegen_llvm/context.rs
///
/// Lowers a call to a Rust intrinsic into LLVM IR. The intrinsic is
/// identified by `callee_ty` (which must be a `TyFnDef`); its result, if
/// any, is written through `llresult`, a pointer to the call's return
/// place. `span` is only used for invalid-monomorphization diagnostics.
pub fn codegen_intrinsic_call(
    bx: &Builder<'a, 'll, 'tcx>,
    callee_ty: Ty<'tcx>,
    fn_ty: &FnType<'tcx, Ty<'tcx>>,
    args: &[OperandRef<'tcx>],
    llresult: ValueRef,
    span: Span,
) {
    let cx = bx.cx;
    let tcx = cx.tcx;

    let (def_id, substs) = match callee_ty.sty {
        ty::TyFnDef(def_id, substs) => (def_id, substs),
        _ => bug!("expected fn item type, found {}", callee_ty)
    };

    let sig = callee_ty.fn_sig(tcx);
    let sig = tcx.normalize_erasing_late_bound_regions(ty::ParamEnv::reveal_all(), &sig);
    let arg_tys = sig.inputs();
    let ret_ty = sig.output();
    let name = &*tcx.item_name(def_id).as_str();

    let llret_ty = cx.layout_of(ret_ty).llvm_type(cx);
    // Wrap the raw return pointer in a place so that arms below can project
    // individual fields of the result (e.g. the (value, overflow) pairs).
    let result = PlaceRef::new_sized(llresult, fn_ty.ret.layout, fn_ty.ret.layout.align);

    // Intrinsics with a direct LLVM counterpart (see `get_simple_intrinsic`)
    // become a plain call; everything else gets a dedicated arm below.
    let simple = get_simple_intrinsic(cx, name);
    // Each arm evaluates to `llval`, the immediate result; arms that write
    // straight into `result` (or have no result) `return` early instead.
    let llval = match name {
        _ if simple.is_some() => {
            bx.call(simple.unwrap(),
                     &args.iter().map(|arg| arg.immediate()).collect::<Vec<_>>(),
                     None)
        }
        "unreachable" => {
            return;
        },
        "likely" => {
            let expect = cx.get_intrinsic(&("llvm.expect.i1"));
            bx.call(expect, &[args[0].immediate(), C_bool(cx, true)], None)
        }
        "unlikely" => {
            let expect = cx.get_intrinsic(&("llvm.expect.i1"));
            bx.call(expect, &[args[0].immediate(), C_bool(cx, false)], None)
        }
        "try" => {
            // `try_intrinsic` stores through `llresult` itself.
            try_intrinsic(bx, cx,
                          args[0].immediate(),
                          args[1].immediate(),
                          args[2].immediate(),
                          llresult);
            return;
        }
        "breakpoint" => {
            let llfn = cx.get_intrinsic(&("llvm.debugtrap"));
            bx.call(llfn, &[], None)
        }
        "size_of" => {
            let tp_ty = substs.type_at(0);
            C_usize(cx, cx.size_of(tp_ty).bytes())
        }
        "size_of_val" => {
            let tp_ty = substs.type_at(0);
            // A fat pointer (Pair) means an unsized value: compute the size
            // dynamically from its metadata. Otherwise the size is static.
            if let OperandValue::Pair(_, meta) = args[0].val {
                let (llsize, _) =
                    glue::size_and_align_of_dst(bx, tp_ty, meta);
                llsize
            } else {
                C_usize(cx, cx.size_of(tp_ty).bytes())
            }
        }
        "min_align_of" => {
            let tp_ty = substs.type_at(0);
            C_usize(cx, cx.align_of(tp_ty).abi())
        }
        "min_align_of_val" => {
            let tp_ty = substs.type_at(0);
            // Same static/dynamic split as "size_of_val" above.
            if let OperandValue::Pair(_, meta) = args[0].val {
                let (_, llalign) =
                    glue::size_and_align_of_dst(bx, tp_ty, meta);
                llalign
            } else {
                C_usize(cx, cx.align_of(tp_ty).abi())
            }
        }
        "pref_align_of" => {
            let tp_ty = substs.type_at(0);
            C_usize(cx, cx.align_of(tp_ty).pref())
        }
        "type_name" => {
            let tp_ty = substs.type_at(0);
            let ty_name = Symbol::intern(&tp_ty.to_string()).as_str();
            C_str_slice(cx, ty_name)
        }
        "type_id" => {
            C_u64(cx, cx.tcx.type_id_hash(substs.type_at(0)))
        }
        "init" => {
            let ty = substs.type_at(0);
            if !cx.layout_of(ty).is_zst() {
                // Just zero out the stack slot.
                // If we store a zero constant, LLVM will drown in vreg allocation for large data
                // structures, and the generated code will be awful. (A telltale sign of this is
                // large quantities of `mov [byte ptr foo],0` in the generated code.)
                memset_intrinsic(bx, false, ty, llresult, C_u8(cx, 0), C_usize(cx, 1));
            }
            return;
        }
        // Effectively no-ops
        "uninit" => {
            return;
        }
        "needs_drop" => {
            let tp_ty = substs.type_at(0);

            C_bool(cx, bx.cx.type_needs_drop(tp_ty))
        }
        "offset" => {
            let ptr = args[0].immediate();
            let offset = args[1].immediate();
            bx.inbounds_gep(ptr, &[offset])
        }
        "arith_offset" => {
            let ptr = args[0].immediate();
            let offset = args[1].immediate();
            bx.gep(ptr, &[offset])
        }

        // NB: `copy_intrinsic` takes (dst, src, count), while the Rust-side
        // arguments here arrive in the opposite (src, dst) order — hence
        // args[1] before args[0] in these two arms.
        "copy_nonoverlapping" => {
            copy_intrinsic(bx, false, false, substs.type_at(0),
                           args[1].immediate(), args[0].immediate(), args[2].immediate())
        }
        "copy" => {
            copy_intrinsic(bx, true, false, substs.type_at(0),
                           args[1].immediate(), args[0].immediate(), args[2].immediate())
        }
        "write_bytes" => {
            memset_intrinsic(bx, false, substs.type_at(0),
                             args[0].immediate(), args[1].immediate(), args[2].immediate())
        }

        // The volatile copy variants take (dst, src, count) directly.
        "volatile_copy_nonoverlapping_memory" => {
            copy_intrinsic(bx, false, true, substs.type_at(0),
                           args[0].immediate(), args[1].immediate(), args[2].immediate())
        }
        "volatile_copy_memory" => {
            copy_intrinsic(bx, true, true, substs.type_at(0),
                           args[0].immediate(), args[1].immediate(), args[2].immediate())
        }
        "volatile_set_memory" => {
            memset_intrinsic(bx, true, substs.type_at(0),
                             args[0].immediate(), args[1].immediate(), args[2].immediate())
        }
        "volatile_load" | "unaligned_volatile_load" => {
            let tp_ty = substs.type_at(0);
            let mut ptr = args[0].immediate();
            // Honour any ABI cast on the return type before loading.
            if let PassMode::Cast(ty) = fn_ty.ret.mode {
                ptr = bx.pointercast(ptr, ty.llvm_type(cx).ptr_to());
            }
            let load = bx.volatile_load(ptr);
            let align = if name == "unaligned_volatile_load" {
                1
            } else {
                cx.align_of(tp_ty).abi() as u32
            };
            unsafe {
                llvm::LLVMSetAlignment(load, align);
            }
            to_immediate(bx, load, cx.layout_of(tp_ty))
        },
        "volatile_store" => {
            let dst = args[0].deref(bx.cx);
            args[1].val.volatile_store(bx, dst);
            return;
        },
        "unaligned_volatile_store" => {
            let dst = args[0].deref(bx.cx);
            args[1].val.unaligned_volatile_store(bx, dst);
            return;
        },
        "prefetch_read_data" | "prefetch_write_data" |
        "prefetch_read_instruction" | "prefetch_write_instruction" => {
            let expect = cx.get_intrinsic(&("llvm.prefetch"));
            // llvm.prefetch takes (ptr, rw, locality, cache-type) — map each
            // variant to its read/write flag and data/instruction cache flag.
            let (rw, cache_type) = match name {
                "prefetch_read_data" => (0, 1),
                "prefetch_write_data" => (1, 1),
                "prefetch_read_instruction" => (0, 0),
                "prefetch_write_instruction" => (1, 0),
                _ => bug!()
            };
            bx.call(expect, &[
                args[0].immediate(),
                C_i32(cx, rw),
                args[1].immediate(),
                C_i32(cx, cache_type)
            ], None)
        },
        // All intrinsics below operate on a basic integer type; anything
        // else is an invalid monomorphization.
        "ctlz" | "ctlz_nonzero" | "cttz" | "cttz_nonzero" | "ctpop" | "bswap" |
        "bitreverse" | "add_with_overflow" | "sub_with_overflow" |
        "mul_with_overflow" | "overflowing_add" | "overflowing_sub" | "overflowing_mul" |
        "unchecked_div" | "unchecked_rem" | "unchecked_shl" | "unchecked_shr" | "exact_div" => {
            let ty = arg_tys[0];
            match int_type_width_signed(ty, cx) {
                Some((width, signed)) =>
                    match name {
                        "ctlz" | "cttz" => {
                            // Second argument `false`: input may be zero.
                            let y = C_bool(bx.cx, false);
                            let llfn = cx.get_intrinsic(&format!("llvm.{}.i{}", name, width));
                            bx.call(llfn, &[args[0].immediate(), y], None)
                        }
                        "ctlz_nonzero" | "cttz_nonzero" => {
                            // Second argument `true`: input promised nonzero.
                            let y = C_bool(bx.cx, true);
                            let llvm_name = &format!("llvm.{}.i{}", &name[..4], width);
                            let llfn = cx.get_intrinsic(llvm_name);
                            bx.call(llfn, &[args[0].immediate(), y], None)
                        }
                        "ctpop" => bx.call(cx.get_intrinsic(&format!("llvm.ctpop.i{}", width)),
                                        &[args[0].immediate()], None),
                        "bswap" => {
                            if width == 8 {
                                args[0].immediate() // byte swap a u8/i8 is just a no-op
                            } else {
                                bx.call(cx.get_intrinsic(&format!("llvm.bswap.i{}", width)),
                                        &[args[0].immediate()], None)
                            }
                        }
                        "bitreverse" => {
                            bx.call(cx.get_intrinsic(&format!("llvm.bitreverse.i{}", width)),
                                &[args[0].immediate()], None)
                        }
                        "add_with_overflow" | "sub_with_overflow" | "mul_with_overflow" => {
                            // e.g. "llvm.sadd.with.overflow.i32" / "llvm.umul.with.overflow.i64"
                            let intrinsic = format!("llvm.{}{}.with.overflow.i{}",
                                                    if signed { 's' } else { 'u' },
                                                    &name[..3], width);
                            let llfn = bx.cx.get_intrinsic(&intrinsic);

                            // Convert `i1` to a `bool`, and write it to the out parameter
                            let pair = bx.call(llfn, &[
                                args[0].immediate(),
                                args[1].immediate()
                            ], None);
                            let val = bx.extract_value(pair, 0);
                            let overflow = bx.zext(bx.extract_value(pair, 1), Type::bool(cx));

                            let dest = result.project_field(bx, 0);
                            bx.store(val, dest.llval, dest.align);
                            let dest = result.project_field(bx, 1);
                            bx.store(overflow, dest.llval, dest.align);

                            return;
                        },
                        "overflowing_add" => bx.add(args[0].immediate(), args[1].immediate()),
                        "overflowing_sub" => bx.sub(args[0].immediate(), args[1].immediate()),
                        "overflowing_mul" => bx.mul(args[0].immediate(), args[1].immediate()),
                        "exact_div" =>
                            if signed {
                                bx.exactsdiv(args[0].immediate(), args[1].immediate())
                            } else {
                                bx.exactudiv(args[0].immediate(), args[1].immediate())
                            },
                        "unchecked_div" =>
                            if signed {
                                bx.sdiv(args[0].immediate(), args[1].immediate())
                            } else {
                                bx.udiv(args[0].immediate(), args[1].immediate())
                            },
                        "unchecked_rem" =>
                            if signed {
                                bx.srem(args[0].immediate(), args[1].immediate())
                            } else {
                                bx.urem(args[0].immediate(), args[1].immediate())
                            },
                        "unchecked_shl" => bx.shl(args[0].immediate(), args[1].immediate()),
                        "unchecked_shr" =>
                            if signed {
                                bx.ashr(args[0].immediate(), args[1].immediate())
                            } else {
                                bx.lshr(args[0].immediate(), args[1].immediate())
                            },
                        _ => bug!(),
                    },
                None => {
                    span_invalid_monomorphization_error(
                        tcx.sess, span,
                        &format!("invalid monomorphization of `{}` intrinsic: \
                                  expected basic integer type, found `{}`", name, ty));
                    return;
                }
            }

        },
        // Fast-math float ops; only valid on basic float types.
        "fadd_fast" | "fsub_fast" | "fmul_fast" | "fdiv_fast" | "frem_fast" => {
            let sty = &arg_tys[0].sty;
            match float_type_width(sty) {
                Some(_width) =>
                    match name {
                        "fadd_fast" => bx.fadd_fast(args[0].immediate(), args[1].immediate()),
                        "fsub_fast" => bx.fsub_fast(args[0].immediate(), args[1].immediate()),
                        "fmul_fast" => bx.fmul_fast(args[0].immediate(), args[1].immediate()),
                        "fdiv_fast" => bx.fdiv_fast(args[0].immediate(), args[1].immediate()),
                        "frem_fast" => bx.frem_fast(args[0].immediate(), args[1].immediate()),
                        _ => bug!(),
                    },
                None => {
                    span_invalid_monomorphization_error(
                        tcx.sess, span,
                        &format!("invalid monomorphization of `{}` intrinsic: \
                                  expected basic float type, found `{}`", name, sty));
                    return;
                }
            }

        },

        "discriminant_value" => {
            args[0].deref(bx.cx).codegen_get_discr(bx, ret_ty)
        }

        name if name.starts_with("simd_") => {
            match generic_simd_intrinsic(bx, name,
                                         callee_ty,
                                         args,
                                         ret_ty, llret_ty,
                                         span) {
                Ok(llval) => llval,
                // generic_simd_intrinsic reports its own errors on failure.
                Err(()) => return
            }
        }
        // This requires that atomic intrinsics follow a specific naming pattern:
        // "atomic_<operation>[_<ordering>]", and no ordering means SeqCst
        name if name.starts_with("atomic_") => {
            use llvm::AtomicOrdering::*;

            let split: Vec<&str> = name.split('_').collect();

            // cxchg/cxchgweak carry a separate failure ordering, which is why
            // every arm below yields an (order, failorder) pair.
            let is_cxchg = split[1] == "cxchg" || split[1] == "cxchgweak";
            let (order, failorder) = match split.len() {
                2 => (SequentiallyConsistent, SequentiallyConsistent),
                3 => match split[2] {
                    "unordered" => (Unordered, Unordered),
                    "relaxed" => (Monotonic, Monotonic),
                    "acq"     => (Acquire, Acquire),
                    "rel"     => (Release, Monotonic),
                    "acqrel"  => (AcquireRelease, Acquire),
                    "failrelaxed" if is_cxchg =>
                        (SequentiallyConsistent, Monotonic),
                    "failacq" if is_cxchg =>
                        (SequentiallyConsistent, Acquire),
                    _ => cx.sess().fatal("unknown ordering in atomic intrinsic")
                },
                4 => match (split[2], split[3]) {
                    ("acq", "failrelaxed") if is_cxchg =>
                        (Acquire, Monotonic),
                    ("acqrel", "failrelaxed") if is_cxchg =>
                        (AcquireRelease, Monotonic),
                    _ => cx.sess().fatal("unknown ordering in atomic intrinsic")
                },
                _ => cx.sess().fatal("Atomic intrinsic not in correct format"),
            };

            let invalid_monomorphization = |ty| {
                span_invalid_monomorphization_error(tcx.sess, span,
                    &format!("invalid monomorphization of `{}` intrinsic: \
                              expected basic integer type, found `{}`", name, ty));
            };

            match split[1] {
                "cxchg" | "cxchgweak" => {
                    let ty = substs.type_at(0);
                    if int_type_width_signed(ty, cx).is_some() {
                        let weak = if split[1] == "cxchgweak" { llvm::True } else { llvm::False };
                        let pair = bx.atomic_cmpxchg(
                            args[0].immediate(),
                            args[1].immediate(),
                            args[2].immediate(),
                            order,
                            failorder,
                            weak);
                        // Unpack the (value, succeeded-i1) pair into the
                        // two fields of the return place.
                        let val = bx.extract_value(pair, 0);
                        let success = bx.zext(bx.extract_value(pair, 1), Type::bool(bx.cx));

                        let dest = result.project_field(bx, 0);
                        bx.store(val, dest.llval, dest.align);
                        let dest = result.project_field(bx, 1);
                        bx.store(success, dest.llval, dest.align);
                        return;
                    } else {
                        return invalid_monomorphization(ty);
                    }
                }

                "load" => {
                    let ty = substs.type_at(0);
                    if int_type_width_signed(ty, cx).is_some() {
                        let align = cx.align_of(ty);
                        bx.atomic_load(args[0].immediate(), order, align)
                    } else {
                        return invalid_monomorphization(ty);
                    }
                }

                "store" => {
                    let ty = substs.type_at(0);
                    if int_type_width_signed(ty, cx).is_some() {
                        let align = cx.align_of(ty);
                        bx.atomic_store(args[1].immediate(), args[0].immediate(), order, align);
                        return;
                    } else {
                        return invalid_monomorphization(ty);
                    }
                }

                "fence" => {
                    bx.atomic_fence(order, llvm::SynchronizationScope::CrossThread);
                    return;
                }

                "singlethreadfence" => {
                    bx.atomic_fence(order, llvm::SynchronizationScope::SingleThread);
                    return;
                }

                // These are all AtomicRMW ops
                op => {
                    let atom_op = match op {
                        "xchg"  => llvm::AtomicXchg,
                        "xadd"  => llvm::AtomicAdd,
                        "xsub"  => llvm::AtomicSub,
                        "and"   => llvm::AtomicAnd,
                        "nand"  => llvm::AtomicNand,
                        "or"    => llvm::AtomicOr,
                        "xor"   => llvm::AtomicXor,
                        "max"   => llvm::AtomicMax,
                        "min"   => llvm::AtomicMin,
                        "umax"  => llvm::AtomicUMax,
                        "umin"  => llvm::AtomicUMin,
                        _ => cx.sess().fatal("unknown atomic operation")
                    };

                    let ty = substs.type_at(0);
                    if int_type_width_signed(ty, cx).is_some() {
                        bx.atomic_rmw(atom_op, args[0].immediate(), args[1].immediate(), order)
                    } else {
                        return invalid_monomorphization(ty);
                    }
                }
            }
        }

        "nontemporal_store" => {
            let dst = args[0].deref(bx.cx);
            args[1].val.nontemporal_store(bx, dst);
            return;
        }

        // Fallback: "plain" intrinsics described declaratively in the
        // `intrinsics` crate table (mostly platform/SIMD C intrinsics).
        _ => {
            let intr = match Intrinsic::find(&name) {
                Some(intr) => intr,
                None => bug!("unknown intrinsic '{}'", name),
            };
            // Extracts the single element of a one-element Vec; asserts
            // the declared type really did flatten to exactly one LLVM type.
            fn one<T>(x: Vec<T>) -> T {
                assert_eq!(x.len(), 1);
                x.into_iter().next().unwrap()
            }
            // Translates a declarative `intrinsics::Type` into the LLVM
            // type(s) it occupies; flattened aggregates expand to several.
            fn ty_to_type(cx: &CodegenCx<'ll, '_>, t: &intrinsics::Type) -> Vec<&'ll Type> {
                use intrinsics::Type::*;
                match *t {
                    Void => vec![Type::void(cx)],
                    Integer(_signed, _width, llvm_width) => {
                        vec![Type::ix(cx, llvm_width as u64)]
                    }
                    Float(x) => {
                        match x {
                            32 => vec![Type::f32(cx)],
                            64 => vec![Type::f64(cx)],
                            _ => bug!()
                        }
                    }
                    Pointer(ref t, ref llvm_elem, _const) => {
                        // An explicit LLVM element type, if given, overrides
                        // the Rust-level element type.
                        let t = llvm_elem.as_ref().unwrap_or(t);
                        let elem = one(ty_to_type(cx, t));
                        vec![elem.ptr_to()]
                    }
                    Vector(ref t, ref llvm_elem, length) => {
                        let t = llvm_elem.as_ref().unwrap_or(t);
                        let elem = one(ty_to_type(cx, t));
                        vec![Type::vector(elem, length as u64)]
                    }
                    Aggregate(false, ref contents) => {
                        let elems = contents.iter()
                                            .map(|t| one(ty_to_type(cx, t)))
                                            .collect::<Vec<_>>();
                        vec![Type::struct_(cx, &elems, false)]
                    }
                    Aggregate(true, ref contents) => {
                        contents.iter()
                                .flat_map(|t| ty_to_type(cx, t))
                                .collect()
                    }
                }
            }

            // This allows an argument list like `foo, (bar, baz),
            // qux` to be converted into `foo, bar, baz, qux`, integer
            // arguments to be truncated as needed and pointers to be
            // cast.
            fn modify_as_needed(
                bx: &Builder<'a, 'll, 'tcx>,
                t: &intrinsics::Type,
                arg: &OperandRef<'tcx>,
            ) -> Vec<ValueRef>
            {
                match *t {
                    intrinsics::Type::Aggregate(true, ref contents) => {
                        // We found a tuple that needs squishing! So
                        // run over the tuple and load each field.
                        //
                        // This assumes the type is "simple", i.e. no
                        // destructors, and the contents are SIMD
                        // etc.
                        assert!(!bx.cx.type_needs_drop(arg.layout.ty));
                        let (ptr, align) = match arg.val {
                            OperandValue::Ref(ptr, align) => (ptr, align),
                            _ => bug!()
                        };
                        let arg = PlaceRef::new_sized(ptr, arg.layout, align);
                        (0..contents.len()).map(|i| {
                            arg.project_field(bx, i).load(bx).immediate()
                        }).collect()
                    }
                    intrinsics::Type::Pointer(_, Some(ref llvm_elem), _) => {
                        let llvm_elem = one(ty_to_type(bx.cx, llvm_elem));
                        vec![bx.pointercast(arg.immediate(), llvm_elem.ptr_to())]
                    }
                    intrinsics::Type::Vector(_, Some(ref llvm_elem), length) => {
                        let llvm_elem = one(ty_to_type(bx.cx, llvm_elem));
                        vec![bx.bitcast(arg.immediate(), Type::vector(llvm_elem, length as u64))]
                    }
                    intrinsics::Type::Integer(_, width, llvm_width) if width != llvm_width => {
                        // the LLVM intrinsic uses a smaller integer
                        // size than the C intrinsic's signature, so
                        // we have to trim it down here.
                        vec![bx.trunc(arg.immediate(), Type::ix(bx.cx, llvm_width as u64))]
                    }
                    _ => vec![arg.immediate()],
                }
            }


            let inputs = intr.inputs.iter()
                                    .flat_map(|t| ty_to_type(cx, t))
                                    .collect::<Vec<_>>();

            let outputs = one(ty_to_type(cx, &intr.output));

            let llargs: Vec<_> = intr.inputs.iter().zip(args).flat_map(|(t, arg)| {
                modify_as_needed(bx, t, arg)
            }).collect();
            assert_eq!(inputs.len(), llargs.len());

            let val = match intr.definition {
                intrinsics::IntrinsicDef::Named(name) => {
                    let f = declare::declare_cfn(cx,
                                                 name,
                                                 Type::func(&inputs, outputs));
                    bx.call(f, &llargs, None)
                }
            };

            match *intr.output {
                intrinsics::Type::Aggregate(flatten, ref elems) => {
                    // the output is a tuple so we need to munge it properly
                    assert!(!flatten);

                    for i in 0..elems.len() {
                        let dest = result.project_field(bx, i);
                        let val = bx.extract_value(val, i as u64);
                        bx.store(val, dest.llval, dest.align);
                    }
                    return;
                }
                _ => val,
            }
        }
    };

    // Store the computed immediate into the return place, honouring any
    // ABI-level cast the return type was lowered with.
    if !fn_ty.ret.is_ignore() {
        if let PassMode::Cast(ty) = fn_ty.ret.mode {
            let ptr = bx.pointercast(result.llval, ty.llvm_type(cx).ptr_to());
            bx.store(llval, ptr, result.align);
        } else {
            OperandRef::from_immediate_or_packed_pair(bx, llval, result.layout)
                .val.store(bx, result);
        }
    }
}
682
683 fn copy_intrinsic(
684     bx: &Builder<'a, 'll, 'tcx>,
685     allow_overlap: bool,
686     volatile: bool,
687     ty: Ty<'tcx>,
688     dst: ValueRef,
689     src: ValueRef,
690     count: ValueRef,
691 ) -> ValueRef {
692     let cx = bx.cx;
693     let (size, align) = cx.size_and_align_of(ty);
694     let size = C_usize(cx, size.bytes());
695     let align = C_i32(cx, align.abi() as i32);
696
697     let operation = if allow_overlap {
698         "memmove"
699     } else {
700         "memcpy"
701     };
702
703     let name = format!("llvm.{}.p0i8.p0i8.i{}", operation,
704                        cx.data_layout().pointer_size.bits());
705
706     let dst_ptr = bx.pointercast(dst, Type::i8p(cx));
707     let src_ptr = bx.pointercast(src, Type::i8p(cx));
708     let llfn = cx.get_intrinsic(&name);
709
710     bx.call(llfn,
711         &[dst_ptr,
712         src_ptr,
713         bx.mul(size, count),
714         align,
715         C_bool(cx, volatile)],
716         None)
717 }
718
719 fn memset_intrinsic(
720     bx: &Builder<'a, 'll, 'tcx>,
721     volatile: bool,
722     ty: Ty<'tcx>,
723     dst: ValueRef,
724     val: ValueRef,
725     count: ValueRef
726 ) -> ValueRef {
727     let cx = bx.cx;
728     let (size, align) = cx.size_and_align_of(ty);
729     let size = C_usize(cx, size.bytes());
730     let align = C_i32(cx, align.abi() as i32);
731     let dst = bx.pointercast(dst, Type::i8p(cx));
732     call_memset(bx, dst, val, bx.mul(size, count), align, volatile)
733 }
734
735 fn try_intrinsic(
736     bx: &Builder<'a, 'll, 'tcx>,
737     cx: &CodegenCx,
738     func: ValueRef,
739     data: ValueRef,
740     local_ptr: ValueRef,
741     dest: ValueRef,
742 ) {
743     if bx.sess().no_landing_pads() {
744         bx.call(func, &[data], None);
745         let ptr_align = bx.tcx().data_layout.pointer_align;
746         bx.store(C_null(Type::i8p(&bx.cx)), dest, ptr_align);
747     } else if wants_msvc_seh(bx.sess()) {
748         codegen_msvc_try(bx, cx, func, data, local_ptr, dest);
749     } else {
750         codegen_gnu_try(bx, cx, func, data, local_ptr, dest);
751     }
752 }
753
// MSVC's definition of the `rust_try` function.
//
// This implementation uses the new exception handling instructions in LLVM
// which have support in LLVM for SEH on MSVC targets. Although these
// instructions are meant to work for all targets, as of the time of this
// writing, however, LLVM does not recommend the usage of these new instructions
// as the old ones are still more optimized.
fn codegen_msvc_try(
    bx: &Builder<'a, 'll, 'tcx>,
    cx: &CodegenCx,
    func: ValueRef,
    data: ValueRef,
    local_ptr: ValueRef,
    dest: ValueRef,
) {
    // The `__rust_try` shim is generated at most once and cached; the closure
    // below only runs on the first request (see `get_rust_try_fn`).
    let llfn = get_rust_try_fn(cx, &mut |bx| {
        let cx = bx.cx;

        bx.set_personality_fn(bx.cx.eh_personality());

        // The four basic blocks of the shim's CFG; see the IR sketch below.
        let normal = bx.build_sibling_block("normal");
        let catchswitch = bx.build_sibling_block("catchswitch");
        let catchpad = bx.build_sibling_block("catchpad");
        let caught = bx.build_sibling_block("caught");

        // These shadow the outer fn's arguments on purpose: inside the shim we
        // must use the shim's own parameters, not the caller's values.
        let func = llvm::get_param(bx.llfn(), 0);
        let data = llvm::get_param(bx.llfn(), 1);
        let local_ptr = llvm::get_param(bx.llfn(), 2);

        // We're generating an IR snippet that looks like:
        //
        //   declare i32 @rust_try(%func, %data, %ptr) {
        //      %slot = alloca i64*
        //      invoke %func(%data) to label %normal unwind label %catchswitch
        //
        //   normal:
        //      ret i32 0
        //
        //   catchswitch:
        //      %cs = catchswitch within none [%catchpad] unwind to caller
        //
        //   catchpad:
        //      %tok = catchpad within %cs [%type_descriptor, 0, %slot]
        //      %ptr[0] = %slot[0]
        //      %ptr[1] = %slot[1]
        //      catchret from %tok to label %caught
        //
        //   caught:
        //      ret i32 1
        //   }
        //
        // This structure follows the basic usage of throw/try/catch in LLVM.
        // For example, compile this C++ snippet to see what LLVM generates:
        //
        //      #include <stdint.h>
        //
        //      int bar(void (*foo)(void), uint64_t *ret) {
        //          try {
        //              foo();
        //              return 0;
        //          } catch(uint64_t a[2]) {
        //              ret[0] = a[0];
        //              ret[1] = a[1];
        //              return 1;
        //          }
        //      }
        //
        // More information can be found in libstd's seh.rs implementation.
        let i64p = Type::i64(cx).ptr_to();
        let ptr_align = bx.tcx().data_layout.pointer_align;
        let slot = bx.alloca(i64p, "slot", ptr_align);
        bx.invoke(func, &[data], normal.llbb(), catchswitch.llbb(),
            None);

        // No panic: report 0.
        normal.ret(C_i32(cx, 0));

        let cs = catchswitch.catch_switch(None, None, 1);
        catchswitch.add_handler(cs, catchpad.llbb());

        // The %type_descriptor operand of the catchpad is supplied by the
        // `msvc_try_filter` lang item (a static defined in the stdlib).
        let tcx = cx.tcx;
        let tydesc = match tcx.lang_items().msvc_try_filter() {
            Some(did) => ::consts::get_static(cx, did),
            None => bug!("msvc_try_filter not defined"),
        };
        let tok = catchpad.catch_pad(cs, &[tydesc, C_i32(cx, 0), slot]);
        let addr = catchpad.load(slot, ptr_align);

        // Copy the two i64 words of the exception payload out of %slot into
        // the caller-provided %ptr (see `%ptr[0] = %slot[0]` in the sketch).
        let i64_align = bx.tcx().data_layout.i64_align;
        let arg1 = catchpad.load(addr, i64_align);
        let val1 = C_i32(cx, 1);
        let arg2 = catchpad.load(catchpad.inbounds_gep(addr, &[val1]), i64_align);
        let local_ptr = catchpad.bitcast(local_ptr, i64p);
        catchpad.store(arg1, local_ptr, i64_align);
        catchpad.store(arg2, catchpad.inbounds_gep(local_ptr, &[val1]), i64_align);
        catchpad.catch_ret(tok, caught.llbb());

        // A panic was caught: report 1.
        caught.ret(C_i32(cx, 1));
    });

    // Note that no invoke is used here because by definition this function
    // can't panic (that's what it's catching).
    let ret = bx.call(llfn, &[func, data, local_ptr], None);
    // Store the shim's i32 result (0 = no panic, 1 = caught) at `dest`.
    let i32_align = bx.tcx().data_layout.i32_align;
    bx.store(ret, dest, i32_align);
}
859
// Definition of the standard "try" function for Rust using the GNU-like model
// of exceptions (e.g. the normal semantics of LLVM's landingpad and invoke
// instructions).
//
// This codegen is a little surprising because we always call a shim
// function instead of inlining the call to `invoke` manually here. This is done
// because in LLVM we're only allowed to have one personality per function
// definition. The call to the `try` intrinsic is being inlined into the
// function calling it, and that function may already have other personality
// functions in play. By calling a shim we're guaranteed that our shim will have
// the right personality function.
fn codegen_gnu_try(
    bx: &Builder<'a, 'll, 'tcx>,
    cx: &CodegenCx,
    func: ValueRef,
    data: ValueRef,
    local_ptr: ValueRef,
    dest: ValueRef,
) {
    // The `__rust_try` shim is generated at most once and cached; the closure
    // below only runs on the first request (see `get_rust_try_fn`).
    let llfn = get_rust_try_fn(cx, &mut |bx| {
        let cx = bx.cx;

        // Codegens the shims described above:
        //
        //   bx:
        //      invoke %func(%args...) normal %normal unwind %catch
        //
        //   normal:
        //      ret 0
        //
        //   catch:
        //      (ptr, _) = landingpad
        //      store ptr, %local_ptr
        //      ret 1
        //
        // Note that the `local_ptr` data passed into the `try` intrinsic is
        // expected to be `*mut *mut u8` for this to actually work, but that's
        // managed by the standard library.

        let then = bx.build_sibling_block("then");
        let catch = bx.build_sibling_block("catch");

        // These shadow the outer fn's arguments on purpose: inside the shim we
        // must use the shim's own parameters, not the caller's values.
        let func = llvm::get_param(bx.llfn(), 0);
        let data = llvm::get_param(bx.llfn(), 1);
        let local_ptr = llvm::get_param(bx.llfn(), 2);
        bx.invoke(func, &[data], then.llbb(), catch.llbb(), None);
        then.ret(C_i32(cx, 0));

        // Type indicator for the exception being thrown.
        //
        // The first value in this tuple is a pointer to the exception object
        // being thrown.  The second value is a "selector" indicating which of
        // the landing pad clauses the exception's type had been matched to.
        // rust_try ignores the selector.
        let lpad_ty = Type::struct_(cx, &[Type::i8p(cx), Type::i32(cx)],
                                    false);
        let vals = catch.landing_pad(lpad_ty, bx.cx.eh_personality(), 1);
        // A null catch clause catches all exceptions (per LLVM's landingpad
        // semantics), so every unwind lands here.
        catch.add_clause(vals, C_null(Type::i8p(cx)));
        let ptr = catch.extract_value(vals, 0);
        let ptr_align = bx.tcx().data_layout.pointer_align;
        // Hand the exception-object pointer back to the caller via `local_ptr`.
        catch.store(ptr, catch.bitcast(local_ptr, Type::i8p(cx).ptr_to()), ptr_align);
        catch.ret(C_i32(cx, 1));
    });

    // Note that no invoke is used here because by definition this function
    // can't panic (that's what it's catching).
    let ret = bx.call(llfn, &[func, data, local_ptr], None);
    // Store the shim's i32 result (0 = no panic, 1 = caught) at `dest`.
    let i32_align = bx.tcx().data_layout.i32_align;
    bx.store(ret, dest, i32_align);
}
930
931 // Helper function to give a Block to a closure to codegen a shim function.
932 // This is currently primarily used for the `try` intrinsic functions above.
933 fn gen_fn<'ll, 'tcx>(
934     cx: &CodegenCx<'ll, 'tcx>,
935     name: &str,
936     inputs: Vec<Ty<'tcx>>,
937     output: Ty<'tcx>,
938     codegen: &mut dyn FnMut(Builder<'_, 'll, 'tcx>),
939 ) -> ValueRef {
940     let rust_fn_ty = cx.tcx.mk_fn_ptr(ty::Binder::bind(cx.tcx.mk_fn_sig(
941         inputs.into_iter(),
942         output,
943         false,
944         hir::Unsafety::Unsafe,
945         Abi::Rust
946     )));
947     let llfn = declare::define_internal_fn(cx, name, rust_fn_ty);
948     let bx = Builder::new_block(cx, llfn, "entry-block");
949     codegen(bx);
950     llfn
951 }
952
953 // Helper function used to get a handle to the `__rust_try` function used to
954 // catch exceptions.
955 //
956 // This function is only generated once and is then cached.
957 fn get_rust_try_fn<'ll, 'tcx>(
958     cx: &CodegenCx<'ll, 'tcx>,
959     codegen: &mut dyn FnMut(Builder<'_, 'll, 'tcx>),
960 ) -> ValueRef {
961     if let Some(llfn) = cx.rust_try_fn.get() {
962         return llfn;
963     }
964
965     // Define the type up front for the signature of the rust_try function.
966     let tcx = cx.tcx;
967     let i8p = tcx.mk_mut_ptr(tcx.types.i8);
968     let fn_ty = tcx.mk_fn_ptr(ty::Binder::bind(tcx.mk_fn_sig(
969         iter::once(i8p),
970         tcx.mk_nil(),
971         false,
972         hir::Unsafety::Unsafe,
973         Abi::Rust
974     )));
975     let output = tcx.types.i32;
976     let rust_try = gen_fn(cx, "__rust_try", vec![fn_ty, i8p, i8p], output, codegen);
977     cx.rust_try_fn.set(Some(rust_try));
978     return rust_try
979 }
980
981 fn span_invalid_monomorphization_error(a: &Session, b: Span, c: &str) {
982     span_err!(a, b, E0511, "{}", c);
983 }
984
985 fn generic_simd_intrinsic(
986     bx: &Builder<'a, 'll, 'tcx>,
987     name: &str,
988     callee_ty: Ty<'tcx>,
989     args: &[OperandRef<'tcx>],
990     ret_ty: Ty<'tcx>,
991     llret_ty: &'ll Type,
992     span: Span
993 ) -> Result<ValueRef, ()> {
994     // macros for error handling:
995     macro_rules! emit_error {
996         ($msg: tt) => {
997             emit_error!($msg, )
998         };
999         ($msg: tt, $($fmt: tt)*) => {
1000             span_invalid_monomorphization_error(
1001                 bx.sess(), span,
1002                 &format!(concat!("invalid monomorphization of `{}` intrinsic: ",
1003                                  $msg),
1004                          name, $($fmt)*));
1005         }
1006     }
1007     macro_rules! return_error {
1008         ($($fmt: tt)*) => {
1009             {
1010                 emit_error!($($fmt)*);
1011                 return Err(());
1012             }
1013         }
1014     }
1015
1016     macro_rules! require {
1017         ($cond: expr, $($fmt: tt)*) => {
1018             if !$cond {
1019                 return_error!($($fmt)*);
1020             }
1021         };
1022     }
1023     macro_rules! require_simd {
1024         ($ty: expr, $position: expr) => {
1025             require!($ty.is_simd(), "expected SIMD {} type, found non-SIMD `{}`", $position, $ty)
1026         }
1027     }
1028
1029
1030
1031     let tcx = bx.tcx();
1032     let sig = tcx.normalize_erasing_late_bound_regions(
1033         ty::ParamEnv::reveal_all(),
1034         &callee_ty.fn_sig(tcx),
1035     );
1036     let arg_tys = sig.inputs();
1037
1038     // every intrinsic takes a SIMD vector as its first argument
1039     require_simd!(arg_tys[0], "input");
1040     let in_ty = arg_tys[0];
1041     let in_elem = arg_tys[0].simd_type(tcx);
1042     let in_len = arg_tys[0].simd_size(tcx);
1043
1044     let comparison = match name {
1045         "simd_eq" => Some(hir::BinOpKind::Eq),
1046         "simd_ne" => Some(hir::BinOpKind::Ne),
1047         "simd_lt" => Some(hir::BinOpKind::Lt),
1048         "simd_le" => Some(hir::BinOpKind::Le),
1049         "simd_gt" => Some(hir::BinOpKind::Gt),
1050         "simd_ge" => Some(hir::BinOpKind::Ge),
1051         _ => None
1052     };
1053
1054     if let Some(cmp_op) = comparison {
1055         require_simd!(ret_ty, "return");
1056
1057         let out_len = ret_ty.simd_size(tcx);
1058         require!(in_len == out_len,
1059                  "expected return type with length {} (same as input type `{}`), \
1060                   found `{}` with length {}",
1061                  in_len, in_ty,
1062                  ret_ty, out_len);
1063         require!(llret_ty.element_type().kind() == llvm::Integer,
1064                  "expected return type with integer elements, found `{}` with non-integer `{}`",
1065                  ret_ty,
1066                  ret_ty.simd_type(tcx));
1067
1068         return Ok(compare_simd_types(bx,
1069                                      args[0].immediate(),
1070                                      args[1].immediate(),
1071                                      in_elem,
1072                                      llret_ty,
1073                                      cmp_op))
1074     }
1075
1076     if name.starts_with("simd_shuffle") {
1077         let n: usize = match name["simd_shuffle".len()..].parse() {
1078             Ok(n) => n,
1079             Err(_) => span_bug!(span,
1080                                 "bad `simd_shuffle` instruction only caught in codegen?")
1081         };
1082
1083         require_simd!(ret_ty, "return");
1084
1085         let out_len = ret_ty.simd_size(tcx);
1086         require!(out_len == n,
1087                  "expected return type of length {}, found `{}` with length {}",
1088                  n, ret_ty, out_len);
1089         require!(in_elem == ret_ty.simd_type(tcx),
1090                  "expected return element type `{}` (element of input `{}`), \
1091                   found `{}` with element type `{}`",
1092                  in_elem, in_ty,
1093                  ret_ty, ret_ty.simd_type(tcx));
1094
1095         let total_len = in_len as u128 * 2;
1096
1097         let vector = args[2].immediate();
1098
1099         let indices: Option<Vec<_>> = (0..n)
1100             .map(|i| {
1101                 let arg_idx = i;
1102                 let val = const_get_elt(vector, i as u64);
1103                 match const_to_opt_u128(val, true) {
1104                     None => {
1105                         emit_error!("shuffle index #{} is not a constant", arg_idx);
1106                         None
1107                     }
1108                     Some(idx) if idx >= total_len => {
1109                         emit_error!("shuffle index #{} is out of bounds (limit {})",
1110                                     arg_idx, total_len);
1111                         None
1112                     }
1113                     Some(idx) => Some(C_i32(bx.cx, idx as i32)),
1114                 }
1115             })
1116             .collect();
1117         let indices = match indices {
1118             Some(i) => i,
1119             None => return Ok(C_null(llret_ty))
1120         };
1121
1122         return Ok(bx.shuffle_vector(args[0].immediate(),
1123                                      args[1].immediate(),
1124                                      C_vector(&indices)))
1125     }
1126
1127     if name == "simd_insert" {
1128         require!(in_elem == arg_tys[2],
1129                  "expected inserted type `{}` (element of input `{}`), found `{}`",
1130                  in_elem, in_ty, arg_tys[2]);
1131         return Ok(bx.insert_element(args[0].immediate(),
1132                                      args[2].immediate(),
1133                                      args[1].immediate()))
1134     }
1135     if name == "simd_extract" {
1136         require!(ret_ty == in_elem,
1137                  "expected return type `{}` (element of input `{}`), found `{}`",
1138                  in_elem, in_ty, ret_ty);
1139         return Ok(bx.extract_element(args[0].immediate(), args[1].immediate()))
1140     }
1141
1142     if name == "simd_select" {
1143         let m_elem_ty = in_elem;
1144         let m_len = in_len;
1145         let v_len = arg_tys[1].simd_size(tcx);
1146         require!(m_len == v_len,
1147                  "mismatched lengths: mask length `{}` != other vector length `{}`",
1148                  m_len, v_len
1149         );
1150         match m_elem_ty.sty {
1151             ty::TyInt(_) => {},
1152             _ => {
1153                 return_error!("mask element type is `{}`, expected `i_`", m_elem_ty);
1154             }
1155         }
1156         // truncate the mask to a vector of i1s
1157         let i1 = Type::i1(bx.cx);
1158         let i1xn = Type::vector(i1, m_len as u64);
1159         let m_i1s = bx.trunc(args[0].immediate(), i1xn);
1160         return Ok(bx.select(m_i1s, args[1].immediate(), args[2].immediate()));
1161     }
1162
    // Lowers a `simd_f*` intrinsic to the corresponding LLVM vector intrinsic
    // `llvm.<name>.v<len><ety>` (e.g. `llvm.sqrt.v4f32`), after validating that
    // the input is a float vector of supported element width and length.
    // Returns Err(()) (with a diagnostic already emitted) on invalid input.
    fn simd_simple_float_intrinsic(
        name: &str,
        in_elem: &::rustc::ty::TyS,
        in_ty: &::rustc::ty::TyS,
        in_len: usize,
        bx: &Builder<'a, 'll, 'tcx>,
        span: Span,
        args: &[OperandRef<'tcx>],
    ) -> Result<ValueRef, ()> {
        // Local copies of the diagnostics macros defined in the enclosing
        // `generic_simd_intrinsic` (macros don't cross fn boundaries).
        macro_rules! emit_error {
            ($msg: tt) => {
                emit_error!($msg, )
            };
            ($msg: tt, $($fmt: tt)*) => {
                span_invalid_monomorphization_error(
                    bx.sess(), span,
                    &format!(concat!("invalid monomorphization of `{}` intrinsic: ",
                                     $msg),
                             name, $($fmt)*));
            }
        }
        macro_rules! return_error {
            ($($fmt: tt)*) => {
                {
                    emit_error!($($fmt)*);
                    return Err(());
                }
            }
        }
        // Map the element type to its LLVM type suffix, enforcing the length
        // ranges the LLVM intrinsics support ([2,16] for f32, [2,8] for f64).
        let ety = match in_elem.sty {
            ty::TyFloat(f) if f.bit_width() == 32 => {
                if in_len < 2 || in_len > 16 {
                    return_error!(
                        "unsupported floating-point vector `{}` with length `{}` \
                         out-of-range [2, 16]",
                        in_ty, in_len);
                }
                "f32"
            },
            ty::TyFloat(f) if f.bit_width() == 64 => {
                if in_len < 2 || in_len > 8 {
                    return_error!("unsupported floating-point vector `{}` with length `{}` \
                                   out-of-range [2, 8]",
                                  in_ty, in_len);
                }
                "f64"
            },
            ty::TyFloat(f) => {
                return_error!("unsupported element type `{}` of floating-point vector `{}`",
                              f, in_ty);
            },
            _ => {
                return_error!("`{}` is not a floating-point type", in_ty);
            }
        };

        let llvm_name = &format!("llvm.{0}.v{1}{2}", name, in_len, ety);
        let intrinsic = bx.cx.get_intrinsic(&llvm_name);
        let c = bx.call(intrinsic,
                        &args.iter().map(|arg| arg.immediate()).collect::<Vec<_>>(),
                        None);
        // Flag the result with "unsafe algebra" (fast-math-style flags).
        unsafe { llvm::LLVMRustSetHasUnsafeAlgebra(c) };
        return Ok(c);
    }
1227
1228     if name == "simd_fsqrt" {
1229         return simd_simple_float_intrinsic("sqrt", in_elem, in_ty, in_len, bx, span, args);
1230     }
1231
1232     if name == "simd_fsin" {
1233         return simd_simple_float_intrinsic("sin", in_elem, in_ty, in_len, bx, span, args);
1234     }
1235
1236     if name == "simd_fcos" {
1237         return simd_simple_float_intrinsic("cos", in_elem, in_ty, in_len, bx, span, args);
1238     }
1239
1240     if name == "simd_fabs" {
1241         return simd_simple_float_intrinsic("fabs", in_elem, in_ty, in_len, bx, span, args);
1242     }
1243
1244     if name == "simd_floor" {
1245         return simd_simple_float_intrinsic("floor", in_elem, in_ty, in_len, bx, span, args);
1246     }
1247
1248     if name == "simd_ceil" {
1249         return simd_simple_float_intrinsic("ceil", in_elem, in_ty, in_len, bx, span, args);
1250     }
1251
1252     if name == "simd_fexp" {
1253         return simd_simple_float_intrinsic("exp", in_elem, in_ty, in_len, bx, span, args);
1254     }
1255
1256     if name == "simd_fexp2" {
1257         return simd_simple_float_intrinsic("exp2", in_elem, in_ty, in_len, bx, span, args);
1258     }
1259
1260     if name == "simd_flog10" {
1261         return simd_simple_float_intrinsic("log10", in_elem, in_ty, in_len, bx, span, args);
1262     }
1263
1264     if name == "simd_flog2" {
1265         return simd_simple_float_intrinsic("log2", in_elem, in_ty, in_len, bx, span, args);
1266     }
1267
1268     if name == "simd_flog" {
1269         return simd_simple_float_intrinsic("log", in_elem, in_ty, in_len, bx, span, args);
1270     }
1271
1272     if name == "simd_fpowi" {
1273         return simd_simple_float_intrinsic("powi", in_elem, in_ty, in_len, bx, span, args);
1274     }
1275
1276     if name == "simd_fpow"  {
1277         return simd_simple_float_intrinsic("pow", in_elem, in_ty, in_len, bx, span, args);
1278     }
1279
1280     if name == "simd_fma" {
1281         return simd_simple_float_intrinsic("fma", in_elem, in_ty, in_len, bx, span, args);
1282     }
1283
1284     // FIXME: use:
1285     //  https://github.com/llvm-mirror/llvm/blob/master/include/llvm/IR/Function.h#L182
1286     //  https://github.com/llvm-mirror/llvm/blob/master/include/llvm/IR/Intrinsics.h#L81
1287     fn llvm_vector_str(elem_ty: ty::Ty, vec_len: usize, no_pointers: usize) -> String {
1288         let p0s: String = "p0".repeat(no_pointers);
1289         match elem_ty.sty {
1290             ty::TyInt(v) => format!("v{}{}i{}", vec_len, p0s, v.bit_width().unwrap()),
1291             ty::TyUint(v) => format!("v{}{}i{}", vec_len, p0s, v.bit_width().unwrap()),
1292             ty::TyFloat(v) => format!("v{}{}f{}", vec_len, p0s, v.bit_width()),
1293             _ => unreachable!(),
1294         }
1295     }
1296
1297     fn llvm_vector_ty(cx: &CodegenCx<'ll, '_>, elem_ty: ty::Ty, vec_len: usize,
1298                       mut no_pointers: usize) -> &'ll Type {
1299         // FIXME: use cx.layout_of(ty).llvm_type() ?
1300         let mut elem_ty = match elem_ty.sty {
1301             ty::TyInt(v) => Type::int_from_ty(cx, v),
1302             ty::TyUint(v) => Type::uint_from_ty(cx, v),
1303             ty::TyFloat(v) => Type::float_from_ty(cx, v),
1304             _ => unreachable!(),
1305         };
1306         while no_pointers > 0 {
1307             elem_ty = elem_ty.ptr_to();
1308             no_pointers -= 1;
1309         }
1310         Type::vector(elem_ty, vec_len as u64)
1311     }
1312
1313
1314     if name == "simd_gather"  {
1315         // simd_gather(values: <N x T>, pointers: <N x *_ T>,
1316         //             mask: <N x i{M}>) -> <N x T>
1317         // * N: number of elements in the input vectors
1318         // * T: type of the element to load
1319         // * M: any integer width is supported, will be truncated to i1
1320
1321         // All types must be simd vector types
1322         require_simd!(in_ty, "first");
1323         require_simd!(arg_tys[1], "second");
1324         require_simd!(arg_tys[2], "third");
1325         require_simd!(ret_ty, "return");
1326
1327         // Of the same length:
1328         require!(in_len == arg_tys[1].simd_size(tcx),
1329                  "expected {} argument with length {} (same as input type `{}`), \
1330                   found `{}` with length {}", "second", in_len, in_ty, arg_tys[1],
1331                  arg_tys[1].simd_size(tcx));
1332         require!(in_len == arg_tys[2].simd_size(tcx),
1333                  "expected {} argument with length {} (same as input type `{}`), \
1334                   found `{}` with length {}", "third", in_len, in_ty, arg_tys[2],
1335                  arg_tys[2].simd_size(tcx));
1336
1337         // The return type must match the first argument type
1338         require!(ret_ty == in_ty,
1339                  "expected return type `{}`, found `{}`",
1340                  in_ty, ret_ty);
1341
1342         // This counts how many pointers
1343         fn ptr_count(t: ty::Ty) -> usize {
1344             match t.sty {
1345                 ty::TyRawPtr(p) => 1 + ptr_count(p.ty),
1346                 _ => 0,
1347             }
1348         }
1349
1350         // Non-ptr type
1351         fn non_ptr(t: ty::Ty) -> ty::Ty {
1352             match t.sty {
1353                 ty::TyRawPtr(p) => non_ptr(p.ty),
1354                 _ => t,
1355             }
1356         }
1357
1358         // The second argument must be a simd vector with an element type that's a pointer
1359         // to the element type of the first argument
1360         let (pointer_count, underlying_ty) = match arg_tys[1].simd_type(tcx).sty {
1361             ty::TyRawPtr(p) if p.ty == in_elem => (ptr_count(arg_tys[1].simd_type(tcx)),
1362                                                    non_ptr(arg_tys[1].simd_type(tcx))),
1363             _ => {
1364                 require!(false, "expected element type `{}` of second argument `{}` \
1365                                  to be a pointer to the element type `{}` of the first \
1366                                  argument `{}`, found `{}` != `*_ {}`",
1367                          arg_tys[1].simd_type(tcx).sty, arg_tys[1], in_elem, in_ty,
1368                          arg_tys[1].simd_type(tcx).sty, in_elem);
1369                 unreachable!();
1370             }
1371         };
1372         assert!(pointer_count > 0);
1373         assert!(pointer_count - 1 == ptr_count(arg_tys[0].simd_type(tcx)));
1374         assert_eq!(underlying_ty, non_ptr(arg_tys[0].simd_type(tcx)));
1375
1376         // The element type of the third argument must be a signed integer type of any width:
1377         match arg_tys[2].simd_type(tcx).sty {
1378             ty::TyInt(_) => (),
1379             _ => {
1380                 require!(false, "expected element type `{}` of third argument `{}` \
1381                                  to be a signed integer type",
1382                          arg_tys[2].simd_type(tcx).sty, arg_tys[2]);
1383             }
1384         }
1385
1386         // Alignment of T, must be a constant integer value:
1387         let alignment_ty = Type::i32(bx.cx);
1388         let alignment = C_i32(bx.cx, bx.cx.align_of(in_elem).abi() as i32);
1389
1390         // Truncate the mask vector to a vector of i1s:
1391         let (mask, mask_ty) = {
1392             let i1 = Type::i1(bx.cx);
1393             let i1xn = Type::vector(i1, in_len as u64);
1394             (bx.trunc(args[2].immediate(), i1xn), i1xn)
1395         };
1396
1397         // Type of the vector of pointers:
1398         let llvm_pointer_vec_ty = llvm_vector_ty(bx.cx, underlying_ty, in_len, pointer_count);
1399         let llvm_pointer_vec_str = llvm_vector_str(underlying_ty, in_len, pointer_count);
1400
1401         // Type of the vector of elements:
1402         let llvm_elem_vec_ty = llvm_vector_ty(bx.cx, underlying_ty, in_len, pointer_count - 1);
1403         let llvm_elem_vec_str = llvm_vector_str(underlying_ty, in_len, pointer_count - 1);
1404
1405         let llvm_intrinsic = format!("llvm.masked.gather.{}.{}",
1406                                      llvm_elem_vec_str, llvm_pointer_vec_str);
1407         let f = declare::declare_cfn(bx.cx, &llvm_intrinsic,
1408                                      Type::func(&[llvm_pointer_vec_ty, alignment_ty, mask_ty,
1409                                                   llvm_elem_vec_ty], llvm_elem_vec_ty));
1410         llvm::SetUnnamedAddr(f, false);
1411         let v = bx.call(f, &[args[1].immediate(), alignment, mask, args[0].immediate()],
1412                         None);
1413         return Ok(v);
1414     }
1415
1416     if name == "simd_scatter"  {
1417         // simd_scatter(values: <N x T>, pointers: <N x *mut T>,
1418         //             mask: <N x i{M}>) -> ()
1419         // * N: number of elements in the input vectors
1420         // * T: type of the element to load
1421         // * M: any integer width is supported, will be truncated to i1
1422
1423         // All types must be simd vector types
1424         require_simd!(in_ty, "first");
1425         require_simd!(arg_tys[1], "second");
1426         require_simd!(arg_tys[2], "third");
1427
1428         // Of the same length:
1429         require!(in_len == arg_tys[1].simd_size(tcx),
1430                  "expected {} argument with length {} (same as input type `{}`), \
1431                   found `{}` with length {}", "second", in_len, in_ty, arg_tys[1],
1432                  arg_tys[1].simd_size(tcx));
1433         require!(in_len == arg_tys[2].simd_size(tcx),
1434                  "expected {} argument with length {} (same as input type `{}`), \
1435                   found `{}` with length {}", "third", in_len, in_ty, arg_tys[2],
1436                  arg_tys[2].simd_size(tcx));
1437
1438         // This counts how many pointers
1439         fn ptr_count(t: ty::Ty) -> usize {
1440             match t.sty {
1441                 ty::TyRawPtr(p) => 1 + ptr_count(p.ty),
1442                 _ => 0,
1443             }
1444         }
1445
1446         // Non-ptr type
1447         fn non_ptr(t: ty::Ty) -> ty::Ty {
1448             match t.sty {
1449                 ty::TyRawPtr(p) => non_ptr(p.ty),
1450                 _ => t,
1451             }
1452         }
1453
1454         // The second argument must be a simd vector with an element type that's a pointer
1455         // to the element type of the first argument
1456         let (pointer_count, underlying_ty) = match arg_tys[1].simd_type(tcx).sty {
1457             ty::TyRawPtr(p) if p.ty == in_elem && p.mutbl == hir::MutMutable
1458                 => (ptr_count(arg_tys[1].simd_type(tcx)),
1459                     non_ptr(arg_tys[1].simd_type(tcx))),
1460             _ => {
1461                 require!(false, "expected element type `{}` of second argument `{}` \
1462                                  to be a pointer to the element type `{}` of the first \
1463                                  argument `{}`, found `{}` != `*mut {}`",
1464                          arg_tys[1].simd_type(tcx).sty, arg_tys[1], in_elem, in_ty,
1465                          arg_tys[1].simd_type(tcx).sty, in_elem);
1466                 unreachable!();
1467             }
1468         };
1469         assert!(pointer_count > 0);
1470         assert!(pointer_count - 1 == ptr_count(arg_tys[0].simd_type(tcx)));
1471         assert_eq!(underlying_ty, non_ptr(arg_tys[0].simd_type(tcx)));
1472
1473         // The element type of the third argument must be a signed integer type of any width:
1474         match arg_tys[2].simd_type(tcx).sty {
1475             ty::TyInt(_) => (),
1476             _ => {
1477                 require!(false, "expected element type `{}` of third argument `{}` \
1478                                  to be a signed integer type",
1479                          arg_tys[2].simd_type(tcx).sty, arg_tys[2]);
1480             }
1481         }
1482
1483         // Alignment of T, must be a constant integer value:
1484         let alignment_ty = Type::i32(bx.cx);
1485         let alignment = C_i32(bx.cx, bx.cx.align_of(in_elem).abi() as i32);
1486
1487         // Truncate the mask vector to a vector of i1s:
1488         let (mask, mask_ty) = {
1489             let i1 = Type::i1(bx.cx);
1490             let i1xn = Type::vector(i1, in_len as u64);
1491             (bx.trunc(args[2].immediate(), i1xn), i1xn)
1492         };
1493
1494         let ret_t = Type::void(bx.cx);
1495
1496         // Type of the vector of pointers:
1497         let llvm_pointer_vec_ty = llvm_vector_ty(bx.cx, underlying_ty, in_len, pointer_count);
1498         let llvm_pointer_vec_str = llvm_vector_str(underlying_ty, in_len, pointer_count);
1499
1500         // Type of the vector of elements:
1501         let llvm_elem_vec_ty = llvm_vector_ty(bx.cx, underlying_ty, in_len, pointer_count - 1);
1502         let llvm_elem_vec_str = llvm_vector_str(underlying_ty, in_len, pointer_count - 1);
1503
1504         let llvm_intrinsic = format!("llvm.masked.scatter.{}.{}",
1505                                      llvm_elem_vec_str, llvm_pointer_vec_str);
1506         let f = declare::declare_cfn(bx.cx, &llvm_intrinsic,
1507                                      Type::func(&[llvm_elem_vec_ty,
1508                                                   llvm_pointer_vec_ty,
1509                                                   alignment_ty,
1510                                                   mask_ty], ret_t));
1511         llvm::SetUnnamedAddr(f, false);
1512         let v = bx.call(f, &[args[0].immediate(), args[1].immediate(), alignment, mask],
1513                         None);
1514         return Ok(v);
1515     }
1516
    // Expands to the handler for one arithmetic-reduction intrinsic
    // (e.g. `simd_reduce_add_ordered`). `$integer_reduce`/`$float_reduce`
    // are the Builder reduction methods to emit; `$ordered` selects the
    // variant that takes an explicit accumulator in `args[1]`.
    macro_rules! arith_red {
        ($name:tt : $integer_reduce:ident, $float_reduce:ident, $ordered:expr) => {
            if name == $name {
                // A reduction collapses a vector to its element type.
                require!(ret_ty == in_elem,
                         "expected return type `{}` (element of input `{}`), found `{}`",
                         in_elem, in_ty, ret_ty);
                return match in_elem.sty {
                    ty::TyInt(_) | ty::TyUint(_) => {
                        let r = bx.$integer_reduce(args[0].immediate());
                        if $ordered {
                            // if overflow occurs, the result is the
                            // mathematical result modulo 2^n:
                            if name.contains("mul") {
                                Ok(bx.mul(args[1].immediate(), r))
                            } else {
                                Ok(bx.add(args[1].immediate(), r))
                            }
                        } else {
                            Ok(bx.$integer_reduce(args[0].immediate()))
                        }
                    },
                    ty::TyFloat(f) => {
                        // ordered arithmetic reductions take an accumulator
                        let acc = if $ordered {
                            let acc = args[1].immediate();
                            // FIXME: https://bugs.llvm.org/show_bug.cgi?id=36734
                            // * if the accumulator of the fadd isn't 0, incorrect
                            //   code is generated
                            // * if the accumulator of the fmul isn't 1, incorrect
                            //   code is generated
                            // So only the identity accumulator is accepted here,
                            // and it must be a compile-time constant to check.
                            match const_get_real(acc) {
                                None => return_error!("accumulator of {} is not a constant", $name),
                                Some((v, loses_info)) => {
                                    if $name.contains("mul") && v != 1.0_f64 {
                                        return_error!("accumulator of {} is not 1.0", $name);
                                    } else if $name.contains("add") && v != 0.0_f64 {
                                        return_error!("accumulator of {} is not 0.0", $name);
                                    } else if loses_info {
                                        return_error!("accumulator of {} loses information", $name);
                                    }
                                }
                            }
                            acc
                        } else {
                            // unordered arithmetic reductions do not:
                            // an undef accumulator of the right float width is
                            // passed instead.
                            match f.bit_width() {
                                32 => C_undef(Type::f32(bx.cx)),
                                64 => C_undef(Type::f64(bx.cx)),
                                v => {
                                    return_error!(r#"
unsupported {} from `{}` with element `{}` of size `{}` to `{}`"#,
                                        $name, in_ty, in_elem, v, ret_ty
                                    )
                                }
                            }

                        };
                        Ok(bx.$float_reduce(acc, args[0].immediate()))
                    }
                    _ => {
                        return_error!(
                            "unsupported {} from `{}` with element `{}` to `{}`",
                            $name, in_ty, in_elem, ret_ty
                        )
                    },
                }
            }
        }
    }
1586
1587     arith_red!("simd_reduce_add_ordered": vector_reduce_add, vector_reduce_fadd_fast, true);
1588     arith_red!("simd_reduce_mul_ordered": vector_reduce_mul, vector_reduce_fmul_fast, true);
1589     arith_red!("simd_reduce_add_unordered": vector_reduce_add, vector_reduce_fadd_fast, false);
1590     arith_red!("simd_reduce_mul_unordered": vector_reduce_mul, vector_reduce_fmul_fast, false);
1591
    // Expands to the handler for one min/max-reduction intrinsic.
    // `$int_red` is the Builder method for integer vectors (its bool
    // argument requests signed comparison); `$float_red` handles floats.
    macro_rules! minmax_red {
        ($name:tt: $int_red:ident, $float_red:ident) => {
            if name == $name {
                // A reduction collapses a vector to its element type.
                require!(ret_ty == in_elem,
                         "expected return type `{}` (element of input `{}`), found `{}`",
                         in_elem, in_ty, ret_ty);
                return match in_elem.sty {
                    ty::TyInt(_i) => {
                        // `true` => signed integer reduction.
                        Ok(bx.$int_red(args[0].immediate(), true))
                    },
                    ty::TyUint(_u) => {
                        // `false` => unsigned integer reduction.
                        Ok(bx.$int_red(args[0].immediate(), false))
                    },
                    ty::TyFloat(_f) => {
                        Ok(bx.$float_red(args[0].immediate()))
                    }
                    _ => {
                        return_error!("unsupported {} from `{}` with element `{}` to `{}`",
                                      $name, in_ty, in_elem, ret_ty)
                    },
                }
            }

        }
    }
1617
1618     minmax_red!("simd_reduce_min": vector_reduce_min, vector_reduce_fmin);
1619     minmax_red!("simd_reduce_max": vector_reduce_max, vector_reduce_fmax);
1620
1621     minmax_red!("simd_reduce_min_nanless": vector_reduce_min, vector_reduce_fmin_fast);
1622     minmax_red!("simd_reduce_max_nanless": vector_reduce_max, vector_reduce_fmax_fast);
1623
    // Expands to the handler for one bitwise-reduction intrinsic. When
    // `$boolean` is true the intrinsic is a boolean reduction
    // (`simd_reduce_all`/`simd_reduce_any`): input lanes are truncated to
    // i1 before reducing, and the i1 result is zero-extended afterwards.
    macro_rules! bitwise_red {
        ($name:tt : $red:ident, $boolean:expr) => {
            if name == $name {
                let input = if !$boolean {
                    // Plain bitwise reduction: result is the element type.
                    require!(ret_ty == in_elem,
                             "expected return type `{}` (element of input `{}`), found `{}`",
                             in_elem, in_ty, ret_ty);
                    args[0].immediate()
                } else {
                    // Boolean reduction: only integer element types allowed.
                    match in_elem.sty {
                        ty::TyInt(_) | ty::TyUint(_) => {},
                        _ => {
                            return_error!("unsupported {} from `{}` with element `{}` to `{}`",
                                          $name, in_ty, in_elem, ret_ty)
                        }
                    }

                    // boolean reductions operate on vectors of i1s:
                    let i1 = Type::i1(bx.cx);
                    let i1xn = Type::vector(i1, in_len as u64);
                    bx.trunc(args[0].immediate(), i1xn)
                };
                return match in_elem.sty {
                    ty::TyInt(_) | ty::TyUint(_) => {
                        let r = bx.$red(input);
                        Ok(
                            if !$boolean {
                                r
                            } else {
                                // Widen the i1 result back to the bool type.
                                bx.zext(r, Type::bool(bx.cx))
                            }
                        )
                    },
                    _ => {
                        return_error!("unsupported {} from `{}` with element `{}` to `{}`",
                                      $name, in_ty, in_elem, ret_ty)
                    },
                }
            }
        }
    }
1665
1666     bitwise_red!("simd_reduce_and": vector_reduce_and, false);
1667     bitwise_red!("simd_reduce_or": vector_reduce_or, false);
1668     bitwise_red!("simd_reduce_xor": vector_reduce_xor, false);
1669     bitwise_red!("simd_reduce_all": vector_reduce_and, true);
1670     bitwise_red!("simd_reduce_any": vector_reduce_or, true);
1671
    // `simd_cast`: lane-wise numeric cast between two SIMD vectors of the
    // same length. The LLVM cast instruction is chosen from the (input,
    // output) element kinds and their relative bit widths.
    if name == "simd_cast" {
        require_simd!(ret_ty, "return");
        let out_len = ret_ty.simd_size(tcx);
        require!(in_len == out_len,
                 "expected return type with length {} (same as input type `{}`), \
                  found `{}` with length {}",
                 in_len, in_ty,
                 ret_ty, out_len);
        // casting cares about nominal type, not just structural type
        let out_elem = ret_ty.simd_type(tcx);

        // Identical element types: the cast is a no-op.
        if in_elem == out_elem { return Ok(args[0].immediate()); }

        enum Style { Float, Int(/* is signed? */ bool), Unsupported }

        let (in_style, in_width) = match in_elem.sty {
            // vectors of pointer-sized integers should've been
            // disallowed before here, so this unwrap is safe.
            ty::TyInt(i) => (Style::Int(true), i.bit_width().unwrap()),
            ty::TyUint(u) => (Style::Int(false), u.bit_width().unwrap()),
            ty::TyFloat(f) => (Style::Float, f.bit_width()),
            _ => (Style::Unsupported, 0)
        };
        let (out_style, out_width) = match out_elem.sty {
            ty::TyInt(i) => (Style::Int(true), i.bit_width().unwrap()),
            ty::TyUint(u) => (Style::Int(false), u.bit_width().unwrap()),
            ty::TyFloat(f) => (Style::Float, f.bit_width()),
            _ => (Style::Unsupported, 0)
        };

        match (in_style, out_style) {
            (Style::Int(in_is_signed), Style::Int(_)) => {
                // int -> int: truncate, pass through unchanged, or
                // sign-/zero-extend depending on the relative widths.
                return Ok(match in_width.cmp(&out_width) {
                    Ordering::Greater => bx.trunc(args[0].immediate(), llret_ty),
                    Ordering::Equal => args[0].immediate(),
                    Ordering::Less => if in_is_signed {
                        bx.sext(args[0].immediate(), llret_ty)
                    } else {
                        bx.zext(args[0].immediate(), llret_ty)
                    }
                })
            }
            (Style::Int(in_is_signed), Style::Float) => {
                // int -> float: signedness of the source picks the cast.
                return Ok(if in_is_signed {
                    bx.sitofp(args[0].immediate(), llret_ty)
                } else {
                    bx.uitofp(args[0].immediate(), llret_ty)
                })
            }
            (Style::Float, Style::Int(out_is_signed)) => {
                // float -> int: signedness of the destination picks the cast.
                return Ok(if out_is_signed {
                    bx.fptosi(args[0].immediate(), llret_ty)
                } else {
                    bx.fptoui(args[0].immediate(), llret_ty)
                })
            }
            (Style::Float, Style::Float) => {
                // float -> float: truncate, pass through, or extend.
                return Ok(match in_width.cmp(&out_width) {
                    Ordering::Greater => bx.fptrunc(args[0].immediate(), llret_ty),
                    Ordering::Equal => args[0].immediate(),
                    Ordering::Less => bx.fpext(args[0].immediate(), llret_ty)
                })
            }
            _ => {/* Unsupported. Fallthrough. */}
        }
        require!(false,
                 "unsupported cast from `{}` with element `{}` to `{}` with element `{}`",
                 in_ty, in_elem,
                 ret_ty, out_elem);
    }
    // Expands to handlers for the element-wise binary arithmetic intrinsics:
    // for each `$name`, dispatches on the element-type variants `$p` to the
    // matching Builder method `$call` (e.g. integer `add` vs. float `fadd`).
    // Element types not listed for an intrinsic fall through to the error.
    macro_rules! arith {
        ($($name: ident: $($($p: ident),* => $call: ident),*;)*) => {
            $(if name == stringify!($name) {
                match in_elem.sty {
                    $($(ty::$p(_))|* => {
                        return Ok(bx.$call(args[0].immediate(), args[1].immediate()))
                    })*
                    _ => {},
                }
                require!(false,
                            "unsupported operation on `{}` with element `{}`",
                            in_ty,
                            in_elem)
            })*
        }
    }
1758     arith! {
1759         simd_add: TyUint, TyInt => add, TyFloat => fadd;
1760         simd_sub: TyUint, TyInt => sub, TyFloat => fsub;
1761         simd_mul: TyUint, TyInt => mul, TyFloat => fmul;
1762         simd_div: TyUint => udiv, TyInt => sdiv, TyFloat => fdiv;
1763         simd_rem: TyUint => urem, TyInt => srem, TyFloat => frem;
1764         simd_shl: TyUint, TyInt => shl;
1765         simd_shr: TyUint => lshr, TyInt => ashr;
1766         simd_and: TyUint, TyInt => and;
1767         simd_or: TyUint, TyInt => or;
1768         simd_xor: TyUint, TyInt => xor;
1769         simd_fmax: TyFloat => maxnum;
1770         simd_fmin: TyFloat => minnum;
1771     }
1772     span_bug!(span, "unknown SIMD intrinsic");
1773 }
1774
1775 // Returns the width of an int Ty, and if it's signed or not
1776 // Returns None if the type is not an integer
1777 // FIXME: there’s multiple of this functions, investigate using some of the already existing
1778 // stuffs.
1779 fn int_type_width_signed(ty: Ty, cx: &CodegenCx) -> Option<(u64, bool)> {
1780     match ty.sty {
1781         ty::TyInt(t) => Some((match t {
1782             ast::IntTy::Isize => {
1783                 match &cx.tcx.sess.target.target.target_pointer_width[..] {
1784                     "16" => 16,
1785                     "32" => 32,
1786                     "64" => 64,
1787                     tws => bug!("Unsupported target word size for isize: {}", tws),
1788                 }
1789             },
1790             ast::IntTy::I8 => 8,
1791             ast::IntTy::I16 => 16,
1792             ast::IntTy::I32 => 32,
1793             ast::IntTy::I64 => 64,
1794             ast::IntTy::I128 => 128,
1795         }, true)),
1796         ty::TyUint(t) => Some((match t {
1797             ast::UintTy::Usize => {
1798                 match &cx.tcx.sess.target.target.target_pointer_width[..] {
1799                     "16" => 16,
1800                     "32" => 32,
1801                     "64" => 64,
1802                     tws => bug!("Unsupported target word size for usize: {}", tws),
1803                 }
1804             },
1805             ast::UintTy::U8 => 8,
1806             ast::UintTy::U16 => 16,
1807             ast::UintTy::U32 => 32,
1808             ast::UintTy::U64 => 64,
1809             ast::UintTy::U128 => 128,
1810         }, false)),
1811         _ => None,
1812     }
1813 }
1814
1815 // Returns the width of a float TypeVariant
1816 // Returns None if the type is not a float
1817 fn float_type_width<'tcx>(sty: &ty::TypeVariants<'tcx>)
1818         -> Option<u64> {
1819     use rustc::ty::TyFloat;
1820     match *sty {
1821         TyFloat(t) => Some(match t {
1822             ast::FloatTy::F32 => 32,
1823             ast::FloatTy::F64 => 64,
1824         }),
1825         _ => None,
1826     }
1827 }