compiler/rustc_codegen_gcc/src/intrinsic/mod.rs
pub mod llvm;
mod simd;

use gccjit::{ComparisonOp, Function, RValue, ToRValue, Type, UnaryOp};
use rustc_codegen_ssa::MemFlags;
use rustc_codegen_ssa::base::wants_msvc_seh;
use rustc_codegen_ssa::common::{IntPredicate, span_invalid_monomorphization_error};
use rustc_codegen_ssa::mir::operand::{OperandRef, OperandValue};
use rustc_codegen_ssa::mir::place::PlaceRef;
use rustc_codegen_ssa::traits::{ArgAbiMethods, BaseTypeMethods, BuilderMethods, ConstMethods, IntrinsicCallMethods};
use rustc_middle::bug;
use rustc_middle::ty::{self, Instance, Ty};
use rustc_span::{Span, Symbol, symbol::kw, sym};
use rustc_target::abi::{HasDataLayout, LayoutOf};
use rustc_target::abi::call::{ArgAbi, FnAbi, PassMode};
use rustc_target::spec::PanicStrategy;

use crate::abi::GccType;
use crate::builder::Builder;
use crate::common::TypeReflection;
use crate::context::CodegenCx;
use crate::type_of::LayoutGccExt;
use crate::intrinsic::simd::generic_simd_intrinsic;

fn get_simple_intrinsic<'gcc, 'tcx>(cx: &CodegenCx<'gcc, 'tcx>, name: Symbol) -> Option<Function<'gcc>> {
    let gcc_name = match name {
        sym::sqrtf32 => "sqrtf",
        sym::sqrtf64 => "sqrt",
        sym::powif32 => "__builtin_powif",
        sym::powif64 => "__builtin_powi",
        sym::sinf32 => "sinf",
        sym::sinf64 => "sin",
        sym::cosf32 => "cosf",
        sym::cosf64 => "cos",
        sym::powf32 => "powf",
        sym::powf64 => "pow",
        sym::expf32 => "expf",
        sym::expf64 => "exp",
        sym::exp2f32 => "exp2f",
        sym::exp2f64 => "exp2",
        sym::logf32 => "logf",
        sym::logf64 => "log",
        sym::log10f32 => "log10f",
        sym::log10f64 => "log10",
        sym::log2f32 => "log2f",
        sym::log2f64 => "log2",
        sym::fmaf32 => "fmaf",
        sym::fmaf64 => "fma",
        sym::fabsf32 => "fabsf",
        sym::fabsf64 => "fabs",
        sym::minnumf32 => "fminf",
        sym::minnumf64 => "fmin",
        sym::maxnumf32 => "fmaxf",
        sym::maxnumf64 => "fmax",
        sym::copysignf32 => "copysignf",
        sym::copysignf64 => "copysign",
        sym::floorf32 => "floorf",
        sym::floorf64 => "floor",
        sym::ceilf32 => "ceilf",
        sym::ceilf64 => "ceil",
        sym::truncf32 => "truncf",
        sym::truncf64 => "trunc",
        sym::rintf32 => "rintf",
        sym::rintf64 => "rint",
        sym::nearbyintf32 => "nearbyintf",
        sym::nearbyintf64 => "nearbyint",
        sym::roundf32 => "roundf",
        sym::roundf64 => "round",
        sym::abort => "abort",
        _ => return None,
    };
    Some(cx.context.get_builtin_function(&gcc_name))
}
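// For example, a call to the `sqrtf32` Rust intrinsic resolves to the libc
// `sqrtf` builtin, so the lowering amounts to (a sketch, with hypothetical
// bindings for illustration only):
//
//     let func = cx.context.get_builtin_function("sqrtf");
//     // ... which codegen_intrinsic_call below then emits as `sqrtf(arg)`.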

impl<'a, 'gcc, 'tcx> IntrinsicCallMethods<'tcx> for Builder<'a, 'gcc, 'tcx> {
    fn codegen_intrinsic_call(&mut self, instance: Instance<'tcx>, fn_abi: &FnAbi<'tcx, Ty<'tcx>>, args: &[OperandRef<'tcx, RValue<'gcc>>], llresult: RValue<'gcc>, span: Span) {
        let tcx = self.tcx;
        let callee_ty = instance.ty(tcx, ty::ParamEnv::reveal_all());

        let (def_id, substs) = match *callee_ty.kind() {
            ty::FnDef(def_id, substs) => (def_id, substs),
            _ => bug!("expected fn item type, found {}", callee_ty),
        };

        let sig = callee_ty.fn_sig(tcx);
        let sig = tcx.normalize_erasing_late_bound_regions(ty::ParamEnv::reveal_all(), sig);
        let arg_tys = sig.inputs();
        let ret_ty = sig.output();
        let name = tcx.item_name(def_id);
        let name_str = &*name.as_str();

        let llret_ty = self.layout_of(ret_ty).gcc_type(self, true);
        let result = PlaceRef::new_sized(llresult, fn_abi.ret.layout);

        let simple = get_simple_intrinsic(self, name);
        let llval =
            match name {
                _ if simple.is_some() => {
                    // FIXME: remove this cast when the API supports function values.
                    let func = unsafe { std::mem::transmute(simple.expect("simple")) };
                    self.call(self.type_void(), func, &args.iter().map(|arg| arg.immediate()).collect::<Vec<_>>(), None)
                },
                sym::likely => {
                    self.expect(args[0].immediate(), true)
                }
                sym::unlikely => {
                    self.expect(args[0].immediate(), false)
                }
                kw::Try => {
                    try_intrinsic(
                        self,
                        args[0].immediate(),
                        args[1].immediate(),
                        args[2].immediate(),
                        llresult,
                    );
                    return;
                }
                sym::breakpoint => {
                    unimplemented!();
                    /*let llfn = self.get_intrinsic(&("llvm.debugtrap"));
                    self.call(llfn, &[], None)*/
                }
                sym::va_copy => {
                    unimplemented!();
                    /*let intrinsic = self.cx().get_intrinsic(&("llvm.va_copy"));
                    self.call(intrinsic, &[args[0].immediate(), args[1].immediate()], None)*/
                }
                sym::va_arg => {
                    unimplemented!();
                    /*match fn_abi.ret.layout.abi {
                        abi::Abi::Scalar(ref scalar) => {
                            match scalar.value {
                                Primitive::Int(..) => {
                                    if self.cx().size_of(ret_ty).bytes() < 4 {
                                        // `va_arg` should not be called on an integer type
                                        // less than 4 bytes in length. If it is, promote
                                        // the integer to an `i32` and truncate the result
                                        // back to the smaller type.
                                        let promoted_result = emit_va_arg(self, args[0], tcx.types.i32);
                                        self.trunc(promoted_result, llret_ty)
                                    } else {
                                        emit_va_arg(self, args[0], ret_ty)
                                    }
                                }
                                Primitive::F64 | Primitive::Pointer => {
                                    emit_va_arg(self, args[0], ret_ty)
                                }
                                // `va_arg` should never be used with the return type f32.
                                Primitive::F32 => bug!("the va_arg intrinsic does not work with `f32`"),
                            }
                        }
                        _ => bug!("the va_arg intrinsic does not work with non-scalar types"),
                    }*/
                }

                sym::volatile_load | sym::unaligned_volatile_load => {
                    let tp_ty = substs.type_at(0);
                    let mut ptr = args[0].immediate();
                    if let PassMode::Cast(ty) = fn_abi.ret.mode {
                        ptr = self.pointercast(ptr, self.type_ptr_to(ty.gcc_type(self)));
                    }
                    let load = self.volatile_load(ptr.get_type(), ptr);
                    // TODO
                    /*let align = if name == sym::unaligned_volatile_load {
                        1
                    } else {
                        self.align_of(tp_ty).bytes() as u32
                    };
                    unsafe {
                        llvm::LLVMSetAlignment(load, align);
                    }*/
                    self.to_immediate(load, self.layout_of(tp_ty))
                }
                sym::volatile_store => {
                    let dst = args[0].deref(self.cx());
                    args[1].val.volatile_store(self, dst);
                    return;
                }
                sym::unaligned_volatile_store => {
                    let dst = args[0].deref(self.cx());
                    args[1].val.unaligned_volatile_store(self, dst);
                    return;
                }
                sym::prefetch_read_data
                    | sym::prefetch_write_data
                    | sym::prefetch_read_instruction
                    | sym::prefetch_write_instruction => {
                        unimplemented!();
                        /*let expect = self.get_intrinsic(&("llvm.prefetch"));
                        let (rw, cache_type) = match name {
                            sym::prefetch_read_data => (0, 1),
                            sym::prefetch_write_data => (1, 1),
                            sym::prefetch_read_instruction => (0, 0),
                            sym::prefetch_write_instruction => (1, 0),
                            _ => bug!(),
                        };
                        self.call(
                            expect,
                            &[
                                args[0].immediate(),
                                self.const_i32(rw),
                                args[1].immediate(),
                                self.const_i32(cache_type),
                            ],
                            None,
                        )*/
                    }
                sym::ctlz
                    | sym::ctlz_nonzero
                    | sym::cttz
                    | sym::cttz_nonzero
                    | sym::ctpop
                    | sym::bswap
                    | sym::bitreverse
                    | sym::rotate_left
                    | sym::rotate_right
                    | sym::saturating_add
                    | sym::saturating_sub => {
                        let ty = arg_tys[0];
                        match int_type_width_signed(ty, self) {
                            Some((width, signed)) => match name {
                                sym::ctlz | sym::cttz => {
                                    let func = self.current_func.borrow().expect("func");
                                    let then_block = func.new_block("then");
                                    let else_block = func.new_block("else");
                                    let after_block = func.new_block("after");

                                    let arg = args[0].immediate();
                                    let result = func.new_local(None, arg.get_type(), "zeros");
                                    let zero = self.cx.context.new_rvalue_zero(arg.get_type());
                                    let cond = self.cx.context.new_comparison(None, ComparisonOp::Equals, arg, zero);
                                    self.block.expect("block").end_with_conditional(None, cond, then_block, else_block);

                                    let zero_result = self.cx.context.new_rvalue_from_long(arg.get_type(), width as i64);
                                    then_block.add_assignment(None, result, zero_result);
                                    then_block.end_with_jump(None, after_block);

                                    // NOTE: since jumps were added in a place
                                    // count_leading_zeroes() does not expect, the current blocks
                                    // in the state need to be updated.
                                    *self.current_block.borrow_mut() = Some(else_block);
                                    self.block = Some(else_block);

                                    let zeros =
                                        match name {
                                            sym::ctlz => self.count_leading_zeroes(width, arg),
                                            sym::cttz => self.count_trailing_zeroes(width, arg),
                                            _ => unreachable!(),
                                        };
                                    else_block.add_assignment(None, result, zeros);
                                    else_block.end_with_jump(None, after_block);

                                    // NOTE: since jumps were added in a place rustc does not
                                    // expect, the current blocks in the state need to be updated.
                                    *self.current_block.borrow_mut() = Some(after_block);
                                    self.block = Some(after_block);

                                    result.to_rvalue()

                                    /*let y = self.const_bool(false);
                                    let llfn = self.get_intrinsic(&format!("llvm.{}.i{}", name, width));
                                    self.call(llfn, &[args[0].immediate(), y], None)*/
                                }
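                                // The blocks created above implement, in effect
                                // (a sketch; the zero check is needed because the
                                // clz/ctz builtins are undefined for a zero input):
                                //
                                //     zeros = if arg == 0 { width } else { clz_or_ctz(arg) };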
                                sym::ctlz_nonzero => {
                                    self.count_leading_zeroes(width, args[0].immediate())
                                },
                                sym::cttz_nonzero => {
                                    self.count_trailing_zeroes(width, args[0].immediate())
                                }
                                sym::ctpop => self.pop_count(args[0].immediate()),
                                sym::bswap => {
                                    if width == 8 {
                                        args[0].immediate() // byte swapping a u8/i8 is a no-op
                                    }
                                    else {
                                        // TODO: check if it's faster to use string literals and a
                                        // match instead of format!.
                                        let bswap = self.cx.context.get_builtin_function(&format!("__builtin_bswap{}", width));
                                        let mut arg = args[0].immediate();
                                        // FIXME: this cast should not be necessary. Remove
                                        // when having proper sized integer types.
                                        let param_type = bswap.get_param(0).to_rvalue().get_type();
                                        if param_type != arg.get_type() {
                                            arg = self.bitcast(arg, param_type);
                                        }
                                        self.cx.context.new_call(None, bswap, &[arg])
                                    }
                                },
                                sym::bitreverse => self.bit_reverse(width, args[0].immediate()),
                                sym::rotate_left | sym::rotate_right => {
                                    // TODO: implement using the algorithm from
                                    // https://blog.regehr.org/archives/1063
                                    // for other platforms.
                                    let is_left = name == sym::rotate_left;
                                    let val = args[0].immediate();
                                    let raw_shift = args[1].immediate();
                                    if is_left {
                                        self.rotate_left(val, raw_shift, width)
                                    }
                                    else {
                                        self.rotate_right(val, raw_shift, width)
                                    }
                                },
                                sym::saturating_add => {
                                    self.saturating_add(args[0].immediate(), args[1].immediate(), signed, width)
                                },
                                sym::saturating_sub => {
                                    self.saturating_sub(args[0].immediate(), args[1].immediate(), signed, width)
                                },
                                _ => bug!(),
                            },
                            None => {
                                span_invalid_monomorphization_error(
                                    tcx.sess,
                                    span,
                                    &format!(
                                        "invalid monomorphization of `{}` intrinsic: \
                                         expected basic integer type, found `{}`",
                                        name, ty
                                    ),
                                );
                                return;
                            }
                        }
                    }

                sym::raw_eq => {
                    use rustc_target::abi::Abi::*;
                    let tp_ty = substs.type_at(0);
                    let layout = self.layout_of(tp_ty).layout;
                    let use_integer_compare = match layout.abi {
                        Scalar(_) | ScalarPair(_, _) => true,
                        Uninhabited | Vector { .. } => false,
                        Aggregate { .. } => {
                            // For rusty ABIs, small aggregates are actually passed
                            // as `RegKind::Integer` (see `FnAbi::adjust_for_abi`),
                            // so we re-use that same threshold here.
                            layout.size <= self.data_layout().pointer_size * 2
                        }
                    };

                    let a = args[0].immediate();
                    let b = args[1].immediate();
                    if layout.size.bytes() == 0 {
                        self.const_bool(true)
                    }
                    /*else if use_integer_compare {
                        let integer_ty = self.type_ix(layout.size.bits()); // FIXME: LLVM creates an integer of 96 bits for [i32; 3], but gcc doesn't support this, so it creates an integer of 128 bits.
                        let ptr_ty = self.type_ptr_to(integer_ty);
                        let a_ptr = self.bitcast(a, ptr_ty);
                        let a_val = self.load(integer_ty, a_ptr, layout.align.abi);
                        let b_ptr = self.bitcast(b, ptr_ty);
                        let b_val = self.load(integer_ty, b_ptr, layout.align.abi);
                        self.icmp(IntPredicate::IntEQ, a_val, b_val)
                    }*/
                    else {
                        let void_ptr_type = self.context.new_type::<*const ()>();
                        let a_ptr = self.bitcast(a, void_ptr_type);
                        let b_ptr = self.bitcast(b, void_ptr_type);
                        let n = self.context.new_cast(None, self.const_usize(layout.size.bytes()), self.sizet_type);
                        let builtin = self.context.get_builtin_function("memcmp");
                        let cmp = self.context.new_call(None, builtin, &[a_ptr, b_ptr, n]);
                        self.icmp(IntPredicate::IntEQ, cmp, self.const_i32(0))
                    }
                }
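                // The fallback path above lowers `raw_eq` to the C equivalent
                // of `memcmp(a, b, size) == 0` (a sketch):
                //
                //     int cmp = memcmp(a_ptr, b_ptr, n);
                //     eq = (cmp == 0);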

                _ if name_str.starts_with("simd_") => {
                    match generic_simd_intrinsic(self, name, callee_ty, args, ret_ty, llret_ty, span) {
                        Ok(llval) => llval,
                        Err(()) => return,
                    }
                }

                _ => bug!("unknown intrinsic '{}'", name),
            };

        if !fn_abi.ret.is_ignore() {
            if let PassMode::Cast(ty) = fn_abi.ret.mode {
                let ptr_llty = self.type_ptr_to(ty.gcc_type(self));
                let ptr = self.pointercast(result.llval, ptr_llty);
                self.store(llval, ptr, result.align);
            }
            else {
                OperandRef::from_immediate_or_packed_pair(self, llval, result.layout)
                    .val
                    .store(self, result);
            }
        }
    }

    fn abort(&mut self) {
        let func = self.context.get_builtin_function("abort");
        let func: RValue<'gcc> = unsafe { std::mem::transmute(func) };
        self.call(self.type_void(), func, &[], None);
    }

    fn assume(&mut self, value: Self::Value) {
        // TODO: switch to assume when it exists.
        // Or use something like this:
        // #define __assume(cond) do { if (!(cond)) __builtin_unreachable(); } while (0)
        self.expect(value, true);
    }

    fn expect(&mut self, cond: Self::Value, _expected: bool) -> Self::Value {
        // TODO
        /*let expect = self.context.get_builtin_function("__builtin_expect");
        let expect: RValue<'gcc> = unsafe { std::mem::transmute(expect) };
        self.call(expect, &[cond, self.const_bool(expected)], None)*/
        cond
    }

    fn sideeffect(&mut self) {
        // TODO
        /*if self.tcx().sess.opts.debugging_opts.insert_sideeffect {
            let fnname = self.get_intrinsic(&("llvm.sideeffect"));
            self.call(fnname, &[], None);
        }*/
    }

    fn va_start(&mut self, _va_list: RValue<'gcc>) -> RValue<'gcc> {
        unimplemented!();
        /*let intrinsic = self.cx().get_intrinsic("llvm.va_start");
        self.call(intrinsic, &[va_list], None)*/
    }

    fn va_end(&mut self, _va_list: RValue<'gcc>) -> RValue<'gcc> {
        unimplemented!();
        /*let intrinsic = self.cx().get_intrinsic("llvm.va_end");
        self.call(intrinsic, &[va_list], None)*/
    }
}

impl<'a, 'gcc, 'tcx> ArgAbiMethods<'tcx> for Builder<'a, 'gcc, 'tcx> {
    fn store_fn_arg(&mut self, arg_abi: &ArgAbi<'tcx, Ty<'tcx>>, idx: &mut usize, dst: PlaceRef<'tcx, Self::Value>) {
        arg_abi.store_fn_arg(self, idx, dst)
    }

    fn store_arg(&mut self, arg_abi: &ArgAbi<'tcx, Ty<'tcx>>, val: RValue<'gcc>, dst: PlaceRef<'tcx, RValue<'gcc>>) {
        arg_abi.store(self, val, dst)
    }

    fn arg_memory_ty(&self, arg_abi: &ArgAbi<'tcx, Ty<'tcx>>) -> Type<'gcc> {
        arg_abi.memory_ty(self)
    }
}

pub trait ArgAbiExt<'gcc, 'tcx> {
    fn memory_ty(&self, cx: &CodegenCx<'gcc, 'tcx>) -> Type<'gcc>;
    fn store(&self, bx: &mut Builder<'_, 'gcc, 'tcx>, val: RValue<'gcc>, dst: PlaceRef<'tcx, RValue<'gcc>>);
    fn store_fn_arg(&self, bx: &mut Builder<'_, 'gcc, 'tcx>, idx: &mut usize, dst: PlaceRef<'tcx, RValue<'gcc>>);
}

impl<'gcc, 'tcx> ArgAbiExt<'gcc, 'tcx> for ArgAbi<'tcx, Ty<'tcx>> {
    /// Gets the gcc type for a place of the original Rust type of
    /// this argument/return, i.e., the result of `type_of::type_of`.
    fn memory_ty(&self, cx: &CodegenCx<'gcc, 'tcx>) -> Type<'gcc> {
        self.layout.gcc_type(cx, true)
    }

    /// Stores a direct/indirect value described by this ArgAbi into a
    /// place for the original Rust type of this argument/return.
    /// Can be used for both storing formal arguments into Rust variables
    /// or results of call/invoke instructions into their destinations.
    fn store(&self, bx: &mut Builder<'_, 'gcc, 'tcx>, val: RValue<'gcc>, dst: PlaceRef<'tcx, RValue<'gcc>>) {
        if self.is_ignore() {
            return;
        }
        if self.is_sized_indirect() {
            OperandValue::Ref(val, None, self.layout.align.abi).store(bx, dst)
        }
        else if self.is_unsized_indirect() {
            bug!("unsized `ArgAbi` must be handled through `store_fn_arg`");
        }
        else if let PassMode::Cast(cast) = self.mode {
            // FIXME(eddyb): Figure out when the simpler Store is safe, clang
            // uses it for i16 -> {i8, i8}, but not for i24 -> {i8, i8, i8}.
            let can_store_through_cast_ptr = false;
            if can_store_through_cast_ptr {
                let cast_ptr_llty = bx.type_ptr_to(cast.gcc_type(bx));
                let cast_dst = bx.pointercast(dst.llval, cast_ptr_llty);
                bx.store(val, cast_dst, self.layout.align.abi);
            }
            else {
                // The actual return type is a struct, but the ABI
                // adaptation code has cast it into some scalar type. The
                // code that follows is the only reliable way I have
                // found to do a transform like i64 -> {i32,i32}.
                // Basically we dump the data onto the stack then memcpy it.
                //
                // Other approaches I tried:
                // - Casting rust ret pointer to the foreign type and using Store
                //   is (a) unsafe if size of foreign type > size of rust type and
                //   (b) runs afoul of strict aliasing rules, yielding invalid
                //   assembly under -O (specifically, the store gets removed).
                // - Truncating foreign type to correct integral type and then
                //   bitcasting to the struct type yields invalid cast errors.

                // We instead thus allocate some scratch space...
                let scratch_size = cast.size(bx);
                let scratch_align = cast.align(bx);
                let llscratch = bx.alloca(cast.gcc_type(bx), scratch_align);
                bx.lifetime_start(llscratch, scratch_size);

                // ... where we first store the value...
                bx.store(val, llscratch, scratch_align);

                // ... and then memcpy it to the intended destination.
                bx.memcpy(
                    dst.llval,
                    self.layout.align.abi,
                    llscratch,
                    scratch_align,
                    bx.const_usize(self.layout.size.bytes()),
                    MemFlags::empty(),
                );

                bx.lifetime_end(llscratch, scratch_size);
            }
        }
        else {
            OperandValue::Immediate(val).store(bx, dst);
        }
    }
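    // The cast path above is, roughly, this C sketch (illustrative only;
    // `CastTy` and `RustTy` stand in for the cast and original layouts):
    //
    //     CastTy scratch = val;                    // spill to a stack slot
    //     memcpy(dst, &scratch, sizeof(RustTy));   // copy into the destination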

    fn store_fn_arg<'a>(&self, bx: &mut Builder<'a, 'gcc, 'tcx>, idx: &mut usize, dst: PlaceRef<'tcx, RValue<'gcc>>) {
        let mut next = || {
            let val = bx.current_func().get_param(*idx as i32);
            *idx += 1;
            val.to_rvalue()
        };
        match self.mode {
            PassMode::Ignore => {}
            PassMode::Pair(..) => {
                OperandValue::Pair(next(), next()).store(bx, dst);
            }
            PassMode::Indirect { extra_attrs: Some(_), .. } => {
                OperandValue::Ref(next(), Some(next()), self.layout.align.abi).store(bx, dst);
            }
            PassMode::Direct(_) | PassMode::Indirect { extra_attrs: None, .. } | PassMode::Cast(_) => {
                let next_arg = next();
                self.store(bx, next_arg.to_rvalue(), dst);
            }
        }
    }
}

fn int_type_width_signed<'gcc, 'tcx>(ty: Ty<'tcx>, cx: &CodegenCx<'gcc, 'tcx>) -> Option<(u64, bool)> {
    match ty.kind() {
        ty::Int(t) => Some((
            match t {
                rustc_middle::ty::IntTy::Isize => u64::from(cx.tcx.sess.target.pointer_width),
                rustc_middle::ty::IntTy::I8 => 8,
                rustc_middle::ty::IntTy::I16 => 16,
                rustc_middle::ty::IntTy::I32 => 32,
                rustc_middle::ty::IntTy::I64 => 64,
                rustc_middle::ty::IntTy::I128 => 128,
            },
            true,
        )),
        ty::Uint(t) => Some((
            match t {
                rustc_middle::ty::UintTy::Usize => u64::from(cx.tcx.sess.target.pointer_width),
                rustc_middle::ty::UintTy::U8 => 8,
                rustc_middle::ty::UintTy::U16 => 16,
                rustc_middle::ty::UintTy::U32 => 32,
                rustc_middle::ty::UintTy::U64 => 64,
                rustc_middle::ty::UintTy::U128 => 128,
            },
            false,
        )),
        _ => None,
    }
}

impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
    fn bit_reverse(&mut self, width: u64, value: RValue<'gcc>) -> RValue<'gcc> {
        let typ = value.get_type();
        let context = &self.cx.context;
        match width {
            8 => {
                // First step.
                let left = self.and(value, context.new_rvalue_from_int(typ, 0xF0));
                let left = self.lshr(left, context.new_rvalue_from_int(typ, 4));
                let right = self.and(value, context.new_rvalue_from_int(typ, 0x0F));
                let right = self.shl(right, context.new_rvalue_from_int(typ, 4));
                let step1 = self.or(left, right);

                // Second step.
                let left = self.and(step1, context.new_rvalue_from_int(typ, 0xCC));
                let left = self.lshr(left, context.new_rvalue_from_int(typ, 2));
                let right = self.and(step1, context.new_rvalue_from_int(typ, 0x33));
                let right = self.shl(right, context.new_rvalue_from_int(typ, 2));
                let step2 = self.or(left, right);

                // Third step.
                let left = self.and(step2, context.new_rvalue_from_int(typ, 0xAA));
                let left = self.lshr(left, context.new_rvalue_from_int(typ, 1));
                let right = self.and(step2, context.new_rvalue_from_int(typ, 0x55));
                let right = self.shl(right, context.new_rvalue_from_int(typ, 1));
                let step3 = self.or(left, right);

                step3
            },
            16 => {
                // First step.
                let left = self.and(value, context.new_rvalue_from_int(typ, 0x5555));
                let left = self.shl(left, context.new_rvalue_from_int(typ, 1));
                let right = self.and(value, context.new_rvalue_from_int(typ, 0xAAAA));
                let right = self.lshr(right, context.new_rvalue_from_int(typ, 1));
                let step1 = self.or(left, right);

                // Second step.
                let left = self.and(step1, context.new_rvalue_from_int(typ, 0x3333));
                let left = self.shl(left, context.new_rvalue_from_int(typ, 2));
                let right = self.and(step1, context.new_rvalue_from_int(typ, 0xCCCC));
                let right = self.lshr(right, context.new_rvalue_from_int(typ, 2));
                let step2 = self.or(left, right);

                // Third step.
                let left = self.and(step2, context.new_rvalue_from_int(typ, 0x0F0F));
                let left = self.shl(left, context.new_rvalue_from_int(typ, 4));
                let right = self.and(step2, context.new_rvalue_from_int(typ, 0xF0F0));
                let right = self.lshr(right, context.new_rvalue_from_int(typ, 4));
                let step3 = self.or(left, right);

                // Fourth step.
                let left = self.and(step3, context.new_rvalue_from_int(typ, 0x00FF));
                let left = self.shl(left, context.new_rvalue_from_int(typ, 8));
                let right = self.and(step3, context.new_rvalue_from_int(typ, 0xFF00));
                let right = self.lshr(right, context.new_rvalue_from_int(typ, 8));
                let step4 = self.or(left, right);

                step4
            },
            32 => {
                // TODO: Refactor with other implementations.
                // First step.
                let left = self.and(value, context.new_rvalue_from_long(typ, 0x55555555));
                let left = self.shl(left, context.new_rvalue_from_long(typ, 1));
                let right = self.and(value, context.new_rvalue_from_long(typ, 0xAAAAAAAA));
                let right = self.lshr(right, context.new_rvalue_from_long(typ, 1));
                let step1 = self.or(left, right);

                // Second step.
                let left = self.and(step1, context.new_rvalue_from_long(typ, 0x33333333));
                let left = self.shl(left, context.new_rvalue_from_long(typ, 2));
                let right = self.and(step1, context.new_rvalue_from_long(typ, 0xCCCCCCCC));
                let right = self.lshr(right, context.new_rvalue_from_long(typ, 2));
                let step2 = self.or(left, right);

                // Third step.
                let left = self.and(step2, context.new_rvalue_from_long(typ, 0x0F0F0F0F));
                let left = self.shl(left, context.new_rvalue_from_long(typ, 4));
                let right = self.and(step2, context.new_rvalue_from_long(typ, 0xF0F0F0F0));
                let right = self.lshr(right, context.new_rvalue_from_long(typ, 4));
                let step3 = self.or(left, right);

                // Fourth step.
                let left = self.and(step3, context.new_rvalue_from_long(typ, 0x00FF00FF));
                let left = self.shl(left, context.new_rvalue_from_long(typ, 8));
                let right = self.and(step3, context.new_rvalue_from_long(typ, 0xFF00FF00));
                let right = self.lshr(right, context.new_rvalue_from_long(typ, 8));
                let step4 = self.or(left, right);

                // Fifth step.
                let left = self.and(step4, context.new_rvalue_from_long(typ, 0x0000FFFF));
                let left = self.shl(left, context.new_rvalue_from_long(typ, 16));
                let right = self.and(step4, context.new_rvalue_from_long(typ, 0xFFFF0000));
                let right = self.lshr(right, context.new_rvalue_from_long(typ, 16));
                let step5 = self.or(left, right);

                step5
            },
            64 => {
                // First step.
                let left = self.shl(value, context.new_rvalue_from_long(typ, 32));
                let right = self.lshr(value, context.new_rvalue_from_long(typ, 32));
                let step1 = self.or(left, right);

                // Second step.
                let left = self.and(step1, context.new_rvalue_from_long(typ, 0x0001FFFF0001FFFF));
                let left = self.shl(left, context.new_rvalue_from_long(typ, 15));
                let right = self.and(step1, context.new_rvalue_from_long(typ, 0xFFFE0000FFFE0000u64 as i64)); // TODO: transmute the number instead?
                let right = self.lshr(right, context.new_rvalue_from_long(typ, 17));
                let step2 = self.or(left, right);

                // Third step.
                let left = self.lshr(step2, context.new_rvalue_from_long(typ, 10));
                let left = self.xor(step2, left);
                let temp = self.and(left, context.new_rvalue_from_long(typ, 0x003F801F003F801F));

                let left = self.shl(temp, context.new_rvalue_from_long(typ, 10));
                let left = self.or(temp, left);
                let step3 = self.xor(left, step2);

                // Fourth step.
                let left = self.lshr(step3, context.new_rvalue_from_long(typ, 4));
                let left = self.xor(step3, left);
                let temp = self.and(left, context.new_rvalue_from_long(typ, 0x0E0384210E038421));

                let left = self.shl(temp, context.new_rvalue_from_long(typ, 4));
                let left = self.or(temp, left);
                let step4 = self.xor(left, step3);

                // Fifth step.
                let left = self.lshr(step4, context.new_rvalue_from_long(typ, 2));
                let left = self.xor(step4, left);
                let temp = self.and(left, context.new_rvalue_from_long(typ, 0x2248884222488842));

                let left = self.shl(temp, context.new_rvalue_from_long(typ, 2));
                let left = self.or(temp, left);
                let step5 = self.xor(left, step4);

                step5
            },
            128 => {
                // TODO: find a more efficient implementation?
                let sixty_four = self.context.new_rvalue_from_long(typ, 64);
                let high = self.context.new_cast(None, value >> sixty_four, self.u64_type);
                let low = self.context.new_cast(None, value, self.u64_type);

                let reversed_high = self.bit_reverse(64, high);
                let reversed_low = self.bit_reverse(64, low);

                let new_low = self.context.new_cast(None, reversed_high, typ);
                let new_high = self.context.new_cast(None, reversed_low, typ) << sixty_four;

                new_low | new_high
            },
            _ => {
                panic!("cannot bit reverse with width = {}", width);
            },
        }
    }
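    // Worked example for the 8-bit case (a sketch; each step swaps
    // progressively smaller groups of bits):
    //
    //     value = 0b1011_0010 (0xB2)
    //     step1 = 0b0010_1011 (0x2B)  // nibbles swapped
    //     step2 = 0b1000_1110 (0x8E)  // bit pairs swapped
    //     step3 = 0b0100_1101 (0x4D)  // adjacent bits swapped
    //
    // and 0x4D is indeed 0xB2 with its bit order reversed.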

    fn count_leading_zeroes(&self, width: u64, arg: RValue<'gcc>) -> RValue<'gcc> {
        // TODO: use width?
        let arg_type = arg.get_type();
        let count_leading_zeroes =
            if arg_type.is_uint(&self.cx) {
                "__builtin_clz"
            }
            else if arg_type.is_ulong(&self.cx) {
                "__builtin_clzl"
            }
            else if arg_type.is_ulonglong(&self.cx) {
                "__builtin_clzll"
            }
            else if width == 128 {
                // Algorithm from: https://stackoverflow.com/a/28433850/389119
                let array_type = self.context.new_array_type(None, arg_type, 3);
                let result = self.current_func()
                    .new_local(None, array_type, "count_leading_zeroes_results");

                let sixty_four = self.context.new_rvalue_from_long(arg_type, 64);
                let high = self.context.new_cast(None, arg >> sixty_four, self.u64_type);
                let low = self.context.new_cast(None, arg, self.u64_type);

                let zero = self.context.new_rvalue_zero(self.usize_type);
                let one = self.context.new_rvalue_one(self.usize_type);
                let two = self.context.new_rvalue_from_long(self.usize_type, 2);

                let clzll = self.context.get_builtin_function("__builtin_clzll");

                let first_elem = self.context.new_array_access(None, result, zero);
                let first_value = self.context.new_cast(None, self.context.new_call(None, clzll, &[high]), arg_type);
                self.llbb()
                    .add_assignment(None, first_elem, first_value);

                let second_elem = self.context.new_array_access(None, result, one);
                let second_value = self.context.new_cast(None, self.context.new_call(None, clzll, &[low]), arg_type) + sixty_four;
                self.llbb()
                    .add_assignment(None, second_elem, second_value);

                let third_elem = self.context.new_array_access(None, result, two);
                let third_value = self.context.new_rvalue_from_long(arg_type, 128);
                self.llbb()
                    .add_assignment(None, third_elem, third_value);

                let not_high = self.context.new_unary_op(None, UnaryOp::LogicalNegate, self.u64_type, high);
                let not_low = self.context.new_unary_op(None, UnaryOp::LogicalNegate, self.u64_type, low);
                let not_low_and_not_high = not_low & not_high;
                let index = not_high + not_low_and_not_high;

                let res = self.context.new_array_access(None, result, index);

                return self.context.new_cast(None, res, arg_type);
            }
            else {
                let count_leading_zeroes = self.context.get_builtin_function("__builtin_clz");
                let arg = self.context.new_cast(None, arg, self.uint_type);
                let diff = self.int_width(self.uint_type) - self.int_width(arg_type);
                let diff = self.context.new_rvalue_from_long(self.int_type, diff);
                let res = self.context.new_call(None, count_leading_zeroes, &[arg]) - diff;
                return self.context.new_cast(None, res, arg_type);
            };
        let count_leading_zeroes = self.context.get_builtin_function(count_leading_zeroes);
        let res = self.context.new_call(None, count_leading_zeroes, &[arg]);
        self.context.new_cast(None, res, arg_type)
    }
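    // In the 128-bit case above, the index computation is a branchless
    // select among the three precomputed array entries:
    //
    //     high != 0              => index = 0 => clz(high)
    //     high == 0 && low != 0  => index = 1 => 64 + clz(low)
    //     high == 0 && low == 0  => index = 2 => 128
    //
    // since `!high` is 1 exactly when `high == 0`, and `!low & !high`
    // contributes one more only when both halves are zero.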

    fn count_trailing_zeroes(&self, _width: u64, arg: RValue<'gcc>) -> RValue<'gcc> {
        let arg_type = arg.get_type();
        let (count_trailing_zeroes, expected_type) =
            if arg_type.is_uchar(&self.cx) || arg_type.is_ushort(&self.cx) || arg_type.is_uint(&self.cx) {
                // NOTE: we don't need to mask with 0xFF for uchar because the result is undefined on zero.
                ("__builtin_ctz", self.cx.uint_type)
            }
            else if arg_type.is_ulong(&self.cx) {
                ("__builtin_ctzl", self.cx.ulong_type)
            }
            else if arg_type.is_ulonglong(&self.cx) {
                ("__builtin_ctzll", self.cx.ulonglong_type)
            }
            else if arg_type.is_u128(&self.cx) {
                // Adapted from the algorithm to count leading zeroes from: https://stackoverflow.com/a/28433850/389119
                let array_type = self.context.new_array_type(None, arg_type, 3);
                let result = self.current_func()
                    .new_local(None, array_type, "count_trailing_zeroes_results");

                let sixty_four = self.context.new_rvalue_from_long(arg_type, 64);
                let high = self.context.new_cast(None, arg >> sixty_four, self.u64_type);
                let low = self.context.new_cast(None, arg, self.u64_type);

                let zero = self.context.new_rvalue_zero(self.usize_type);
                let one = self.context.new_rvalue_one(self.usize_type);
                let two = self.context.new_rvalue_from_long(self.usize_type, 2);

                let ctzll = self.context.get_builtin_function("__builtin_ctzll");

                let first_elem = self.context.new_array_access(None, result, zero);
                let first_value = self.context.new_cast(None, self.context.new_call(None, ctzll, &[low]), arg_type);
                self.llbb()
                    .add_assignment(None, first_elem, first_value);

                let second_elem = self.context.new_array_access(None, result, one);
                let second_value = self.context.new_cast(None, self.context.new_call(None, ctzll, &[high]), arg_type) + sixty_four;
                self.llbb()
                    .add_assignment(None, second_elem, second_value);

                let third_elem = self.context.new_array_access(None, result, two);
                let third_value = self.context.new_rvalue_from_long(arg_type, 128);
                self.llbb()
                    .add_assignment(None, third_elem, third_value);

                let not_low = self.context.new_unary_op(None, UnaryOp::LogicalNegate, self.u64_type, low);
                let not_high = self.context.new_unary_op(None, UnaryOp::LogicalNegate, self.u64_type, high);
                let not_low_and_not_high = not_low & not_high;
                let index = not_low + not_low_and_not_high;

                let res = self.context.new_array_access(None, result, index);

                return self.context.new_cast(None, res, arg_type);
            }
            else {
                unimplemented!("count_trailing_zeroes for {:?}", arg_type);
            };
        let count_trailing_zeroes = self.context.get_builtin_function(count_trailing_zeroes);
        let arg =
            if arg_type != expected_type {
                self.context.new_cast(None, arg, expected_type)
            }
            else {
                arg
            };
        let res = self.context.new_call(None, count_trailing_zeroes, &[arg]);
        self.context.new_cast(None, res, arg_type)
    }
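    // The 128-bit index selection mirrors count_leading_zeroes, but is
    // keyed on the low half first:
    //
    //     low != 0              => index = 0 => ctz(low)
    //     low == 0 && high != 0 => index = 1 => 64 + ctz(high)
    //     low == 0 && high == 0 => index = 2 => 128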

    fn int_width(&self, typ: Type<'gcc>) -> i64 {
        self.cx.int_width(typ) as i64
    }

    fn pop_count(&self, value: RValue<'gcc>) -> RValue<'gcc> {
        // TODO: use the optimized version with fewer operations.
        let value_type = value.get_type();

        if value_type.is_u128(&self.cx) {
            // TODO: fold this into the normal algorithm below for a more efficient
            // implementation (one that does not require a call to __popcountdi2).
            let popcount = self.context.get_builtin_function("__builtin_popcountll");
            let sixty_four = self.context.new_rvalue_from_long(value_type, 64);
            let high = self.context.new_cast(None, value >> sixty_four, self.cx.ulonglong_type);
            let high = self.context.new_call(None, popcount, &[high]);
            let low = self.context.new_cast(None, value, self.cx.ulonglong_type);
            let low = self.context.new_call(None, popcount, &[low]);
            return high + low;
        }

        // First step.
        let mask = self.context.new_rvalue_from_long(value_type, 0x5555555555555555);
        let left = value & mask;
        let shifted = value >> self.context.new_rvalue_from_int(value_type, 1);
        let right = shifted & mask;
        let value = left + right;

        // Second step.
        let mask = self.context.new_rvalue_from_long(value_type, 0x3333333333333333);
        let left = value & mask;
        let shifted = value >> self.context.new_rvalue_from_int(value_type, 2);
        let right = shifted & mask;
        let value = left + right;

        // Third step.
        let mask = self.context.new_rvalue_from_long(value_type, 0x0F0F0F0F0F0F0F0F);
        let left = value & mask;
        let shifted = value >> self.context.new_rvalue_from_int(value_type, 4);
        let right = shifted & mask;
        let value = left + right;

        if value_type.is_u8(&self.cx) {
            return value;
        }

        // Fourth step.
        let mask = self.context.new_rvalue_from_long(value_type, 0x00FF00FF00FF00FF);
        let left = value & mask;
        let shifted = value >> self.context.new_rvalue_from_int(value_type, 8);
        let right = shifted & mask;
        let value = left + right;

        if value_type.is_u16(&self.cx) {
            return value;
        }

        // Fifth step.
        let mask = self.context.new_rvalue_from_long(value_type, 0x0000FFFF0000FFFF);
        let left = value & mask;
        let shifted = value >> self.context.new_rvalue_from_int(value_type, 16);
        let right = shifted & mask;
        let value = left + right;

        if value_type.is_u32(&self.cx) {
            return value;
        }

        // Sixth step.
        let mask = self.context.new_rvalue_from_long(value_type, 0x00000000FFFFFFFF);
        let left = value & mask;
        let shifted = value >> self.context.new_rvalue_from_int(value_type, 32);
        let right = shifted & mask;
        let value = left + right;

        value
    }
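    // The masked-add steps above implement a SWAR popcount: each step adds
    // neighbouring bit-groups in parallel. A worked 8-bit sketch:
    //
    //     value = 0b1101_0110          // 5 bits set
    //     step1 = 0b1001_0101 (0x95)   // per-pair counts: 2,1,1,1
    //     step2 = 0b0011_0010 (0x32)   // per-nibble counts: 3,2
    //     step3 = 0b0000_0101 (0x05)   // total: 5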

    // Algorithm from: https://blog.regehr.org/archives/1063
    fn rotate_left(&mut self, value: RValue<'gcc>, shift: RValue<'gcc>, width: u64) -> RValue<'gcc> {
        let max = self.context.new_rvalue_from_long(shift.get_type(), width as i64);
        let shift = shift % max;
        let lhs = self.shl(value, shift);
        let result_and =
            self.and(
                self.context.new_unary_op(None, UnaryOp::Minus, shift.get_type(), shift),
                self.context.new_rvalue_from_long(shift.get_type(), width as i64 - 1),
            );
        let rhs = self.lshr(value, result_and);
        self.or(lhs, rhs)
    }

    // Algorithm from: https://blog.regehr.org/archives/1063
    fn rotate_right(&mut self, value: RValue<'gcc>, shift: RValue<'gcc>, width: u64) -> RValue<'gcc> {
        let max = self.context.new_rvalue_from_long(shift.get_type(), width as i64);
        let shift = shift % max;
        let lhs = self.lshr(value, shift);
        let result_and =
            self.and(
                self.context.new_unary_op(None, UnaryOp::Minus, shift.get_type(), shift),
                self.context.new_rvalue_from_long(shift.get_type(), width as i64 - 1),
            );
        let rhs = self.shl(value, result_and);
        self.or(lhs, rhs)
    }
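    // Both rotations follow the UB-free pattern from the linked article:
    // roughly, for a width-w rotate left,
    //
    //     (x << (n % w)) | (x >> (-n & (w - 1)))
    //
    // which avoids an undefined shift-by-width when `n` is a multiple of w.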

    fn saturating_add(&mut self, lhs: RValue<'gcc>, rhs: RValue<'gcc>, signed: bool, width: u64) -> RValue<'gcc> {
        let func = self.current_func.borrow().expect("func");

        if signed {
            // Algorithm from: https://stackoverflow.com/a/56531252/389119
            let after_block = func.new_block("after");
            let func_name =
                match width {
                    8 => "__builtin_add_overflow",
                    16 => "__builtin_add_overflow",
                    32 => "__builtin_sadd_overflow",
                    64 => "__builtin_saddll_overflow",
                    128 => "__builtin_add_overflow",
                    _ => unreachable!(),
                };
            let overflow_func = self.context.get_builtin_function(func_name);
            let result_type = lhs.get_type();
            let res = func.new_local(None, result_type, "saturating_sum");
            let overflow = self.overflow_call(overflow_func, &[lhs, rhs, res.get_address(None)], None);

            let then_block = func.new_block("then");

            let unsigned_type = self.context.new_int_type(width as i32 / 8, false);
            let shifted = self.context.new_cast(None, lhs, unsigned_type) >> self.context.new_rvalue_from_int(unsigned_type, width as i32 - 1);
            let uint_max = self.context.new_unary_op(None, UnaryOp::BitwiseNegate, unsigned_type,
                self.context.new_rvalue_from_int(unsigned_type, 0)
            );
            let int_max = uint_max >> self.context.new_rvalue_one(unsigned_type);
            then_block.add_assignment(None, res, self.context.new_cast(None, shifted + int_max, result_type));
            then_block.end_with_jump(None, after_block);

            self.block.expect("block").end_with_conditional(None, overflow, then_block, after_block);

            // NOTE: since jumps were added in a place rustc does not
            // expect, the current blocks in the state need to be updated.
            *self.current_block.borrow_mut() = Some(after_block);
            self.block = Some(after_block);

            res.to_rvalue()
        }
        else {
            // Algorithm from: http://locklessinc.com/articles/sat_arithmetic/
            let res = lhs + rhs;
            let res_type = res.get_type();
            let cond = self.context.new_comparison(None, ComparisonOp::LessThan, res, lhs);
            let value = self.context.new_unary_op(None, UnaryOp::Minus, res_type, self.context.new_cast(None, cond, res_type));
            res | value
        }
    }
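    // On overflow, the signed path above picks the saturation bound from
    // the sign of `lhs` without branching on it (a sketch for width w):
    //
    //     shifted = (unsigned)lhs >> (w - 1);  // 0 if lhs >= 0, 1 if lhs < 0
    //     bound   = shifted + INT_MAX;         // INT_MAX, or INT_MIN by wraparound
    //
    // The unsigned path relies on `res < lhs` detecting wraparound:
    // `-(res < lhs)` is all-ones exactly on overflow, so `res | value`
    // saturates to the maximum.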

    // Algorithm from: https://locklessinc.com/articles/sat_arithmetic/
    fn saturating_sub(&mut self, lhs: RValue<'gcc>, rhs: RValue<'gcc>, signed: bool, width: u64) -> RValue<'gcc> {
        if signed {
            // Also based on the algorithm from: https://stackoverflow.com/a/56531252/389119
            let func_name =
                match width {
                    8 => "__builtin_sub_overflow",
                    16 => "__builtin_sub_overflow",
                    32 => "__builtin_ssub_overflow",
                    64 => "__builtin_ssubll_overflow",
                    128 => "__builtin_sub_overflow",
                    _ => unreachable!(),
                };
            let overflow_func = self.context.get_builtin_function(func_name);
            let result_type = lhs.get_type();
            let func = self.current_func.borrow().expect("func");
            let res = func.new_local(None, result_type, "saturating_diff");
            let overflow = self.overflow_call(overflow_func, &[lhs, rhs, res.get_address(None)], None);

            let then_block = func.new_block("then");
            let after_block = func.new_block("after");

            let unsigned_type = self.context.new_int_type(width as i32 / 8, false);
            let shifted = self.context.new_cast(None, lhs, unsigned_type) >> self.context.new_rvalue_from_int(unsigned_type, width as i32 - 1);
            let uint_max = self.context.new_unary_op(None, UnaryOp::BitwiseNegate, unsigned_type,
                self.context.new_rvalue_from_int(unsigned_type, 0)
            );
            let int_max = uint_max >> self.context.new_rvalue_one(unsigned_type);
            then_block.add_assignment(None, res, self.context.new_cast(None, shifted + int_max, result_type));
            then_block.end_with_jump(None, after_block);

            self.block.expect("block").end_with_conditional(None, overflow, then_block, after_block);

            // NOTE: since jumps were added in a place rustc does not
            // expect, the current blocks in the state need to be updated.
            *self.current_block.borrow_mut() = Some(after_block);
            self.block = Some(after_block);

            res.to_rvalue()
        }
        else {
            let res = lhs - rhs;
            let comparison = self.context.new_comparison(None, ComparisonOp::LessThanEquals, res, lhs);
            let comparison = self.context.new_cast(None, comparison, lhs.get_type());
            let unary_op = self.context.new_unary_op(None, UnaryOp::Minus, comparison.get_type(), comparison);
            self.and(res, unary_op)
        }
    }
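    // Unsigned saturating subtraction sketch: `res <= lhs` holds exactly
    // when no borrow occurred, so `-(res <= lhs)` is an all-ones mask in
    // that case and `res & mask` keeps the result; on underflow the mask
    // is zero and the result saturates to 0.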
1074 }
1075
1076 fn try_intrinsic<'gcc, 'tcx>(bx: &mut Builder<'_, 'gcc, 'tcx>, try_func: RValue<'gcc>, data: RValue<'gcc>, _catch_func: RValue<'gcc>, dest: RValue<'gcc>) {
1077     if bx.sess().panic_strategy() == PanicStrategy::Abort {
1078         bx.call(bx.type_void(), try_func, &[data], None);
1079         // Return 0 unconditionally from the intrinsic call;
1080         // we can never unwind.
1081         let ret_align = bx.tcx.data_layout.i32_align.abi;
1082         bx.store(bx.const_i32(0), dest, ret_align);
1083     }
1084     else if wants_msvc_seh(bx.sess()) {
1085         unimplemented!();
1086         //codegen_msvc_try(bx, try_func, data, catch_func, dest);
1087     }
1088     else {
1089         unimplemented!();
1090         //codegen_gnu_try(bx, try_func, data, catch_func, dest);
1091     }
1092 }
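
// For illustration only: with -Cpanic=abort, the lowering above behaves like
// this plain-Rust sketch; the helper name and signature are hypothetical and
// chosen just to mirror the intrinsic's arguments.
#[allow(dead_code)]
unsafe fn rust_try_abort_sketch(try_func: unsafe fn(*mut u8), data: *mut u8) -> i32 {
    // Any panic aborts the process before `try_func` returns, so the
    // intrinsic can unconditionally report "did not unwind" (0).
    try_func(data);
    0
}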
1093
1094 // MSVC's definition of the `rust_try` function.
1095 //
1096 // This implementation uses LLVM's newer exception-handling instructions,
1097 // which support SEH on MSVC targets. Although these instructions are
1098 // meant to work for all targets, as of this writing LLVM does not
1099 // recommend using them elsewhere, since the older instructions are still
1100 // better optimized.
1101 /*fn codegen_msvc_try<'a, 'gcc, 'tcx>(_bx: &mut Builder<'a, 'gcc, 'tcx>, _try_func: RValue<'gcc>, _data: RValue<'gcc>, _catch_func: RValue<'gcc>, _dest: RValue<'gcc>) {
1102     unimplemented!();
1103     /*let llfn = get_rust_try_fn(bx, &mut |mut bx| {
1104         bx.set_personality_fn(bx.eh_personality());
1105         bx.sideeffect();
1106
1107         let mut normal = bx.build_sibling_block("normal");
1108         let mut catchswitch = bx.build_sibling_block("catchswitch");
1109         let mut catchpad = bx.build_sibling_block("catchpad");
1110         let mut caught = bx.build_sibling_block("caught");
1111
1112         let try_func = llvm::get_param(bx.llfn(), 0);
1113         let data = llvm::get_param(bx.llfn(), 1);
1114         let catch_func = llvm::get_param(bx.llfn(), 2);
1115
1116         // We're generating an IR snippet that looks like:
1117         //
1118         //   declare i32 @rust_try(%try_func, %data, %catch_func) {
1119         //      %slot = alloca u8*
1120         //      invoke %try_func(%data) to label %normal unwind label %catchswitch
1121         //
1122         //   normal:
1123         //      ret i32 0
1124         //
1125         //   catchswitch:
1126         //      %cs = catchswitch within none [%catchpad] unwind to caller
1127         //
1128         //   catchpad:
1129         //      %tok = catchpad within %cs [%type_descriptor, 0, %slot]
1130         //      %ptr = load %slot
1131         //      call %catch_func(%data, %ptr)
1132         //      catchret from %tok to label %caught
1133         //
1134         //   caught:
1135         //      ret i32 1
1136         //   }
1137         //
1138         // This structure follows the basic usage of throw/try/catch in LLVM.
1139         // For example, compile this C++ snippet to see what LLVM generates:
1140         //
1141         //      #include <stdint.h>
1142         //
1143         //      struct rust_panic {
1144         //          rust_panic(const rust_panic&);
1145         //          ~rust_panic();
1146         //
1147         //          uint64_t x[2];
1148         //      };
1149         //
1150         //      int __rust_try(
1151         //          void (*try_func)(void*),
1152         //          void *data,
1153         //          void (*catch_func)(void*, void*) noexcept
1154         //      ) {
1155         //          try {
1156         //              try_func(data);
1157         //              return 0;
1158         //          } catch(rust_panic& a) {
1159         //              catch_func(data, &a);
1160         //              return 1;
1161         //          }
1162         //      }
1163         //
1164         // More information can be found in libstd's seh.rs implementation.
1165         let ptr_align = bx.tcx().data_layout.pointer_align.abi;
1166         let slot = bx.alloca(bx.type_i8p(), ptr_align);
1167         bx.invoke(try_func, &[data], normal.llbb(), catchswitch.llbb(), None);
1168
1169         normal.ret(bx.const_i32(0));
1170
1171         let cs = catchswitch.catch_switch(None, None, 1);
1172         catchswitch.add_handler(cs, catchpad.llbb());
1173
1174         // We can't use the TypeDescriptor defined in libpanic_unwind because it
1175         // might be in another DLL and the SEH encoding only supports specifying
1176         // a TypeDescriptor from the current module.
1177         //
1178         // However this isn't an issue since the MSVC runtime uses string
1179         // comparison on the type name to match TypeDescriptors rather than
1180         // pointer equality.
1181         //
1182         // So instead we generate a new TypeDescriptor in each module that uses
1183         // `try` and let the linker merge the duplicate definitions across
1184         // modules.
1185         //
1186         // When modifying, make sure that the type_name string exactly matches
1187         // the one used in src/libpanic_unwind/seh.rs.
1188         let type_info_vtable = bx.declare_global("??_7type_info@@6B@", bx.type_i8p());
1189         let type_name = bx.const_bytes(b"rust_panic\0");
1190         let type_info =
1191             bx.const_struct(&[type_info_vtable, bx.const_null(bx.type_i8p()), type_name], false);
1192         let tydesc = bx.declare_global("__rust_panic_type_info", bx.val_ty(type_info));
1193         unsafe {
1194             llvm::LLVMRustSetLinkage(tydesc, llvm::Linkage::LinkOnceODRLinkage);
1195             llvm::SetUniqueComdat(bx.llmod, tydesc);
1196             llvm::LLVMSetInitializer(tydesc, type_info);
1197         }
1198
1199         // The flag value of 8 indicates that we are catching the exception by
1200         // reference instead of by value. We can't use catch by value because
1201         // that requires copying the exception object, which we don't support
1202         // since our exception object effectively contains a Box.
1203         //
1204         // Source: MicrosoftCXXABI::getAddrOfCXXCatchHandlerType in clang
1205         let flags = bx.const_i32(8);
1206         let funclet = catchpad.catch_pad(cs, &[tydesc, flags, slot]);
1207         let ptr = catchpad.load(slot, ptr_align);
1208         catchpad.call(catch_func, &[data, ptr], Some(&funclet));
1209
1210         catchpad.catch_ret(&funclet, caught.llbb());
1211
1212         caught.ret(bx.const_i32(1));
1213     });
1214
1215     // Note that no invoke is used here because by definition this function
1216     // can't panic (that's what it's catching).
1217     let ret = bx.call(llfn, &[try_func, data, catch_func], None);
1218     let i32_align = bx.tcx().data_layout.i32_align.abi;
1219     bx.store(ret, dest, i32_align);*/
1220 }*/
1221
1222 // Definition of the standard `try` function for Rust using the GNU-like model
1223 // of exceptions (i.e., the normal semantics of LLVM's `landingpad` and `invoke`
1224 // instructions).
1225 //
1226 // This codegen is a little surprising because we always call a shim
1227 // function instead of emitting the `invoke` inline here. We do this
1228 // because LLVM allows only one personality function per function
1229 // definition. The call to the `try` intrinsic is inlined into the
1230 // function calling it, and that function may already have a different
1231 // personality function in play. Calling a shim guarantees that the shim
1232 // itself has the right personality function.
1233 /*fn codegen_gnu_try<'a, 'gcc, 'tcx>(_bx: &mut Builder<'a, 'gcc, 'tcx>, _try_func: RValue<'gcc>, _data: RValue<'gcc>, _catch_func: RValue<'gcc>, _dest: RValue<'gcc>) {
1234     unimplemented!();
1235     /*let llfn = get_rust_try_fn(bx, &mut |mut bx| {
1236         // Codegens the shims described above:
1237         //
1238         //   bx:
1239         //      invoke %try_func(%data) normal %normal unwind %catch
1240         //
1241         //   normal:
1242         //      ret 0
1243         //
1244         //   catch:
1245         //      (%ptr, _) = landingpad
1246         //      call %catch_func(%data, %ptr)
1247         //      ret 1
1248
1249         bx.sideeffect();
1250
1251         let mut then = bx.build_sibling_block("then");
1252         let mut catch = bx.build_sibling_block("catch");
1253
1254         let try_func = llvm::get_param(bx.llfn(), 0);
1255         let data = llvm::get_param(bx.llfn(), 1);
1256         let catch_func = llvm::get_param(bx.llfn(), 2);
1257         bx.invoke(try_func, &[data], then.llbb(), catch.llbb(), None);
1258         then.ret(bx.const_i32(0));
1259
1260         // Type indicator for the exception being thrown.
1261         //
1262         // The first value in this tuple is a pointer to the exception object
1263         // being thrown. The second value is a "selector" indicating which of
1264         // the landing pad clauses the exception's type has been matched against.
1265         // rust_try ignores the selector.
1266         let lpad_ty = bx.type_struct(&[bx.type_i8p(), bx.type_i32()], false);
1267         let vals = catch.landing_pad(lpad_ty, bx.eh_personality(), 1);
1268         let tydesc = match bx.tcx().lang_items().eh_catch_typeinfo() {
1269             Some(tydesc) => {
1270                 let tydesc = bx.get_static(tydesc);
1271                 bx.bitcast(tydesc, bx.type_i8p())
1272             }
1273             None => bx.const_null(bx.type_i8p()),
1274         };
1275         catch.add_clause(vals, tydesc);
1276         let ptr = catch.extract_value(vals, 0);
1277         catch.call(catch_func, &[data, ptr], None);
1278         catch.ret(bx.const_i32(1));
1279     });
1280
1281     // Note that no invoke is used here because by definition this function
1282     // can't panic (that's what it's catching).
1283     let ret = bx.call(llfn, &[try_func, data, catch_func], None);
1284     let i32_align = bx.tcx().data_layout.i32_align.abi;
1285     bx.store(ret, dest, i32_align);*/
1286 }*/
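
// For illustration only: once codegen_gnu_try is implemented, the observable
// behavior of the shim sketched above roughly matches this plain-Rust
// analogue built on std::panic::catch_unwind. The helper name is hypothetical,
// and the payload handling is simplified (the real shim hands the in-flight
// exception object itself to `catch_func`).
#[allow(dead_code)]
fn rust_try_gnu_sketch(
    try_func: fn(*mut u8),
    data: *mut u8,
    catch_func: fn(*mut u8, *mut u8),
) -> i32 {
    use std::panic::{catch_unwind, AssertUnwindSafe};
    match catch_unwind(AssertUnwindSafe(move || try_func(data))) {
        // normal: the closure returned, so report "did not unwind" (0).
        Ok(()) => 0,
        // catch: hand a pointer to the panic payload to `catch_func` and
        // report "unwound" (1).
        Err(payload) => {
            let ptr = Box::into_raw(payload) as *mut u8;
            catch_func(data, ptr);
            1
        }
    }
}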