use gccjit::{ComparisonOp, Function, RValue, ToRValue, Type, UnaryOp};
use rustc_codegen_ssa::MemFlags;
use rustc_codegen_ssa::base::wants_msvc_seh;
use rustc_codegen_ssa::common::{IntPredicate, span_invalid_monomorphization_error};
use rustc_codegen_ssa::mir::operand::{OperandRef, OperandValue};
use rustc_codegen_ssa::mir::place::PlaceRef;
use rustc_codegen_ssa::traits::{ArgAbiMethods, BaseTypeMethods, BuilderMethods, ConstMethods, IntrinsicCallMethods};
use rustc_middle::bug;
use rustc_middle::ty::{self, Instance, Ty};
use rustc_span::{Span, Symbol, symbol::kw, sym};
use rustc_target::abi::{HasDataLayout, LayoutOf};
use rustc_target::abi::call::{ArgAbi, FnAbi, PassMode};
use rustc_target::spec::PanicStrategy;

use crate::abi::GccType;
use crate::builder::Builder;
use crate::common::TypeReflection;
use crate::context::CodegenCx;
use crate::type_of::LayoutGccExt;
use crate::intrinsic::simd::generic_simd_intrinsic;
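
/// Maps a Rust intrinsic to the libm function or GCC builtin implementing it
/// directly, when a one-to-one mapping exists (e.g. `sqrtf32` to `sqrtf`).
/// Returns `None` for intrinsics that need dedicated codegen below.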
fn get_simple_intrinsic<'gcc, 'tcx>(cx: &CodegenCx<'gcc, 'tcx>, name: Symbol) -> Option<Function<'gcc>> {
    let gcc_name = match name {
        sym::sqrtf32 => "sqrtf",
        sym::sqrtf64 => "sqrt",
        sym::powif32 => "__builtin_powif",
        sym::powif64 => "__builtin_powi",
        sym::sinf32 => "sinf",
        sym::sinf64 => "sin",
        sym::cosf32 => "cosf",
        sym::cosf64 => "cos",
        sym::powf32 => "powf",
        sym::powf64 => "pow",
        sym::expf32 => "expf",
        sym::expf64 => "exp",
        sym::exp2f32 => "exp2f",
        sym::exp2f64 => "exp2",
        sym::logf32 => "logf",
        sym::logf64 => "log",
        sym::log10f32 => "log10f",
        sym::log10f64 => "log10",
        sym::log2f32 => "log2f",
        sym::log2f64 => "log2",
        sym::fmaf32 => "fmaf",
        sym::fmaf64 => "fma",
        sym::fabsf32 => "fabsf",
        sym::fabsf64 => "fabs",
        sym::minnumf32 => "fminf",
        sym::minnumf64 => "fmin",
        sym::maxnumf32 => "fmaxf",
        sym::maxnumf64 => "fmax",
        sym::copysignf32 => "copysignf",
        sym::copysignf64 => "copysign",
        sym::floorf32 => "floorf",
        sym::floorf64 => "floor",
        sym::ceilf32 => "ceilf",
        sym::ceilf64 => "ceil",
        sym::truncf32 => "truncf",
        sym::truncf64 => "trunc",
        sym::rintf32 => "rintf",
        sym::rintf64 => "rint",
        sym::nearbyintf32 => "nearbyintf",
        sym::nearbyintf64 => "nearbyint",
        sym::roundf32 => "roundf",
        sym::roundf64 => "round",
        sym::abort => "abort",
        _ => return None,
    };
    Some(cx.context.get_builtin_function(&gcc_name))
}

impl<'a, 'gcc, 'tcx> IntrinsicCallMethods<'tcx> for Builder<'a, 'gcc, 'tcx> {
    fn codegen_intrinsic_call(&mut self, instance: Instance<'tcx>, fn_abi: &FnAbi<'tcx, Ty<'tcx>>, args: &[OperandRef<'tcx, RValue<'gcc>>], llresult: RValue<'gcc>, span: Span) {
        let tcx = self.tcx;
        let callee_ty = instance.ty(tcx, ty::ParamEnv::reveal_all());

        let (def_id, substs) = match *callee_ty.kind() {
            ty::FnDef(def_id, substs) => (def_id, substs),
            _ => bug!("expected fn item type, found {}", callee_ty),
        };

        let sig = callee_ty.fn_sig(tcx);
        let sig = tcx.normalize_erasing_late_bound_regions(ty::ParamEnv::reveal_all(), sig);
        let arg_tys = sig.inputs();
        let ret_ty = sig.output();
        let name = tcx.item_name(def_id);
        let name_str = &*name.as_str();

        let llret_ty = self.layout_of(ret_ty).gcc_type(self, true);
        let result = PlaceRef::new_sized(llresult, fn_abi.ret.layout);
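
        // Intrinsics with a direct libm/GCC builtin equivalent are dispatched
        // through `get_simple_intrinsic`; everything else gets dedicated
        // codegen in the `match` below.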
        let simple = get_simple_intrinsic(self, name);
        let llval =
            match name {
                _ if simple.is_some() => {
                    // FIXME: remove this cast when the API supports function.
                    let func = unsafe { std::mem::transmute(simple.expect("simple")) };
                    self.call(func, &args.iter().map(|arg| arg.immediate()).collect::<Vec<_>>(), None)
                },
                sym::likely => {
                    self.expect(args[0].immediate(), true)
                },
                sym::unlikely => {
                    self.expect(args[0].immediate(), false)
                },
                kw::Try => {
                    try_intrinsic(self, args[0].immediate(), args[1].immediate(), args[2].immediate(), llresult);
                    return;
                },
                sym::breakpoint => {
                    unimplemented!();
                    /*let llfn = self.get_intrinsic(&("llvm.debugtrap"));
                    self.call(llfn, &[], None)*/
                },
                sym::va_copy => {
                    unimplemented!();
                    /*let intrinsic = self.cx().get_intrinsic(&("llvm.va_copy"));
                    self.call(intrinsic, &[args[0].immediate(), args[1].immediate()], None)*/
                },
                sym::va_arg => {
                    unimplemented!();
                    /*match fn_abi.ret.layout.abi {
                        abi::Abi::Scalar(ref scalar) => {
                            match scalar.value {
                                Primitive::Int(..) => {
                                    if self.cx().size_of(ret_ty).bytes() < 4 {
                                        // `va_arg` should not be called on an integer type
                                        // less than 4 bytes in length. If it is, promote
                                        // the integer to an `i32` and truncate the result
                                        // back to the smaller type.
                                        let promoted_result = emit_va_arg(self, args[0], tcx.types.i32);
                                        self.trunc(promoted_result, llret_ty)
                                    }
                                    else {
                                        emit_va_arg(self, args[0], ret_ty)
                                    }
                                },
                                Primitive::F64 | Primitive::Pointer => {
                                    emit_va_arg(self, args[0], ret_ty)
                                },
                                // `va_arg` should never be used with the return type f32.
                                Primitive::F32 => bug!("the va_arg intrinsic does not work with `f32`"),
                            }
                        },
                        _ => bug!("the va_arg intrinsic does not work with non-scalar types"),
                    }*/
                },
                sym::volatile_load | sym::unaligned_volatile_load => {
                    let tp_ty = substs.type_at(0);
                    let mut ptr = args[0].immediate();
                    if let PassMode::Cast(ty) = fn_abi.ret.mode {
                        ptr = self.pointercast(ptr, self.type_ptr_to(ty.gcc_type(self)));
                    }
                    let load = self.volatile_load(ptr.get_type(), ptr);
                    // TODO: set the alignment on the load.
                    /*let align = if name == sym::unaligned_volatile_load {
                        1
                    }
                    else {
                        self.align_of(tp_ty).bytes() as u32
                    };
                    unsafe {
                        llvm::LLVMSetAlignment(load, align);
                    }*/
                    self.to_immediate(load, self.layout_of(tp_ty))
                },
                sym::volatile_store => {
                    let dst = args[0].deref(self.cx());
                    args[1].val.volatile_store(self, dst);
                    return;
                },
                sym::unaligned_volatile_store => {
                    let dst = args[0].deref(self.cx());
                    args[1].val.unaligned_volatile_store(self, dst);
                    return;
                },
                sym::prefetch_read_data
                | sym::prefetch_write_data
                | sym::prefetch_read_instruction
                | sym::prefetch_write_instruction => {
                    unimplemented!();
                    /*let expect = self.get_intrinsic(&("llvm.prefetch"));
                    let (rw, cache_type) = match name {
                        sym::prefetch_read_data => (0, 1),
                        sym::prefetch_write_data => (1, 1),
                        sym::prefetch_read_instruction => (0, 0),
                        sym::prefetch_write_instruction => (1, 0),
                        _ => bug!(),
                    };
                    self.call(expect, &[
                        args[0].immediate(),
                        self.const_i32(rw),
                        args[1].immediate(),
                        self.const_i32(cache_type),
                    ], None)*/
                },
                sym::ctlz
                | sym::ctlz_nonzero
                | sym::cttz
                | sym::cttz_nonzero
                | sym::ctpop
                | sym::bswap
                | sym::bitreverse
                | sym::rotate_left
                | sym::rotate_right
                | sym::saturating_add
                | sym::saturating_sub => {
                    let ty = arg_tys[0];
                    match int_type_width_signed(ty, self) {
                        Some((width, signed)) => match name {
                            sym::ctlz | sym::cttz => {
                                let func = self.current_func.borrow().expect("func");
                                let then_block = func.new_block("then");
                                let else_block = func.new_block("else");
                                let after_block = func.new_block("after");

                                let arg = args[0].immediate();
                                let result = func.new_local(None, arg.get_type(), "zeros");
                                let zero = self.cx.context.new_rvalue_zero(arg.get_type());
                                let cond = self.cx.context.new_comparison(None, ComparisonOp::Equals, arg, zero);
                                self.block.expect("block").end_with_conditional(None, cond, then_block, else_block);

                                let zero_result = self.cx.context.new_rvalue_from_long(arg.get_type(), width as i64);
                                then_block.add_assignment(None, result, zero_result);
                                then_block.end_with_jump(None, after_block);

                                // NOTE: since jumps were added in a place
                                // count_leading_zeroes() does not expect, the current blocks
                                // in the state need to be updated.
                                *self.current_block.borrow_mut() = Some(else_block);
                                self.block = Some(else_block);

                                let zeros =
                                    match name {
                                        sym::ctlz => self.count_leading_zeroes(width, arg),
                                        sym::cttz => self.count_trailing_zeroes(width, arg),
                                        _ => unreachable!(),
                                    };
                                else_block.add_assignment(None, result, zeros);
                                else_block.end_with_jump(None, after_block);

                                // NOTE: since jumps were added in a place rustc does not
                                // expect, the current blocks in the state need to be updated.
                                *self.current_block.borrow_mut() = Some(after_block);
                                self.block = Some(after_block);

                                result.to_rvalue()

                                /*let y = self.const_bool(false);
                                let llfn = self.get_intrinsic(&format!("llvm.{}.i{}", name, width));
                                self.call(llfn, &[args[0].immediate(), y], None)*/
                            },
                            sym::ctlz_nonzero => {
                                self.count_leading_zeroes(width, args[0].immediate())
                            },
                            sym::cttz_nonzero => {
                                self.count_trailing_zeroes(width, args[0].immediate())
                            },
                            sym::ctpop => self.pop_count(args[0].immediate()),
                            sym::bswap => {
                                if width == 8 {
                                    args[0].immediate() // byte swapping a u8/i8 is a no-op
                                }
                                else {
                                    // TODO: check if it's faster to use string literals and a
                                    // match instead of format!.
                                    let bswap = self.cx.context.get_builtin_function(&format!("__builtin_bswap{}", width));
                                    let mut arg = args[0].immediate();
                                    // FIXME: this cast should not be necessary. Remove
                                    // it once proper sized integer types are available.
                                    let param_type = bswap.get_param(0).to_rvalue().get_type();
                                    if param_type != arg.get_type() {
                                        arg = self.bitcast(arg, param_type);
                                    }
                                    self.cx.context.new_call(None, bswap, &[arg])
                                }
                            },
                            sym::bitreverse => self.bit_reverse(width, args[0].immediate()),
                            sym::rotate_left | sym::rotate_right => {
                                // TODO: implement using the algorithm from
                                // https://blog.regehr.org/archives/1063
                                // for other platforms.
                                let is_left = name == sym::rotate_left;
                                let val = args[0].immediate();
                                let raw_shift = args[1].immediate();
                                if is_left {
                                    self.rotate_left(val, raw_shift, width)
                                }
                                else {
                                    self.rotate_right(val, raw_shift, width)
                                }
                            },
                            sym::saturating_add => {
                                self.saturating_add(args[0].immediate(), args[1].immediate(), signed, width)
                            },
                            sym::saturating_sub => {
                                self.saturating_sub(args[0].immediate(), args[1].immediate(), signed, width)
                            },
                            _ => bug!(),
                        },
                        None => {
                            span_invalid_monomorphization_error(
                                tcx.sess,
                                span,
                                &format!(
                                    "invalid monomorphization of `{}` intrinsic: \
                                    expected basic integer type, found `{}`",
                                    name, ty
                                ),
                            );
                            return;
                        }
                    }
                },
                sym::raw_eq => {
                    use rustc_target::abi::Abi::*;
                    let tp_ty = substs.type_at(0);
                    let layout = self.layout_of(tp_ty).layout;
                    // NOTE: leading underscore because the only use is commented out below.
                    let _use_integer_compare = match layout.abi {
                        Scalar(_) | ScalarPair(_, _) => true,
                        Uninhabited | Vector { .. } => false,
                        Aggregate { .. } => {
                            // For rusty ABIs, small aggregates are actually passed
                            // as `RegKind::Integer` (see `FnAbi::adjust_for_abi`),
                            // so we re-use that same threshold here.
                            layout.size <= self.data_layout().pointer_size * 2
                        }
                    };

                    let a = args[0].immediate();
                    let b = args[1].immediate();
                    if layout.size.bytes() == 0 {
                        self.const_bool(true)
                    }
                    /*else if use_integer_compare {
                        let integer_ty = self.type_ix(layout.size.bits()); // FIXME: LLVM creates an integer of 96 bits for [i32; 3], but gcc doesn't support this, so it creates an integer of 128 bits.
                        let ptr_ty = self.type_ptr_to(integer_ty);
                        let a_ptr = self.bitcast(a, ptr_ty);
                        let a_val = self.load(integer_ty, a_ptr, layout.align.abi);
                        let b_ptr = self.bitcast(b, ptr_ty);
                        let b_val = self.load(integer_ty, b_ptr, layout.align.abi);
                        self.icmp(IntPredicate::IntEQ, a_val, b_val)
                    }*/
                    else {
                        let void_ptr_type = self.context.new_type::<*const ()>();
                        let a_ptr = self.bitcast(a, void_ptr_type);
                        let b_ptr = self.bitcast(b, void_ptr_type);
                        let n = self.context.new_cast(None, self.const_usize(layout.size.bytes()), self.sizet_type);
                        let builtin = self.context.get_builtin_function("memcmp");
                        let cmp = self.context.new_call(None, builtin, &[a_ptr, b_ptr, n]);
                        self.icmp(IntPredicate::IntEQ, cmp, self.const_i32(0))
                    }
                },
                _ if name_str.starts_with("simd_") => {
                    match generic_simd_intrinsic(self, name, callee_ty, args, ret_ty, llret_ty, span) {
                        Ok(llval) => llval,
                        Err(()) => return,
                    }
                },
                _ => bug!("unknown intrinsic '{}'", name),
            };

        if !fn_abi.ret.is_ignore() {
            if let PassMode::Cast(ty) = fn_abi.ret.mode {
                let ptr_llty = self.type_ptr_to(ty.gcc_type(self));
                let ptr = self.pointercast(result.llval, ptr_llty);
                self.store(llval, ptr, result.align);
            }
            else {
                OperandRef::from_immediate_or_packed_pair(self, llval, result.layout)
                    .store(self, result);
            }
        }
    }

    fn abort(&mut self) {
        let func = self.context.get_builtin_function("abort");
        let func: RValue<'gcc> = unsafe { std::mem::transmute(func) };
        self.call(func, &[], None);
    }

    fn assume(&mut self, value: Self::Value) {
        // TODO: switch to assume when it exists.
        // Or use something like this:
        // #define __assume(cond) do { if (!(cond)) __builtin_unreachable(); } while (0)
        self.expect(value, true);
    }

    fn expect(&mut self, cond: Self::Value, _expected: bool) -> Self::Value {
        // TODO: use __builtin_expect once it is supported here.
        /*let expect = self.context.get_builtin_function("__builtin_expect");
        let expect: RValue<'gcc> = unsafe { std::mem::transmute(expect) };
        self.call(expect, &[cond, self.const_bool(expected)], None)*/
        cond
    }

    fn sideeffect(&mut self) {
        // TODO: implement.
        /*if self.tcx().sess.opts.debugging_opts.insert_sideeffect {
            let fnname = self.get_intrinsic(&("llvm.sideeffect"));
            self.call(fnname, &[], None);
        }*/
    }

    fn va_start(&mut self, _va_list: RValue<'gcc>) -> RValue<'gcc> {
        unimplemented!();
        /*let intrinsic = self.cx().get_intrinsic("llvm.va_start");
        self.call(intrinsic, &[va_list], None)*/
    }

    fn va_end(&mut self, _va_list: RValue<'gcc>) -> RValue<'gcc> {
        unimplemented!();
        /*let intrinsic = self.cx().get_intrinsic("llvm.va_end");
        self.call(intrinsic, &[va_list], None)*/
    }
}

impl<'a, 'gcc, 'tcx> ArgAbiMethods<'tcx> for Builder<'a, 'gcc, 'tcx> {
    fn store_fn_arg(&mut self, arg_abi: &ArgAbi<'tcx, Ty<'tcx>>, idx: &mut usize, dst: PlaceRef<'tcx, Self::Value>) {
        arg_abi.store_fn_arg(self, idx, dst)
    }

    fn store_arg(&mut self, arg_abi: &ArgAbi<'tcx, Ty<'tcx>>, val: RValue<'gcc>, dst: PlaceRef<'tcx, RValue<'gcc>>) {
        arg_abi.store(self, val, dst)
    }

    fn arg_memory_ty(&self, arg_abi: &ArgAbi<'tcx, Ty<'tcx>>) -> Type<'gcc> {
        arg_abi.memory_ty(self)
    }
}

pub trait ArgAbiExt<'gcc, 'tcx> {
    fn memory_ty(&self, cx: &CodegenCx<'gcc, 'tcx>) -> Type<'gcc>;
    fn store(&self, bx: &mut Builder<'_, 'gcc, 'tcx>, val: RValue<'gcc>, dst: PlaceRef<'tcx, RValue<'gcc>>);
    fn store_fn_arg(&self, bx: &mut Builder<'_, 'gcc, 'tcx>, idx: &mut usize, dst: PlaceRef<'tcx, RValue<'gcc>>);
}

impl<'gcc, 'tcx> ArgAbiExt<'gcc, 'tcx> for ArgAbi<'tcx, Ty<'tcx>> {
    /// Gets the GCC type for a place of the original Rust type of
    /// this argument/return, i.e., the result of `type_of::type_of`.
    fn memory_ty(&self, cx: &CodegenCx<'gcc, 'tcx>) -> Type<'gcc> {
        self.layout.gcc_type(cx, true)
    }

    /// Stores a direct/indirect value described by this ArgAbi into a
    /// place for the original Rust type of this argument/return.
    /// Can be used for either storing formal arguments into Rust variables
    /// or storing results of call/invoke instructions into their destinations.
    fn store(&self, bx: &mut Builder<'_, 'gcc, 'tcx>, val: RValue<'gcc>, dst: PlaceRef<'tcx, RValue<'gcc>>) {
        if self.is_ignore() {
            return;
        }
        if self.is_sized_indirect() {
            OperandValue::Ref(val, None, self.layout.align.abi).store(bx, dst)
        }
        else if self.is_unsized_indirect() {
            bug!("unsized `ArgAbi` must be handled through `store_fn_arg`");
        }
        else if let PassMode::Cast(cast) = self.mode {
            // FIXME(eddyb): Figure out when the simpler Store is safe, clang
            // uses it for i16 -> {i8, i8}, but not for i24 -> {i8, i8, i8}.
            let can_store_through_cast_ptr = false;
            if can_store_through_cast_ptr {
                let cast_ptr_llty = bx.type_ptr_to(cast.gcc_type(bx));
                let cast_dst = bx.pointercast(dst.llval, cast_ptr_llty);
                bx.store(val, cast_dst, self.layout.align.abi);
            }
            else {
                // The actual return type is a struct, but the ABI
                // adaptation code has cast it into some scalar type. The
                // code that follows is the only reliable way I have
                // found to do a transform like i64 -> {i32,i32}.
                // Basically we dump the data onto the stack then memcpy it.
                //
                // Other approaches I tried:
                // - Casting rust ret pointer to the foreign type and using Store
                //   is (a) unsafe if size of foreign type > size of rust type and
                //   (b) runs afoul of strict aliasing rules, yielding invalid
                //   assembly under -O (specifically, the store gets removed).
                // - Truncating foreign type to correct integral type and then
                //   bitcasting to the struct type yields invalid cast errors.
                //
                // We instead thus allocate some scratch space...
                let scratch_size = cast.size(bx);
                let scratch_align = cast.align(bx);
                let llscratch = bx.alloca(cast.gcc_type(bx), scratch_align);
                bx.lifetime_start(llscratch, scratch_size);

                // ... where we first store the value...
                bx.store(val, llscratch, scratch_align);

                // ... and then memcpy it to the intended destination.
                bx.memcpy(
                    dst.llval,
                    self.layout.align.abi,
                    llscratch,
                    scratch_align,
                    bx.const_usize(self.layout.size.bytes()),
                    MemFlags::empty(),
                );

                bx.lifetime_end(llscratch, scratch_size);
            }
        }
        else {
            OperandValue::Immediate(val).store(bx, dst);
        }
    }

    fn store_fn_arg<'a>(&self, bx: &mut Builder<'a, 'gcc, 'tcx>, idx: &mut usize, dst: PlaceRef<'tcx, RValue<'gcc>>) {
        let mut next = || {
            let val = bx.current_func().get_param(*idx as i32);
            *idx += 1;
            val.to_rvalue()
        };
        match self.mode {
            PassMode::Ignore => {},
            PassMode::Pair(..) => {
                OperandValue::Pair(next(), next()).store(bx, dst);
            },
            PassMode::Indirect { extra_attrs: Some(_), .. } => {
                OperandValue::Ref(next(), Some(next()), self.layout.align.abi).store(bx, dst);
            },
            PassMode::Direct(_) | PassMode::Indirect { extra_attrs: None, .. } | PassMode::Cast(_) => {
                let next_arg = next();
                self.store(bx, next_arg.to_rvalue(), dst);
            },
        }
    }
}
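
/// Returns the bit width and signedness of the given integer type, e.g.
/// `(32, true)` for `i32` and the target pointer width for `usize`/`isize`;
/// returns `None` for non-integer types.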
fn int_type_width_signed<'gcc, 'tcx>(ty: Ty<'tcx>, cx: &CodegenCx<'gcc, 'tcx>) -> Option<(u64, bool)> {
    match ty.kind() {
        ty::Int(t) => Some((
            match t {
                rustc_middle::ty::IntTy::Isize => u64::from(cx.tcx.sess.target.pointer_width),
                rustc_middle::ty::IntTy::I8 => 8,
                rustc_middle::ty::IntTy::I16 => 16,
                rustc_middle::ty::IntTy::I32 => 32,
                rustc_middle::ty::IntTy::I64 => 64,
                rustc_middle::ty::IntTy::I128 => 128,
            },
            true,
        )),
        ty::Uint(t) => Some((
            match t {
                rustc_middle::ty::UintTy::Usize => u64::from(cx.tcx.sess.target.pointer_width),
                rustc_middle::ty::UintTy::U8 => 8,
                rustc_middle::ty::UintTy::U16 => 16,
                rustc_middle::ty::UintTy::U32 => 32,
                rustc_middle::ty::UintTy::U64 => 64,
                rustc_middle::ty::UintTy::U128 => 128,
            },
            false,
        )),
        _ => None,
    }
}

impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
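    /// Reverses the bit order of `value` with SWAR-style mask/shift/or steps:
    /// swap groups of bits of decreasing (or increasing) size down to single
    /// bits. For example, bit-reversing the `u8` value `0b1101_0000` yields
    /// `0b0000_1011`.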
    fn bit_reverse(&mut self, width: u64, value: RValue<'gcc>) -> RValue<'gcc> {
        let typ = value.get_type();
        let context = &self.cx.context;
        match width {
            8 => {
                // First step: swap the nibbles.
                let left = self.and(value, context.new_rvalue_from_int(typ, 0xF0));
                let left = self.lshr(left, context.new_rvalue_from_int(typ, 4));
                let right = self.and(value, context.new_rvalue_from_int(typ, 0x0F));
                let right = self.shl(right, context.new_rvalue_from_int(typ, 4));
                let step1 = self.or(left, right);

                // Second step: swap the bit pairs.
                let left = self.and(step1, context.new_rvalue_from_int(typ, 0xCC));
                let left = self.lshr(left, context.new_rvalue_from_int(typ, 2));
                let right = self.and(step1, context.new_rvalue_from_int(typ, 0x33));
                let right = self.shl(right, context.new_rvalue_from_int(typ, 2));
                let step2 = self.or(left, right);

                // Third step: swap adjacent bits.
                let left = self.and(step2, context.new_rvalue_from_int(typ, 0xAA));
                let left = self.lshr(left, context.new_rvalue_from_int(typ, 1));
                let right = self.and(step2, context.new_rvalue_from_int(typ, 0x55));
                let right = self.shl(right, context.new_rvalue_from_int(typ, 1));
                let step3 = self.or(left, right);

                step3
            },
            16 => {
                // First step: swap adjacent bits.
                let left = self.and(value, context.new_rvalue_from_int(typ, 0x5555));
                let left = self.shl(left, context.new_rvalue_from_int(typ, 1));
                let right = self.and(value, context.new_rvalue_from_int(typ, 0xAAAA));
                let right = self.lshr(right, context.new_rvalue_from_int(typ, 1));
                let step1 = self.or(left, right);

                // Second step: swap the bit pairs.
                let left = self.and(step1, context.new_rvalue_from_int(typ, 0x3333));
                let left = self.shl(left, context.new_rvalue_from_int(typ, 2));
                let right = self.and(step1, context.new_rvalue_from_int(typ, 0xCCCC));
                let right = self.lshr(right, context.new_rvalue_from_int(typ, 2));
                let step2 = self.or(left, right);

                // Third step: swap the nibbles.
                let left = self.and(step2, context.new_rvalue_from_int(typ, 0x0F0F));
                let left = self.shl(left, context.new_rvalue_from_int(typ, 4));
                let right = self.and(step2, context.new_rvalue_from_int(typ, 0xF0F0));
                let right = self.lshr(right, context.new_rvalue_from_int(typ, 4));
                let step3 = self.or(left, right);

                // Fourth step: swap the bytes.
                let left = self.and(step3, context.new_rvalue_from_int(typ, 0x00FF));
                let left = self.shl(left, context.new_rvalue_from_int(typ, 8));
                let right = self.and(step3, context.new_rvalue_from_int(typ, 0xFF00));
                let right = self.lshr(right, context.new_rvalue_from_int(typ, 8));
                let step4 = self.or(left, right);

                step4
            },
            32 => {
                // TODO: Refactor with the other implementations.
                // First step: swap adjacent bits.
                let left = self.and(value, context.new_rvalue_from_long(typ, 0x55555555));
                let left = self.shl(left, context.new_rvalue_from_long(typ, 1));
                let right = self.and(value, context.new_rvalue_from_long(typ, 0xAAAAAAAA));
                let right = self.lshr(right, context.new_rvalue_from_long(typ, 1));
                let step1 = self.or(left, right);

                // Second step: swap the bit pairs.
                let left = self.and(step1, context.new_rvalue_from_long(typ, 0x33333333));
                let left = self.shl(left, context.new_rvalue_from_long(typ, 2));
                let right = self.and(step1, context.new_rvalue_from_long(typ, 0xCCCCCCCC));
                let right = self.lshr(right, context.new_rvalue_from_long(typ, 2));
                let step2 = self.or(left, right);

                // Third step: swap the nibbles.
                let left = self.and(step2, context.new_rvalue_from_long(typ, 0x0F0F0F0F));
                let left = self.shl(left, context.new_rvalue_from_long(typ, 4));
                let right = self.and(step2, context.new_rvalue_from_long(typ, 0xF0F0F0F0));
                let right = self.lshr(right, context.new_rvalue_from_long(typ, 4));
                let step3 = self.or(left, right);

                // Fourth step: swap the bytes.
                let left = self.and(step3, context.new_rvalue_from_long(typ, 0x00FF00FF));
                let left = self.shl(left, context.new_rvalue_from_long(typ, 8));
                let right = self.and(step3, context.new_rvalue_from_long(typ, 0xFF00FF00));
                let right = self.lshr(right, context.new_rvalue_from_long(typ, 8));
                let step4 = self.or(left, right);

                // Fifth step: swap the 16-bit halves.
                let left = self.and(step4, context.new_rvalue_from_long(typ, 0x0000FFFF));
                let left = self.shl(left, context.new_rvalue_from_long(typ, 16));
                let right = self.and(step4, context.new_rvalue_from_long(typ, 0xFFFF0000));
                let right = self.lshr(right, context.new_rvalue_from_long(typ, 16));
                let step5 = self.or(left, right);

                step5
            },
            64 => {
                // First step: swap the 32-bit halves.
                let left = self.shl(value, context.new_rvalue_from_long(typ, 32));
                let right = self.lshr(value, context.new_rvalue_from_long(typ, 32));
                let step1 = self.or(left, right);

                // Second step.
                let left = self.and(step1, context.new_rvalue_from_long(typ, 0x0001FFFF0001FFFF));
                let left = self.shl(left, context.new_rvalue_from_long(typ, 15));
                let right = self.and(step1, context.new_rvalue_from_long(typ, 0xFFFE0000FFFE0000u64 as i64)); // TODO: transmute the number instead?
                let right = self.lshr(right, context.new_rvalue_from_long(typ, 17));
                let step2 = self.or(left, right);

                // Third step.
                let left = self.lshr(step2, context.new_rvalue_from_long(typ, 10));
                let left = self.xor(step2, left);
                let temp = self.and(left, context.new_rvalue_from_long(typ, 0x003F801F003F801F));

                let left = self.shl(temp, context.new_rvalue_from_long(typ, 10));
                let left = self.or(temp, left);
                let step3 = self.xor(left, step2);

                // Fourth step.
                let left = self.lshr(step3, context.new_rvalue_from_long(typ, 4));
                let left = self.xor(step3, left);
                let temp = self.and(left, context.new_rvalue_from_long(typ, 0x0E0384210E038421));

                let left = self.shl(temp, context.new_rvalue_from_long(typ, 4));
                let left = self.or(temp, left);
                let step4 = self.xor(left, step3);

                // Fifth step.
                let left = self.lshr(step4, context.new_rvalue_from_long(typ, 2));
                let left = self.xor(step4, left);
                let temp = self.and(left, context.new_rvalue_from_long(typ, 0x2248884222488842));

                let left = self.shl(temp, context.new_rvalue_from_long(typ, 2));
                let left = self.or(temp, left);
                let step5 = self.xor(left, step4);

                step5
            },
            128 => {
                // TODO: find a more efficient implementation?
                let sixty_four = self.context.new_rvalue_from_long(typ, 64);
                let high = self.context.new_cast(None, value >> sixty_four, self.u64_type);
                let low = self.context.new_cast(None, value, self.u64_type);

                let reversed_high = self.bit_reverse(64, high);
                let reversed_low = self.bit_reverse(64, low);

                let new_low = self.context.new_cast(None, reversed_high, typ);
                let new_high = self.context.new_cast(None, reversed_low, typ) << sixty_four;

                new_low | new_high
            },
            _ => {
                panic!("cannot bit reverse with width = {}", width);
            },
        }
    }
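
    /// Counts leading zeroes via the `__builtin_clz*` builtin matching the
    /// argument width, with a 128-bit path built on two 64-bit counts. The
    /// GCC builtins are undefined for a zero argument, so `codegen_intrinsic_call`
    /// branches around the zero case before calling this for `ctlz`/`cttz`.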
    fn count_leading_zeroes(&self, width: u64, arg: RValue<'gcc>) -> RValue<'gcc> {
        let arg_type = arg.get_type();
        let count_leading_zeroes =
            if arg_type.is_uint(&self.cx) {
                "__builtin_clz"
            }
            else if arg_type.is_ulong(&self.cx) {
                "__builtin_clzl"
            }
            else if arg_type.is_ulonglong(&self.cx) {
                "__builtin_clzll"
            }
            else if width == 128 {
                // Algorithm from: https://stackoverflow.com/a/28433850/389119
                let array_type = self.context.new_array_type(None, arg_type, 3);
                let result = self.current_func()
                    .new_local(None, array_type, "count_leading_zeroes_results");

                let sixty_four = self.context.new_rvalue_from_long(arg_type, 64);
                let high = self.context.new_cast(None, arg >> sixty_four, self.u64_type);
                let low = self.context.new_cast(None, arg, self.u64_type);

                let zero = self.context.new_rvalue_zero(self.usize_type);
                let one = self.context.new_rvalue_one(self.usize_type);
                let two = self.context.new_rvalue_from_long(self.usize_type, 2);

                let clzll = self.context.get_builtin_function("__builtin_clzll");

                let first_elem = self.context.new_array_access(None, result, zero);
                let first_value = self.context.new_cast(None, self.context.new_call(None, clzll, &[high]), arg_type);
                self.block.expect("block")
                    .add_assignment(None, first_elem, first_value);

                let second_elem = self.context.new_array_access(None, result, one);
                let second_value = self.context.new_cast(None, self.context.new_call(None, clzll, &[low]), arg_type) + sixty_four;
                self.block.expect("block")
                    .add_assignment(None, second_elem, second_value);

                let third_elem = self.context.new_array_access(None, result, two);
                let third_value = self.context.new_rvalue_from_long(arg_type, 128);
                self.block.expect("block")
                    .add_assignment(None, third_elem, third_value);

                let not_high = self.context.new_unary_op(None, UnaryOp::LogicalNegate, self.u64_type, high);
                let not_low = self.context.new_unary_op(None, UnaryOp::LogicalNegate, self.u64_type, low);
                let not_low_and_not_high = not_low & not_high;
                let index = not_high + not_low_and_not_high;

                let res = self.context.new_array_access(None, result, index);

                return self.context.new_cast(None, res, arg_type);
            }
            else {
                let count_leading_zeroes = self.context.get_builtin_function("__builtin_clz");
                let arg = self.context.new_cast(None, arg, self.uint_type);
                let diff = self.int_width(self.uint_type) - self.int_width(arg_type);
                let diff = self.context.new_rvalue_from_long(self.int_type, diff);
                let res = self.context.new_call(None, count_leading_zeroes, &[arg]) - diff;
                return self.context.new_cast(None, res, arg_type);
            };
        let count_leading_zeroes = self.context.get_builtin_function(count_leading_zeroes);
        let res = self.context.new_call(None, count_leading_zeroes, &[arg]);
        self.context.new_cast(None, res, arg_type)
    }
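
    /// Counts trailing zeroes with the matching `__builtin_ctz*` builtin,
    /// casting the argument up to the builtin's parameter type when needed;
    /// as with `count_leading_zeroes`, the result is undefined for zero.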
    fn count_trailing_zeroes(&self, _width: u64, arg: RValue<'gcc>) -> RValue<'gcc> {
        let arg_type = arg.get_type();
        let (count_trailing_zeroes, expected_type) =
            if arg_type.is_uchar(&self.cx) || arg_type.is_ushort(&self.cx) || arg_type.is_uint(&self.cx) {
                // NOTE: we don't need to & 0xFF for uchar because the result is undefined on zero.
                ("__builtin_ctz", self.cx.uint_type)
            }
            else if arg_type.is_ulong(&self.cx) {
                ("__builtin_ctzl", self.cx.ulong_type)
            }
            else if arg_type.is_ulonglong(&self.cx) {
                ("__builtin_ctzll", self.cx.ulonglong_type)
            }
            else if arg_type.is_u128(&self.cx) {
                // Adapted from the algorithm to count leading zeroes from: https://stackoverflow.com/a/28433850/389119
                let array_type = self.context.new_array_type(None, arg_type, 3);
                let result = self.current_func()
                    .new_local(None, array_type, "count_trailing_zeroes_results");

                let sixty_four = self.context.new_rvalue_from_long(arg_type, 64);
                let high = self.context.new_cast(None, arg >> sixty_four, self.u64_type);
                let low = self.context.new_cast(None, arg, self.u64_type);

                let zero = self.context.new_rvalue_zero(self.usize_type);
                let one = self.context.new_rvalue_one(self.usize_type);
                let two = self.context.new_rvalue_from_long(self.usize_type, 2);

                let ctzll = self.context.get_builtin_function("__builtin_ctzll");

                let first_elem = self.context.new_array_access(None, result, zero);
                let first_value = self.context.new_cast(None, self.context.new_call(None, ctzll, &[low]), arg_type);
                self.block.expect("block")
                    .add_assignment(None, first_elem, first_value);

                let second_elem = self.context.new_array_access(None, result, one);
                let second_value = self.context.new_cast(None, self.context.new_call(None, ctzll, &[high]), arg_type) + sixty_four;
                self.block.expect("block")
                    .add_assignment(None, second_elem, second_value);

                let third_elem = self.context.new_array_access(None, result, two);
                let third_value = self.context.new_rvalue_from_long(arg_type, 128);
                self.block.expect("block")
                    .add_assignment(None, third_elem, third_value);

                let not_low = self.context.new_unary_op(None, UnaryOp::LogicalNegate, self.u64_type, low);
                let not_high = self.context.new_unary_op(None, UnaryOp::LogicalNegate, self.u64_type, high);
                let not_low_and_not_high = not_low & not_high;
                let index = not_low + not_low_and_not_high;

                let res = self.context.new_array_access(None, result, index);

                return self.context.new_cast(None, res, arg_type);
            }
            else {
                unimplemented!("count_trailing_zeroes for {:?}", arg_type);
            };
        let count_trailing_zeroes = self.context.get_builtin_function(count_trailing_zeroes);
        let arg =
            if arg_type != expected_type {
                self.context.new_cast(None, arg, expected_type)
            }
            else {
                arg
            };
        let res = self.context.new_call(None, count_trailing_zeroes, &[arg]);
        self.context.new_cast(None, res, arg_type)
    }

    fn int_width(&self, typ: Type<'gcc>) -> i64 {
        self.cx.int_width(typ) as i64
    }
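
    /// Counts the set bits ("population count") with the classic SWAR approach:
    /// sum adjacent 1-bit fields, then 2-bit fields, and so on, masking between
    /// steps. For example, `pop_count` of `0b1011` evaluates to 3.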
    fn pop_count(&self, value: RValue<'gcc>) -> RValue<'gcc> {
        // TODO: use the optimized version with fewer operations.
        let value_type = value.get_type();

        if value_type.is_u128(&self.cx) {
            // TODO: implement in the normal algorithm below to have a more efficient
            // implementation (that does not require a call to __popcountdi2).
            let popcount = self.context.get_builtin_function("__builtin_popcountll");
            let sixty_four = self.context.new_rvalue_from_long(value_type, 64);
            let high = self.context.new_cast(None, value >> sixty_four, self.cx.ulonglong_type);
            let high = self.context.new_call(None, popcount, &[high]);
            let low = self.context.new_cast(None, value, self.cx.ulonglong_type);
            let low = self.context.new_call(None, popcount, &[low]);
            return high + low;
        }

        // First step.
        let mask = self.context.new_rvalue_from_long(value_type, 0x5555555555555555);
        let left = value & mask;
        let shifted = value >> self.context.new_rvalue_from_int(value_type, 1);
        let right = shifted & mask;
        let value = left + right;

        // Second step.
        let mask = self.context.new_rvalue_from_long(value_type, 0x3333333333333333);
        let left = value & mask;
        let shifted = value >> self.context.new_rvalue_from_int(value_type, 2);
        let right = shifted & mask;
        let value = left + right;

        // Third step.
        let mask = self.context.new_rvalue_from_long(value_type, 0x0F0F0F0F0F0F0F0F);
        let left = value & mask;
        let shifted = value >> self.context.new_rvalue_from_int(value_type, 4);
        let right = shifted & mask;
        let value = left + right;

        if value_type.is_u8(&self.cx) {
            return value;
        }

        // Fourth step.
        let mask = self.context.new_rvalue_from_long(value_type, 0x00FF00FF00FF00FF);
        let left = value & mask;
        let shifted = value >> self.context.new_rvalue_from_int(value_type, 8);
        let right = shifted & mask;
        let value = left + right;

        if value_type.is_u16(&self.cx) {
            return value;
        }

        // Fifth step.
        let mask = self.context.new_rvalue_from_long(value_type, 0x0000FFFF0000FFFF);
        let left = value & mask;
        let shifted = value >> self.context.new_rvalue_from_int(value_type, 16);
        let right = shifted & mask;
        let value = left + right;

        if value_type.is_u32(&self.cx) {
            return value;
        }

        // Sixth step.
        let mask = self.context.new_rvalue_from_long(value_type, 0x00000000FFFFFFFF);
        let left = value & mask;
        let shifted = value >> self.context.new_rvalue_from_int(value_type, 32);
        let right = shifted & mask;
        let value = left + right;

        value
    }
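
    // For example, rotating the u8 value 0b1000_0001 left by 1 gives
    // 0b0000_0011, and rotating it right by 1 gives 0b1100_0000.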
    // Algorithm from: https://blog.regehr.org/archives/1063
    fn rotate_left(&mut self, value: RValue<'gcc>, shift: RValue<'gcc>, width: u64) -> RValue<'gcc> {
        let max = self.context.new_rvalue_from_long(shift.get_type(), width as i64);
        let shift = shift % max;
        let lhs = self.shl(value, shift);
        let result_and =
            self.and(
                self.context.new_unary_op(None, UnaryOp::Minus, shift.get_type(), shift),
                self.context.new_rvalue_from_long(shift.get_type(), width as i64 - 1),
            );
        let rhs = self.lshr(value, result_and);
        self.or(lhs, rhs)
    }

    // Algorithm from: https://blog.regehr.org/archives/1063
    fn rotate_right(&mut self, value: RValue<'gcc>, shift: RValue<'gcc>, width: u64) -> RValue<'gcc> {
        let max = self.context.new_rvalue_from_long(shift.get_type(), width as i64);
        let shift = shift % max;
        let lhs = self.lshr(value, shift);
        let result_and =
            self.and(
                self.context.new_unary_op(None, UnaryOp::Minus, shift.get_type(), shift),
                self.context.new_rvalue_from_long(shift.get_type(), width as i64 - 1),
            );
        let rhs = self.shl(value, result_and);
        self.or(lhs, rhs)
    }
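
    /// Saturating integer addition. The signed path calls the `__builtin_*add*_overflow`
    /// builtins and, on overflow, clamps to `INT_MAX` or `INT_MIN` depending on the sign
    /// of `lhs`; the unsigned path ORs the wrapped sum with `-(res < lhs)`, which is all
    /// ones exactly when the addition overflowed. For example, `250u8 + 10` saturates to 255.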
    fn saturating_add(&mut self, lhs: RValue<'gcc>, rhs: RValue<'gcc>, signed: bool, width: u64) -> RValue<'gcc> {
        let func = self.current_func.borrow().expect("func");

        if signed {
            // Algorithm from: https://stackoverflow.com/a/56531252/389119
            let after_block = func.new_block("after");
            let func_name =
                match width {
                    8 => "__builtin_add_overflow",
                    16 => "__builtin_add_overflow",
                    32 => "__builtin_sadd_overflow",
                    64 => "__builtin_saddll_overflow",
                    128 => "__builtin_add_overflow",
                    _ => unreachable!(),
                };
            let overflow_func = self.context.get_builtin_function(func_name);
            let result_type = lhs.get_type();
            let res = func.new_local(None, result_type, "saturating_sum");
            let overflow = self.overflow_call(overflow_func, &[lhs, rhs, res.get_address(None)], None);

            let then_block = func.new_block("then");

            let unsigned_type = self.context.new_int_type(width as i32 / 8, false);
            let shifted = self.context.new_cast(None, lhs, unsigned_type) >> self.context.new_rvalue_from_int(unsigned_type, width as i32 - 1);
            let uint_max = self.context.new_unary_op(None, UnaryOp::BitwiseNegate, unsigned_type,
                self.context.new_rvalue_from_int(unsigned_type, 0)
            );
            let int_max = uint_max >> self.context.new_rvalue_one(unsigned_type);
            then_block.add_assignment(None, res, self.context.new_cast(None, shifted + int_max, result_type));
            then_block.end_with_jump(None, after_block);

            self.block.expect("block").end_with_conditional(None, overflow, then_block, after_block);

            // NOTE: since jumps were added in a place rustc does not
            // expect, the current blocks in the state need to be updated.
            *self.current_block.borrow_mut() = Some(after_block);
            self.block = Some(after_block);

            res.to_rvalue()
        }
        else {
            // Algorithm from: http://locklessinc.com/articles/sat_arithmetic/
            let res = lhs + rhs;
            let res_type = res.get_type();
            let cond = self.context.new_comparison(None, ComparisonOp::LessThan, res, lhs);
            let value = self.context.new_unary_op(None, UnaryOp::Minus, res_type, self.context.new_cast(None, cond, res_type));
            res | value
        }
    }

    // Algorithm from: https://locklessinc.com/articles/sat_arithmetic/
    fn saturating_sub(&mut self, lhs: RValue<'gcc>, rhs: RValue<'gcc>, signed: bool, width: u64) -> RValue<'gcc> {
        if signed {
            // Also based on the algorithm from: https://stackoverflow.com/a/56531252/389119
            let func_name =
                match width {
                    8 => "__builtin_sub_overflow",
                    16 => "__builtin_sub_overflow",
                    32 => "__builtin_ssub_overflow",
                    64 => "__builtin_ssubll_overflow",
                    128 => "__builtin_sub_overflow",
                    _ => unreachable!(),
                };
            let overflow_func = self.context.get_builtin_function(func_name);
            let result_type = lhs.get_type();
            let func = self.current_func.borrow().expect("func");
            let res = func.new_local(None, result_type, "saturating_diff");
            let overflow = self.overflow_call(overflow_func, &[lhs, rhs, res.get_address(None)], None);

            let then_block = func.new_block("then");
            let after_block = func.new_block("after");

            let unsigned_type = self.context.new_int_type(width as i32 / 8, false);
            let shifted = self.context.new_cast(None, lhs, unsigned_type) >> self.context.new_rvalue_from_int(unsigned_type, width as i32 - 1);
            let uint_max = self.context.new_unary_op(None, UnaryOp::BitwiseNegate, unsigned_type,
                self.context.new_rvalue_from_int(unsigned_type, 0)
            );
            let int_max = uint_max >> self.context.new_rvalue_one(unsigned_type);
            then_block.add_assignment(None, res, self.context.new_cast(None, shifted + int_max, result_type));
            then_block.end_with_jump(None, after_block);

            self.block.expect("block").end_with_conditional(None, overflow, then_block, after_block);

            // NOTE: since jumps were added in a place rustc does not
            // expect, the current blocks in the state need to be updated.
            *self.current_block.borrow_mut() = Some(after_block);
            self.block = Some(after_block);

            res.to_rvalue()
        }
        else {
            let res = lhs - rhs;
            let comparison = self.context.new_comparison(None, ComparisonOp::LessThanEquals, res, lhs);
            let comparison = self.context.new_cast(None, comparison, lhs.get_type());
            let unary_op = self.context.new_unary_op(None, UnaryOp::Minus, comparison.get_type(), comparison);
            self.and(res, unary_op)
        }
    }
}
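
/// Codegen for the `try` intrinsic. With `panic=abort` there is no unwinding:
/// `try_func` is called directly and 0 ("no panic") is unconditionally stored
/// to the destination. The unwinding paths (MSVC SEH and GNU) are not
/// implemented yet; their LLVM-based codegen is kept below for reference.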
fn try_intrinsic<'gcc, 'tcx>(bx: &mut Builder<'_, 'gcc, 'tcx>, try_func: RValue<'gcc>, data: RValue<'gcc>, _catch_func: RValue<'gcc>, dest: RValue<'gcc>) {
    if bx.sess().panic_strategy() == PanicStrategy::Abort {
        bx.call(try_func, &[data], None);
        // Return 0 unconditionally from the intrinsic call;
        // we can never unwind.
        let ret_align = bx.tcx.data_layout.i32_align.abi;
        bx.store(bx.const_i32(0), dest, ret_align);
    }
    else if wants_msvc_seh(bx.sess()) {
        unimplemented!();
        //codegen_msvc_try(bx, try_func, data, catch_func, dest);
    }
    else {
        unimplemented!();
        //codegen_gnu_try(bx, try_func, data, catch_func, dest);
    }
}

// MSVC's definition of the `rust_try` function.
//
// This implementation uses the new exception handling instructions in LLVM
// that have support for SEH on MSVC targets. Although these instructions are
// meant to work for all targets, as of this writing LLVM does not recommend
// using them, as the old instructions are still better optimized.
/*fn codegen_msvc_try<'a, 'gcc, 'tcx>(_bx: &mut Builder<'a, 'gcc, 'tcx>, _try_func: RValue<'gcc>, _data: RValue<'gcc>, _catch_func: RValue<'gcc>, _dest: RValue<'gcc>) {
    unimplemented!();
    /*let llfn = get_rust_try_fn(bx, &mut |mut bx| {
        bx.set_personality_fn(bx.eh_personality());

        let mut normal = bx.build_sibling_block("normal");
        let mut catchswitch = bx.build_sibling_block("catchswitch");
        let mut catchpad = bx.build_sibling_block("catchpad");
        let mut caught = bx.build_sibling_block("caught");

        let try_func = llvm::get_param(bx.llfn(), 0);
        let data = llvm::get_param(bx.llfn(), 1);
        let catch_func = llvm::get_param(bx.llfn(), 2);

        // We're generating an IR snippet that looks like:
        //
        //   declare i32 @rust_try(%try_func, %data, %catch_func) {
        //      %slot = alloca u8*
        //      invoke %try_func(%data) to label %normal unwind label %catchswitch
        //
        //   normal:
        //      ret i32 0
        //
        //   catchswitch:
        //      %cs = catchswitch within none [%catchpad] unwind to caller
        //
        //   catchpad:
        //      %tok = catchpad within %cs [%type_descriptor, 0, %slot]
        //      %ptr = load %slot
        //      call %catch_func(%data, %ptr)
        //      catchret from %tok to label %caught
        //
        //   caught:
        //      ret i32 1
        //   }
        //
        // This structure follows the basic usage of throw/try/catch in LLVM.
        // For example, compile this C++ snippet to see what LLVM generates:
        //
        //      #include <stdint.h>
        //
        //      struct rust_panic {
        //          rust_panic(const rust_panic&);
        //          ~rust_panic();
        //
        //          uint64_t x[2];
        //      };
        //
        //      int __rust_try(
        //          void (*try_func)(void*),
        //          void* data,
        //          void (*catch_func)(void*, void*) noexcept
        //      ) {
        //          try {
        //              try_func(data);
        //              return 0;
        //          } catch(rust_panic& a) {
        //              catch_func(data, &a);
        //              return 1;
        //          }
        //      }
        //
        // More information can be found in libstd's seh.rs implementation.
        let ptr_align = bx.tcx().data_layout.pointer_align.abi;
        let slot = bx.alloca(bx.type_i8p(), ptr_align);
        bx.invoke(try_func, &[data], normal.llbb(), catchswitch.llbb(), None);

        normal.ret(bx.const_i32(0));

        let cs = catchswitch.catch_switch(None, None, 1);
        catchswitch.add_handler(cs, catchpad.llbb());

        // We can't use the TypeDescriptor defined in libpanic_unwind because it
        // might be in another DLL and the SEH encoding only supports specifying
        // a TypeDescriptor from the current module.
        //
        // However this isn't an issue since the MSVC runtime uses string
        // comparison on the type name to match TypeDescriptors rather than
        // pointer equality.
        //
        // So instead we generate a new TypeDescriptor in each module that uses
        // `try` and let the linker merge duplicate definitions in the same
        // module.
        //
        // When modifying, make sure that the type_name string exactly matches
        // the one used in src/libpanic_unwind/seh.rs.
        let type_info_vtable = bx.declare_global("??_7type_info@@6B@", bx.type_i8p());
        let type_name = bx.const_bytes(b"rust_panic\0");
        let type_info =
            bx.const_struct(&[type_info_vtable, bx.const_null(bx.type_i8p()), type_name], false);
        let tydesc = bx.declare_global("__rust_panic_type_info", bx.val_ty(type_info));
        unsafe {
            llvm::LLVMRustSetLinkage(tydesc, llvm::Linkage::LinkOnceODRLinkage);
            llvm::SetUniqueComdat(bx.llmod, tydesc);
            llvm::LLVMSetInitializer(tydesc, type_info);
        }

        // The flag value of 8 indicates that we are catching the exception by
        // reference instead of by value. We can't use catch by value because
        // that requires copying the exception object, which we don't support
        // since our exception object effectively contains a Box.
        //
        // Source: MicrosoftCXXABI::getAddrOfCXXCatchHandlerType in clang
        let flags = bx.const_i32(8);
        let funclet = catchpad.catch_pad(cs, &[tydesc, flags, slot]);
        let ptr = catchpad.load(slot, ptr_align);
        catchpad.call(catch_func, &[data, ptr], Some(&funclet));
        catchpad.catch_ret(&funclet, caught.llbb());

        caught.ret(bx.const_i32(1));
    });

    // Note that no invoke is used here because by definition this function
    // can't panic (that's what it's catching).
    let ret = bx.call(llfn, &[try_func, data, catch_func], None);
    let i32_align = bx.tcx().data_layout.i32_align.abi;
    bx.store(ret, dest, i32_align);*/
}*/

// Definition of the standard `try` function for Rust using the GNU-like model
// of exceptions (e.g., the normal semantics of LLVM's `landingpad` and `invoke`
// instructions).
//
// This codegen is a little surprising because we always call a shim
// function instead of inlining the call to `invoke` manually here. This is done
// because in LLVM we're only allowed to have one personality per function
// definition. The call to the `try` intrinsic is being inlined into the
// function calling it, and that function may already have other personality
// functions in play. By calling a shim we're guaranteed that our shim will have
// the right personality function.
/*fn codegen_gnu_try<'a, 'gcc, 'tcx>(_bx: &mut Builder<'a, 'gcc, 'tcx>, _try_func: RValue<'gcc>, _data: RValue<'gcc>, _catch_func: RValue<'gcc>, _dest: RValue<'gcc>) {
    unimplemented!();
    /*let llfn = get_rust_try_fn(bx, &mut |mut bx| {
        // Codegens the shims described above:
        //
        //   bx:
        //      invoke %try_func(%data) normal %normal unwind %catch
        //
        //   normal:
        //      ret 0
        //
        //   catch:
        //      (%ptr, _) = landingpad
        //      call %catch_func(%data, %ptr)
        //      ret 1
        let mut then = bx.build_sibling_block("then");
        let mut catch = bx.build_sibling_block("catch");

        let try_func = llvm::get_param(bx.llfn(), 0);
        let data = llvm::get_param(bx.llfn(), 1);
        let catch_func = llvm::get_param(bx.llfn(), 2);
        bx.invoke(try_func, &[data], then.llbb(), catch.llbb(), None);
        then.ret(bx.const_i32(0));

        // Type indicator for the exception being thrown.
        //
        // The first value in this tuple is a pointer to the exception object
        // being thrown. The second value is a "selector" indicating which of
        // the landing pad clauses the exception's type had been matched to.
        // rust_try ignores the selector.
        let lpad_ty = bx.type_struct(&[bx.type_i8p(), bx.type_i32()], false);
        let vals = catch.landing_pad(lpad_ty, bx.eh_personality(), 1);
        let tydesc = match bx.tcx().lang_items().eh_catch_typeinfo() {
            Some(tydesc) => {
                let tydesc = bx.get_static(tydesc);
                bx.bitcast(tydesc, bx.type_i8p())
            },
            None => bx.const_null(bx.type_i8p()),
        };
        catch.add_clause(vals, tydesc);
        let ptr = catch.extract_value(vals, 0);
        catch.call(catch_func, &[data, ptr], None);
        catch.ret(bx.const_i32(1));
    });

    // Note that no invoke is used here because by definition this function
    // can't panic (that's what it's catching).
    let ret = bx.call(llfn, &[try_func, data, catch_func], None);
    let i32_align = bx.tcx().data_layout.i32_align.abi;
    bx.store(ret, dest, i32_align);*/