git.lizzy.rs Git - rust.git/commitdiff
Merge commit '0c89065b934397b62838fe3e4ef6f6352fc52daf' into libgccjit-codegen
author Antoni Boucher <bouanto@zoho.com>
Sat, 14 Aug 2021 14:06:05 +0000 (10:06 -0400)
committer Antoni Boucher <bouanto@zoho.com>
Sat, 14 Aug 2021 14:06:05 +0000 (10:06 -0400)
compiler/rustc_codegen_gcc/rust-toolchain
compiler/rustc_codegen_gcc/src/builder.rs
compiler/rustc_codegen_gcc/src/context.rs
compiler/rustc_codegen_gcc/src/intrinsic/mod.rs
compiler/rustc_codegen_gcc/src/type_.rs
compiler/rustc_codegen_gcc/src/type_of.rs

index 3a29ac5ddb120e1c1b8cfece8efd36eac5329e28,0000000000000000000000000000000000000000..4e9771311efbdf057464c8c454ce135e66b06d30
mode 100644,000000..100644
--- /dev/null
+++ b/compiler/rustc_codegen_gcc/rust-toolchain
@@@ -1,1 -1,0 +1,1 @@@
- nightly-2021-07-21
++nightly-2021-08-12
index 8bdcb08bd3d6b7f196bdfce1bd479e011cd51b1e,0000000000000000000000000000000000000000..bb864c27e1b4d03f90b2480b31afef580d1c237d
mode 100644,000000..100644
--- /dev/null
+++ b/compiler/rustc_codegen_gcc/src/builder.rs
@@@ -1,1812 -1,0 +1,1822 @@@
 +use std::borrow::Cow;
 +use std::cell::Cell;
 +use std::convert::TryFrom;
 +use std::ops::{Deref, Range};
 +
 +use gccjit::FunctionType;
 +use gccjit::{
 +    BinaryOp,
 +    Block,
 +    ComparisonOp,
 +    Function,
 +    LValue,
 +    RValue,
 +    ToRValue,
 +    Type,
 +    UnaryOp,
 +};
 +use rustc_codegen_ssa::MemFlags;
 +use rustc_codegen_ssa::common::{AtomicOrdering, AtomicRmwBinOp, IntPredicate, RealPredicate, SynchronizationScope};
 +use rustc_codegen_ssa::mir::operand::{OperandRef, OperandValue};
 +use rustc_codegen_ssa::mir::place::PlaceRef;
 +use rustc_codegen_ssa::traits::{
 +    BackendTypes,
 +    BaseTypeMethods,
 +    BuilderMethods,
 +    ConstMethods,
 +    DerivedTypeMethods,
++    LayoutTypeMethods,
 +    HasCodegen,
 +    OverflowOp,
 +    StaticBuilderMethods,
 +};
 +use rustc_middle::ty::{ParamEnv, Ty, TyCtxt};
 +use rustc_middle::ty::layout::{HasParamEnv, HasTyCtxt, TyAndLayout};
 +use rustc_span::Span;
 +use rustc_span::def_id::DefId;
 +use rustc_target::abi::{
 +    self,
 +    Align,
 +    HasDataLayout,
 +    LayoutOf,
 +    Size,
 +    TargetDataLayout,
 +};
 +use rustc_target::spec::{HasTargetSpec, Target};
 +
 +use crate::common::{SignType, TypeReflection, type_is_pointer};
 +use crate::context::CodegenCx;
 +use crate::type_of::LayoutGccExt;
 +
 +// TODO
 +type Funclet = ();
 +
 +// TODO: remove this variable.
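 +// NOTE: this is only used to generate unique names for the locals that hold call results and
 +// loaded values.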
 +static mut RETURN_VALUE_COUNT: usize = 0;
 +
 +enum ExtremumOperation {
 +    Max,
 +    Min,
 +}
 +
 +trait EnumClone {
 +    fn clone(&self) -> Self;
 +}
 +
 +impl EnumClone for AtomicOrdering {
 +    fn clone(&self) -> Self {
 +        match *self {
 +            AtomicOrdering::NotAtomic => AtomicOrdering::NotAtomic,
 +            AtomicOrdering::Unordered => AtomicOrdering::Unordered,
 +            AtomicOrdering::Monotonic => AtomicOrdering::Monotonic,
 +            AtomicOrdering::Acquire => AtomicOrdering::Acquire,
 +            AtomicOrdering::Release => AtomicOrdering::Release,
 +            AtomicOrdering::AcquireRelease => AtomicOrdering::AcquireRelease,
 +            AtomicOrdering::SequentiallyConsistent => AtomicOrdering::SequentiallyConsistent,
 +        }
 +    }
 +}
 +
 +pub struct Builder<'a: 'gcc, 'gcc, 'tcx> {
 +    pub cx: &'a CodegenCx<'gcc, 'tcx>,
 +    pub block: Option<Block<'gcc>>,
 +    stack_var_count: Cell<usize>,
 +}
 +
 +impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
 +    fn with_cx(cx: &'a CodegenCx<'gcc, 'tcx>) -> Self {
 +        Builder {
 +            cx,
 +            block: None,
 +            stack_var_count: Cell::new(0),
 +        }
 +    }
 +
 +    fn atomic_extremum(&mut self, operation: ExtremumOperation, dst: RValue<'gcc>, src: RValue<'gcc>, order: AtomicOrdering) -> RValue<'gcc> {
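 +        // NOTE: lowered to a compare-and-swap loop: load the current value, then retry
 +        // compare_exchange with `src` for as long as the exchange fails and the loaded value
 +        // is still less than (for Max) or greater than (for Min) `src`.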
 +        let size = self.cx.int_width(src.get_type()) / 8;
 +
 +        let func = self.current_func();
 +
 +        let load_ordering =
 +            match order {
 +                // TODO: does this make sense?
 +                AtomicOrdering::AcquireRelease | AtomicOrdering::Release => AtomicOrdering::Acquire,
 +                _ => order.clone(),
 +            };
 +        let previous_value = self.atomic_load(dst.get_type(), dst, load_ordering.clone(), Size::from_bytes(size));
 +        let previous_var = func.new_local(None, previous_value.get_type(), "previous_value");
 +        let return_value = func.new_local(None, previous_value.get_type(), "return_value");
 +        self.llbb().add_assignment(None, previous_var, previous_value);
 +        self.llbb().add_assignment(None, return_value, previous_var.to_rvalue());
 +
 +        let while_block = func.new_block("while");
 +        let after_block = func.new_block("after_while");
 +        self.llbb().end_with_jump(None, while_block);
 +
 +        // NOTE: since jumps were added and compare_exchange doesn't expect this, the current blocks in the
 +        // state need to be updated.
 +        self.block = Some(while_block);
 +        *self.cx.current_block.borrow_mut() = Some(while_block);
 +
 +        let comparison_operator =
 +            match operation {
 +                ExtremumOperation::Max => ComparisonOp::LessThan,
 +                ExtremumOperation::Min => ComparisonOp::GreaterThan,
 +            };
 +
 +        let cond1 = self.context.new_comparison(None, comparison_operator, previous_var.to_rvalue(), self.context.new_cast(None, src, previous_value.get_type()));
 +        let compare_exchange = self.compare_exchange(dst, previous_var, src, order, load_ordering, false);
 +        let cond2 = self.cx.context.new_unary_op(None, UnaryOp::LogicalNegate, compare_exchange.get_type(), compare_exchange);
 +        let cond = self.cx.context.new_binary_op(None, BinaryOp::LogicalAnd, self.cx.bool_type, cond1, cond2);
 +
 +        while_block.end_with_conditional(None, cond, while_block, after_block);
 +
 +        // NOTE: since jumps were added in a place rustc does not expect, the current blocks in the
 +        // state need to be updated.
 +        self.block = Some(after_block);
 +        *self.cx.current_block.borrow_mut() = Some(after_block);
 +
 +        return_value.to_rvalue()
 +    }
 +
 +    fn compare_exchange(&self, dst: RValue<'gcc>, cmp: LValue<'gcc>, src: RValue<'gcc>, order: AtomicOrdering, failure_order: AtomicOrdering, weak: bool) -> RValue<'gcc> {
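 +        // This calls the __atomic_compare_exchange_<size> builtin: the destination and the
 +        // expected value are passed as (volatile) void pointers, and the orderings as integer
 +        // constants.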
 +        let size = self.cx.int_width(src.get_type());
 +        let compare_exchange = self.context.get_builtin_function(&format!("__atomic_compare_exchange_{}", size / 8));
 +        let order = self.context.new_rvalue_from_int(self.i32_type, order.to_gcc());
 +        let failure_order = self.context.new_rvalue_from_int(self.i32_type, failure_order.to_gcc());
 +        let weak = self.context.new_rvalue_from_int(self.bool_type, weak as i32);
 +
 +        let void_ptr_type = self.context.new_type::<*mut ()>();
 +        let volatile_void_ptr_type = void_ptr_type.make_volatile();
 +        let dst = self.context.new_cast(None, dst, volatile_void_ptr_type);
 +        let expected = self.context.new_cast(None, cmp.get_address(None), void_ptr_type);
 +
 +        // NOTE: not sure why, but we have the wrong type here.
 +        let int_type = compare_exchange.get_param(2).to_rvalue().get_type();
 +        let src = self.context.new_cast(None, src, int_type);
 +        self.context.new_call(None, compare_exchange, &[dst, expected, src, weak, order, failure_order])
 +    }
 +
 +    pub fn assign(&self, lvalue: LValue<'gcc>, value: RValue<'gcc>) {
 +        self.llbb().add_assignment(None, lvalue, value);
 +    }
 +
 +    fn check_call<'b>(&mut self, _typ: &str, func: Function<'gcc>, args: &'b [RValue<'gcc>]) -> Cow<'b, [RValue<'gcc>]> {
 +        //let mut fn_ty = self.cx.val_ty(func);
 +        // Strip off pointers
 +        /*while self.cx.type_kind(fn_ty) == TypeKind::Pointer {
 +            fn_ty = self.cx.element_type(fn_ty);
 +        }*/
 +
 +        /*assert!(
 +            self.cx.type_kind(fn_ty) == TypeKind::Function,
 +            "builder::{} not passed a function, but {:?}",
 +            typ,
 +            fn_ty
 +        );
 +
 +        let param_tys = self.cx.func_params_types(fn_ty);
 +
 +        let all_args_match = param_tys
 +            .iter()
 +            .zip(args.iter().map(|&v| self.val_ty(v)))
 +            .all(|(expected_ty, actual_ty)| *expected_ty == actual_ty);*/
 +
 +        let mut all_args_match = true;
 +        let mut param_types = vec![];
 +        let param_count = func.get_param_count();
 +        for (index, arg) in args.iter().enumerate().take(param_count) {
 +            let param = func.get_param(index as i32);
 +            let param = param.to_rvalue().get_type();
 +            if param != arg.get_type() {
 +                all_args_match = false;
 +            }
 +            param_types.push(param);
 +        }
 +
 +        if all_args_match {
 +            return Cow::Borrowed(args);
 +        }
 +
 +        let casted_args: Vec<_> = param_types
 +            .into_iter()
 +            .zip(args.iter())
 +            .enumerate()
 +            .map(|(_i, (expected_ty, &actual_val))| {
 +                let actual_ty = actual_val.get_type();
 +                if expected_ty != actual_ty {
 +                    /*debug!(
 +                        "type mismatch in function call of {:?}. \
 +                            Expected {:?} for param {}, got {:?}; injecting bitcast",
 +                        func, expected_ty, i, actual_ty
 +                    );*/
 +                    /*println!(
 +                        "type mismatch in function call of {:?}. \
 +                            Expected {:?} for param {}, got {:?}; injecting bitcast",
 +                        func, expected_ty, i, actual_ty
 +                    );*/
 +                    self.bitcast(actual_val, expected_ty)
 +                }
 +                else {
 +                    actual_val
 +                }
 +            })
 +            .collect();
 +
 +        Cow::Owned(casted_args)
 +    }
 +
 +    fn check_ptr_call<'b>(&mut self, _typ: &str, func_ptr: RValue<'gcc>, args: &'b [RValue<'gcc>]) -> Cow<'b, [RValue<'gcc>]> {
 +        //let mut fn_ty = self.cx.val_ty(func);
 +        // Strip off pointers
 +        /*while self.cx.type_kind(fn_ty) == TypeKind::Pointer {
 +            fn_ty = self.cx.element_type(fn_ty);
 +        }*/
 +
 +        /*assert!(
 +            self.cx.type_kind(fn_ty) == TypeKind::Function,
 +            "builder::{} not passed a function, but {:?}",
 +            typ,
 +            fn_ty
 +        );
 +
 +        let param_tys = self.cx.func_params_types(fn_ty);
 +
 +        let all_args_match = param_tys
 +            .iter()
 +            .zip(args.iter().map(|&v| self.val_ty(v)))
 +            .all(|(expected_ty, actual_ty)| *expected_ty == actual_ty);*/
 +
 +        let mut all_args_match = true;
 +        let mut param_types = vec![];
 +        let gcc_func = func_ptr.get_type().is_function_ptr_type().expect("function ptr");
 +        for (index, arg) in args.iter().enumerate().take(gcc_func.get_param_count()) {
 +            let param = gcc_func.get_param_type(index);
 +            if param != arg.get_type() {
 +                all_args_match = false;
 +            }
 +            param_types.push(param);
 +        }
 +
 +        if all_args_match {
 +            return Cow::Borrowed(args);
 +        }
 +
 +        let casted_args: Vec<_> = param_types
 +            .into_iter()
 +            .zip(args.iter())
 +            .enumerate()
 +            .map(|(_i, (expected_ty, &actual_val))| {
 +                let actual_ty = actual_val.get_type();
 +                if expected_ty != actual_ty {
 +                    /*debug!(
 +                        "type mismatch in function call of {:?}. \
 +                            Expected {:?} for param {}, got {:?}; injecting bitcast",
 +                        func, expected_ty, i, actual_ty
 +                    );*/
 +                    /*println!(
 +                        "type mismatch in function call of {:?}. \
 +                            Expected {:?} for param {}, got {:?}; injecting bitcast",
 +                        func, expected_ty, i, actual_ty
 +                    );*/
 +                    self.bitcast(actual_val, expected_ty)
 +                }
 +                else {
 +                    actual_val
 +                }
 +            })
 +            .collect();
 +
 +        Cow::Owned(casted_args)
 +    }
 +
 +    fn check_store(&mut self, val: RValue<'gcc>, ptr: RValue<'gcc>) -> RValue<'gcc> {
 +        let dest_ptr_ty = self.cx.val_ty(ptr).make_pointer(); // TODO: make sure make_pointer() is okay here.
 +        let stored_ty = self.cx.val_ty(val);
 +        let stored_ptr_ty = self.cx.type_ptr_to(stored_ty);
 +
 +        //assert_eq!(self.cx.type_kind(dest_ptr_ty), TypeKind::Pointer);
 +
 +        if dest_ptr_ty == stored_ptr_ty {
 +            ptr
 +        }
 +        else {
 +            /*debug!(
 +                "type mismatch in store. \
 +                    Expected {:?}, got {:?}; inserting bitcast",
 +                dest_ptr_ty, stored_ptr_ty
 +            );*/
 +            /*println!(
 +                "type mismatch in store. \
 +                    Expected {:?}, got {:?}; inserting bitcast",
 +                dest_ptr_ty, stored_ptr_ty
 +            );*/
 +            //ptr
 +            self.bitcast(ptr, stored_ptr_ty)
 +        }
 +    }
 +
 +    pub fn current_func(&self) -> Function<'gcc> {
 +        self.block.expect("block").get_function()
 +    }
 +
 +    fn function_call(&mut self, func: RValue<'gcc>, args: &[RValue<'gcc>], _funclet: Option<&Funclet>) -> RValue<'gcc> {
 +        //debug!("call {:?} with args ({:?})", func, args);
 +
 +        // TODO: remove when the API supports a different type for functions.
 +        let func: Function<'gcc> = self.cx.rvalue_as_function(func);
 +        let args = self.check_call("call", func, args);
 +        //let bundle = funclet.map(|funclet| funclet.bundle());
 +        //let bundle = bundle.as_ref().map(|b| &*b.raw);
 +
 +        // gccjit requires the result of a function call to be used, even when it is unused.
 +        // That's why we assign the result to a local or call add_eval().
 +        let return_type = func.get_return_type();
 +        let current_block = self.current_block.borrow().expect("block");
 +        let void_type = self.context.new_type::<()>();
 +        let current_func = current_block.get_function();
 +        if return_type != void_type {
 +            unsafe { RETURN_VALUE_COUNT += 1 };
 +            let result = current_func.new_local(None, return_type, &format!("returnValue{}", unsafe { RETURN_VALUE_COUNT }));
 +            current_block.add_assignment(None, result, self.cx.context.new_call(None, func, &args));
 +            result.to_rvalue()
 +        }
 +        else {
 +            current_block.add_eval(None, self.cx.context.new_call(None, func, &args));
 +            // Return a dummy value when there is no return value.
 +            self.context.new_rvalue_from_long(self.isize_type, 0)
 +        }
 +    }
 +
 +    fn function_ptr_call(&mut self, func_ptr: RValue<'gcc>, args: &[RValue<'gcc>], _funclet: Option<&Funclet>) -> RValue<'gcc> {
 +        //debug!("func ptr call {:?} with args ({:?})", func, args);
 +
 +        let args = self.check_ptr_call("call", func_ptr, args);
 +        //let bundle = funclet.map(|funclet| funclet.bundle());
 +        //let bundle = bundle.as_ref().map(|b| &*b.raw);
 +
 +        // gccjit requires the result of a function call to be used, even when it is unused.
 +        // That's why we assign the result to a local or call add_eval().
 +        let gcc_func = func_ptr.get_type().is_function_ptr_type().expect("function ptr");
 +        let mut return_type = gcc_func.get_return_type();
 +        let current_block = self.current_block.borrow().expect("block");
 +        let void_type = self.context.new_type::<()>();
 +        let current_func = current_block.get_function();
 +
 +        // FIXME: As a temporary workaround for unsupported LLVM intrinsics.
 +        if gcc_func.get_param_count() == 0 && format!("{:?}", func_ptr) == "__builtin_ia32_pmovmskb128" {
 +            return_type = self.int_type;
 +        }
 +
 +        if return_type != void_type {
 +            unsafe { RETURN_VALUE_COUNT += 1 };
 +            let result = current_func.new_local(None, return_type, &format!("returnValue{}", unsafe { RETURN_VALUE_COUNT }));
 +            current_block.add_assignment(None, result, self.cx.context.new_call_through_ptr(None, func_ptr, &args));
 +            result.to_rvalue()
 +        }
 +        else {
 +            if gcc_func.get_param_count() == 0 {
 +                // FIXME: As a temporary workaround for unsupported LLVM intrinsics.
 +                current_block.add_eval(None, self.cx.context.new_call_through_ptr(None, func_ptr, &[]));
 +            }
 +            else {
 +                current_block.add_eval(None, self.cx.context.new_call_through_ptr(None, func_ptr, &args));
 +            }
 +            // Return a dummy value when there is no return value.
 +            let result = current_func.new_local(None, self.isize_type, "dummyValueThatShouldNeverBeUsed");
 +            current_block.add_assignment(None, result, self.context.new_rvalue_from_long(self.isize_type, 0));
 +            result.to_rvalue()
 +        }
 +    }
 +
 +    pub fn overflow_call(&mut self, func: Function<'gcc>, args: &[RValue<'gcc>], _funclet: Option<&Funclet>) -> RValue<'gcc> {
 +        //debug!("overflow_call {:?} with args ({:?})", func, args);
 +
 +        //let bundle = funclet.map(|funclet| funclet.bundle());
 +        //let bundle = bundle.as_ref().map(|b| &*b.raw);
 +
 +        // gccjit requires the result of a function call to be used, even when it is unused.
 +        // That's why we assign the result to a local.
 +        let return_type = self.context.new_type::<bool>();
 +        let current_block = self.current_block.borrow().expect("block");
 +        let current_func = current_block.get_function();
 +        // TODO: return the new_call() directly? Since the overflow function has no side-effects.
 +        unsafe { RETURN_VALUE_COUNT += 1 };
 +        let result = current_func.new_local(None, return_type, &format!("returnValue{}", unsafe { RETURN_VALUE_COUNT }));
 +        current_block.add_assignment(None, result, self.cx.context.new_call(None, func, &args));
 +        result.to_rvalue()
 +    }
 +}
 +
 +impl<'gcc, 'tcx> HasCodegen<'tcx> for Builder<'_, 'gcc, 'tcx> {
 +    type CodegenCx = CodegenCx<'gcc, 'tcx>;
 +}
 +
 +impl<'tcx> HasTyCtxt<'tcx> for Builder<'_, '_, 'tcx> {
 +    fn tcx(&self) -> TyCtxt<'tcx> {
 +        self.cx.tcx()
 +    }
 +}
 +
 +impl HasDataLayout for Builder<'_, '_, '_> {
 +    fn data_layout(&self) -> &TargetDataLayout {
 +        self.cx.data_layout()
 +    }
 +}
 +
 +impl<'tcx> LayoutOf for Builder<'_, '_, 'tcx> {
 +    type Ty = Ty<'tcx>;
 +    type TyAndLayout = TyAndLayout<'tcx>;
 +
 +    fn layout_of(&self, ty: Ty<'tcx>) -> Self::TyAndLayout {
 +        self.cx.layout_of(ty)
 +    }
 +}
 +
 +impl<'gcc, 'tcx> Deref for Builder<'_, 'gcc, 'tcx> {
 +    type Target = CodegenCx<'gcc, 'tcx>;
 +
 +    fn deref(&self) -> &Self::Target {
 +        self.cx
 +    }
 +}
 +
 +impl<'gcc, 'tcx> BackendTypes for Builder<'_, 'gcc, 'tcx> {
 +    type Value = <CodegenCx<'gcc, 'tcx> as BackendTypes>::Value;
 +    type Function = <CodegenCx<'gcc, 'tcx> as BackendTypes>::Function;
 +    type BasicBlock = <CodegenCx<'gcc, 'tcx> as BackendTypes>::BasicBlock;
 +    type Type = <CodegenCx<'gcc, 'tcx> as BackendTypes>::Type;
 +    type Funclet = <CodegenCx<'gcc, 'tcx> as BackendTypes>::Funclet;
 +
 +    type DIScope = <CodegenCx<'gcc, 'tcx> as BackendTypes>::DIScope;
 +    type DILocation = <CodegenCx<'gcc, 'tcx> as BackendTypes>::DILocation;
 +    type DIVariable = <CodegenCx<'gcc, 'tcx> as BackendTypes>::DIVariable;
 +}
 +
 +impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> {
 +    fn build(cx: &'a CodegenCx<'gcc, 'tcx>, block: Block<'gcc>) -> Self {
 +        let mut bx = Builder::with_cx(cx);
 +        *cx.current_block.borrow_mut() = Some(block);
 +        bx.block = Some(block);
 +        bx
 +    }
 +
 +    fn build_sibling_block(&mut self, name: &str) -> Self {
 +        let block = self.append_sibling_block(name);
 +        Self::build(self.cx, block)
 +    }
 +
 +    fn llbb(&self) -> Block<'gcc> {
 +        self.block.expect("block")
 +    }
 +
 +    fn append_block(cx: &'a CodegenCx<'gcc, 'tcx>, func: RValue<'gcc>, name: &str) -> Block<'gcc> {
 +        let func = cx.rvalue_as_function(func);
 +        func.new_block(name)
 +    }
 +
 +    fn append_sibling_block(&mut self, name: &str) -> Block<'gcc> {
 +        let func = self.current_func();
 +        func.new_block(name)
 +    }
 +
 +    fn ret_void(&mut self) {
 +        self.llbb().end_with_void_return(None)
 +    }
 +
 +    fn ret(&mut self, value: RValue<'gcc>) {
 +        let value =
 +            if self.structs_as_pointer.borrow().contains(&value) {
 +                // NOTE: hack to workaround a limitation of the rustc API: see comment on
 +                // CodegenCx.structs_as_pointer
 +                value.dereference(None).to_rvalue()
 +            }
 +            else {
 +                value
 +            };
 +        self.llbb().end_with_return(None, value);
 +    }
 +
 +    fn br(&mut self, dest: Block<'gcc>) {
 +        self.llbb().end_with_jump(None, dest)
 +    }
 +
 +    fn cond_br(&mut self, cond: RValue<'gcc>, then_block: Block<'gcc>, else_block: Block<'gcc>) {
 +        self.llbb().end_with_conditional(None, cond, then_block, else_block)
 +    }
 +
 +    fn switch(&mut self, value: RValue<'gcc>, default_block: Block<'gcc>, cases: impl ExactSizeIterator<Item = (u128, Block<'gcc>)>) {
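 +        // Every (value, target) pair becomes a gccjit case covering a single value (the same
 +        // value is used as both lower and upper bound).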
 +        let mut gcc_cases = vec![];
 +        let typ = self.val_ty(value);
 +        for (on_val, dest) in cases {
 +            let on_val = self.const_uint_big(typ, on_val);
 +            gcc_cases.push(self.context.new_case(on_val, on_val, dest));
 +        }
 +        self.block.expect("block").end_with_switch(None, value, default_block, &gcc_cases);
 +    }
 +
-     fn invoke(&mut self, _func: RValue<'gcc>, _args: &[RValue<'gcc>], _then: Block<'gcc>, _catch: Block<'gcc>, _funclet: Option<&Funclet>) -> RValue<'gcc> {
-         unimplemented!();
++    fn invoke(&mut self, _typ: Type<'gcc>, _func: RValue<'gcc>, _args: &[RValue<'gcc>], then: Block<'gcc>, catch: Block<'gcc>, _funclet: Option<&Funclet>) -> RValue<'gcc> {
++        let condition = self.context.new_rvalue_from_int(self.bool_type, 0);
++        self.llbb().end_with_conditional(None, condition, then, catch);
++        self.context.new_rvalue_from_int(self.int_type, 0)
++
++        // TODO
 +        /*debug!("invoke {:?} with args ({:?})", func, args);
 +
 +        let args = self.check_call("invoke", func, args);
 +        let bundle = funclet.map(|funclet| funclet.bundle());
 +        let bundle = bundle.as_ref().map(|b| &*b.raw);
 +
 +        unsafe {
 +            llvm::LLVMRustBuildInvoke(
 +                self.llbuilder,
 +                func,
 +                args.as_ptr(),
 +                args.len() as c_uint,
 +                then,
 +                catch,
 +                bundle,
 +                UNNAMED,
 +            )
 +        }*/
 +    }
 +
 +    fn unreachable(&mut self) {
 +        let func = self.context.get_builtin_function("__builtin_unreachable");
 +        let block = self.block.expect("block");
 +        block.add_eval(None, self.context.new_call(None, func, &[]));
 +        let return_type = block.get_function().get_return_type();
 +        let void_type = self.context.new_type::<()>();
 +        if return_type == void_type {
 +            block.end_with_void_return(None)
 +        }
 +        else {
 +            let return_value = self.current_func()
 +                .new_local(None, return_type, "unreachableReturn");
 +            block.end_with_return(None, return_value)
 +        }
 +    }
 +
 +    fn add(&mut self, a: RValue<'gcc>, mut b: RValue<'gcc>) -> RValue<'gcc> {
 +        // FIXME: this should not be required.
 +        if format!("{:?}", a.get_type()) != format!("{:?}", b.get_type()) {
 +            b = self.context.new_cast(None, b, a.get_type());
 +        }
 +        a + b
 +    }
 +
 +    fn fadd(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
 +        a + b
 +    }
 +
 +    fn sub(&mut self, a: RValue<'gcc>, mut b: RValue<'gcc>) -> RValue<'gcc> {
 +        if a.get_type() != b.get_type() {
 +            b = self.context.new_cast(None, b, a.get_type());
 +        }
 +        a - b
 +    }
 +
 +    fn fsub(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
 +        a - b
 +    }
 +
 +    fn mul(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
 +        a * b
 +    }
 +
 +    fn fmul(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
 +        a * b
 +    }
 +
 +    fn udiv(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
 +        // TODO: convert the arguments to unsigned?
 +        a / b
 +    }
 +
 +    fn exactudiv(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
 +        // TODO: convert the arguments to unsigned?
 +        // TODO: poison if not exact.
 +        a / b
 +    }
 +
 +    fn sdiv(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
 +        // TODO: convert the arguments to signed?
 +        a / b
 +    }
 +
 +    fn exactsdiv(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
 +        // TODO: poison if not exact.
 +        // FIXME: rustc_codegen_ssa::mir::intrinsic uses different types for a and b but they
 +        // should be the same.
 +        let typ = a.get_type().to_signed(self);
 +        let a = self.context.new_cast(None, a, typ);
 +        let b = self.context.new_cast(None, b, typ);
 +        a / b
 +    }
 +
 +    fn fdiv(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
 +        a / b
 +    }
 +
 +    fn urem(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
 +        a % b
 +    }
 +
 +    fn srem(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
 +        a % b
 +    }
 +
 +    fn frem(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
 +        if a.get_type() == self.cx.float_type {
 +            let fmodf = self.context.get_builtin_function("fmodf");
 +            // FIXME: this seems to produce the wrong result.
 +            return self.context.new_call(None, fmodf, &[a, b]);
 +        }
 +        assert_eq!(a.get_type(), self.cx.double_type);
 +
 +        let fmod = self.context.get_builtin_function("fmod");
 +        return self.context.new_call(None, fmod, &[a, b]);
 +    }
 +
 +    fn shl(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
 +        // FIXME: remove the casts when libgccjit can shift an unsigned number by an unsigned number.
 +        let a_type = a.get_type();
 +        let b_type = b.get_type();
 +        if a_type.is_unsigned(self) && b_type.is_signed(self) {
 +            //println!("shl: {:?} -> {:?}", a, b_type);
 +            let a = self.context.new_cast(None, a, b_type);
 +            let result = a << b;
 +            //println!("shl: {:?} -> {:?}", result, a_type);
 +            self.context.new_cast(None, result, a_type)
 +        }
 +        else if a_type.is_signed(self) && b_type.is_unsigned(self) {
 +            //println!("shl: {:?} -> {:?}", b, a_type);
 +            let b = self.context.new_cast(None, b, a_type);
 +            a << b
 +        }
 +        else {
 +            a << b
 +        }
 +    }
 +
 +    fn lshr(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
 +        // FIXME: remove the casts when libgccjit can shift an unsigned number by an unsigned number.
 +        // TODO: cast to unsigned to do a logical shift if that does not work.
 +        let a_type = a.get_type();
 +        let b_type = b.get_type();
 +        if a_type.is_unsigned(self) && b_type.is_signed(self) {
 +            //println!("lshl: {:?} -> {:?}", a, b_type);
 +            let a = self.context.new_cast(None, a, b_type);
 +            let result = a >> b;
 +            //println!("lshl: {:?} -> {:?}", result, a_type);
 +            self.context.new_cast(None, result, a_type)
 +        }
 +        else if a_type.is_signed(self) && b_type.is_unsigned(self) {
 +            //println!("lshl: {:?} -> {:?}", b, a_type);
 +            let b = self.context.new_cast(None, b, a_type);
 +            a >> b
 +        }
 +        else {
 +            a >> b
 +        }
 +    }
 +
 +    fn ashr(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
 +        // TODO: check whether behavior is an arithmetic shift for >> .
 +        // FIXME: remove the casts when libgccjit can shift an unsigned number by an unsigned number.
 +        let a_type = a.get_type();
 +        let b_type = b.get_type();
 +        if a_type.is_unsigned(self) && b_type.is_signed(self) {
 +            //println!("ashl: {:?} -> {:?}", a, b_type);
 +            let a = self.context.new_cast(None, a, b_type);
 +            let result = a >> b;
 +            //println!("ashl: {:?} -> {:?}", result, a_type);
 +            self.context.new_cast(None, result, a_type)
 +        }
 +        else if a_type.is_signed(self) && b_type.is_unsigned(self) {
 +            //println!("ashl: {:?} -> {:?}", b, a_type);
 +            let b = self.context.new_cast(None, b, a_type);
 +            a >> b
 +        }
 +        else {
 +            a >> b
 +        }
 +    }
 +
 +    fn and(&mut self, a: RValue<'gcc>, mut b: RValue<'gcc>) -> RValue<'gcc> {
 +        // FIXME: hack by putting the result in a variable to workaround this bug:
 +        // https://gcc.gnu.org/bugzilla//show_bug.cgi?id=95498
 +        if a.get_type() != b.get_type() {
 +            b = self.context.new_cast(None, b, a.get_type());
 +        }
 +        let res = self.current_func().new_local(None, b.get_type(), "andResult");
 +        self.llbb().add_assignment(None, res, a & b);
 +        res.to_rvalue()
 +    }
 +
 +    fn or(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
 +        // FIXME: hack by putting the result in a variable to workaround this bug:
 +        // https://gcc.gnu.org/bugzilla//show_bug.cgi?id=95498
 +        let res = self.current_func().new_local(None, b.get_type(), "orResult");
 +        self.llbb().add_assignment(None, res, a | b);
 +        res.to_rvalue()
 +    }
 +
 +    fn xor(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
 +        a ^ b
 +    }
 +
 +    fn neg(&mut self, a: RValue<'gcc>) -> RValue<'gcc> {
 +        // TODO: use new_unary_op()?
 +        self.cx.context.new_rvalue_from_long(a.get_type(), 0) - a
 +    }
 +
 +    fn fneg(&mut self, a: RValue<'gcc>) -> RValue<'gcc> {
 +        self.cx.context.new_unary_op(None, UnaryOp::Minus, a.get_type(), a)
 +    }
 +
 +    fn not(&mut self, a: RValue<'gcc>) -> RValue<'gcc> {
 +        let operation =
 +            if a.get_type().is_bool() {
 +                UnaryOp::LogicalNegate
 +            }
 +            else {
 +                UnaryOp::BitwiseNegate
 +            };
 +        self.cx.context.new_unary_op(None, operation, a.get_type(), a)
 +    }
 +
 +    fn unchecked_sadd(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
 +        a + b
 +    }
 +
 +    fn unchecked_uadd(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
 +        a + b
 +    }
 +
 +    fn unchecked_ssub(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
 +        a - b
 +    }
 +
 +    fn unchecked_usub(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
 +        // TODO: should generate poison value?
 +        a - b
 +    }
 +
 +    fn unchecked_smul(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
 +        a * b
 +    }
 +
 +    fn unchecked_umul(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
 +        a * b
 +    }
 +
 +    fn fadd_fast(&mut self, _lhs: RValue<'gcc>, _rhs: RValue<'gcc>) -> RValue<'gcc> {
 +        unimplemented!();
 +        /*unsafe {
 +            let instr = llvm::LLVMBuildFAdd(self.llbuilder, lhs, rhs, UNNAMED);
 +            llvm::LLVMRustSetHasUnsafeAlgebra(instr);
 +            instr
 +        }*/
 +    }
 +
 +    fn fsub_fast(&mut self, _lhs: RValue<'gcc>, _rhs: RValue<'gcc>) -> RValue<'gcc> {
 +        unimplemented!();
 +        /*unsafe {
 +            let instr = llvm::LLVMBuildFSub(self.llbuilder, lhs, rhs, UNNAMED);
 +            llvm::LLVMRustSetHasUnsafeAlgebra(instr);
 +            instr
 +        }*/
 +    }
 +
 +    fn fmul_fast(&mut self, _lhs: RValue<'gcc>, _rhs: RValue<'gcc>) -> RValue<'gcc> {
 +        unimplemented!();
 +        /*unsafe {
 +            let instr = llvm::LLVMBuildFMul(self.llbuilder, lhs, rhs, UNNAMED);
 +            llvm::LLVMRustSetHasUnsafeAlgebra(instr);
 +            instr
 +        }*/
 +    }
 +
 +    fn fdiv_fast(&mut self, _lhs: RValue<'gcc>, _rhs: RValue<'gcc>) -> RValue<'gcc> {
 +        unimplemented!();
 +        /*unsafe {
 +            let instr = llvm::LLVMBuildFDiv(self.llbuilder, lhs, rhs, UNNAMED);
 +            llvm::LLVMRustSetHasUnsafeAlgebra(instr);
 +            instr
 +        }*/
 +    }
 +
 +    fn frem_fast(&mut self, _lhs: RValue<'gcc>, _rhs: RValue<'gcc>) -> RValue<'gcc> {
 +        unimplemented!();
 +        /*unsafe {
 +            let instr = llvm::LLVMBuildFRem(self.llbuilder, lhs, rhs, UNNAMED);
 +            llvm::LLVMRustSetHasUnsafeAlgebra(instr);
 +            instr
 +        }*/
 +    }
 +
 +    fn checked_binop(&mut self, oop: OverflowOp, typ: Ty<'_>, lhs: Self::Value, rhs: Self::Value) -> (Self::Value, Self::Value) {
 +        use rustc_middle::ty::{Int, IntTy::*, Uint, UintTy::*};
 +
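 +        // Normalize isize/usize to a fixed-width kind, pick the matching __builtin_*_overflow
 +        // function, and let it write the result through the last argument while returning
 +        // whether the operation overflowed.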
 +        let new_kind =
 +            match typ.kind() {
 +                Int(t @ Isize) => Int(t.normalize(self.tcx.sess.target.pointer_width)),
 +                Uint(t @ Usize) => Uint(t.normalize(self.tcx.sess.target.pointer_width)),
 +                t @ (Uint(_) | Int(_)) => t.clone(),
 +                _ => panic!("tried to get overflow intrinsic for op applied to non-int type"),
 +            };
 +
 +        // TODO: remove duplication with intrinsic?
 +        let name =
 +            match oop {
 +                OverflowOp::Add =>
 +                    match new_kind {
 +                        Int(I8) => "__builtin_add_overflow",
 +                        Int(I16) => "__builtin_add_overflow",
 +                        Int(I32) => "__builtin_sadd_overflow",
 +                        Int(I64) => "__builtin_saddll_overflow",
 +                        Int(I128) => "__builtin_add_overflow",
 +
 +                        Uint(U8) => "__builtin_add_overflow",
 +                        Uint(U16) => "__builtin_add_overflow",
 +                        Uint(U32) => "__builtin_uadd_overflow",
 +                        Uint(U64) => "__builtin_uaddll_overflow",
 +                        Uint(U128) => "__builtin_add_overflow",
 +
 +                        _ => unreachable!(),
 +                    },
 +                OverflowOp::Sub =>
 +                    match new_kind {
 +                        Int(I8) => "__builtin_sub_overflow",
 +                        Int(I16) => "__builtin_sub_overflow",
 +                        Int(I32) => "__builtin_ssub_overflow",
 +                        Int(I64) => "__builtin_ssubll_overflow",
 +                        Int(I128) => "__builtin_sub_overflow",
 +
 +                        Uint(U8) => "__builtin_sub_overflow",
 +                        Uint(U16) => "__builtin_sub_overflow",
 +                        Uint(U32) => "__builtin_usub_overflow",
 +                        Uint(U64) => "__builtin_usubll_overflow",
 +                        Uint(U128) => "__builtin_sub_overflow",
 +
 +                        _ => unreachable!(),
 +                    },
 +                OverflowOp::Mul =>
 +                    match new_kind {
 +                        Int(I8) => "__builtin_mul_overflow",
 +                        Int(I16) => "__builtin_mul_overflow",
 +                        Int(I32) => "__builtin_smul_overflow",
 +                        Int(I64) => "__builtin_smulll_overflow",
 +                        Int(I128) => "__builtin_mul_overflow",
 +
 +                        Uint(U8) => "__builtin_mul_overflow",
 +                        Uint(U16) => "__builtin_mul_overflow",
 +                        Uint(U32) => "__builtin_umul_overflow",
 +                        Uint(U64) => "__builtin_umulll_overflow",
 +                        Uint(U128) => "__builtin_mul_overflow",
 +
 +                        _ => unreachable!(),
 +                    },
 +            };
 +
 +        let intrinsic = self.context.get_builtin_function(&name);
 +        let res = self.current_func()
 +            // TODO: is it correct to use rhs type instead of the parameter typ?
 +            .new_local(None, rhs.get_type(), "binopResult")
 +            .get_address(None);
 +        let overflow = self.overflow_call(intrinsic, &[lhs, rhs, res], None);
 +        (res.dereference(None).to_rvalue(), overflow)
 +    }
 +
 +    fn alloca(&mut self, ty: Type<'gcc>, align: Align) -> RValue<'gcc> {
 +        // FIXME: this checks that we don't call get_aligned() a second time on a type.
 +        // Ideally, we shouldn't need to do this check.
 +        let aligned_type =
 +            if ty == self.cx.u128_type || ty == self.cx.i128_type {
 +                ty
 +            }
 +            else {
 +                ty.get_aligned(align.bytes())
 +            };
 +        // TODO: It might be better to return a LValue, but fixing the rustc API is non-trivial.
 +        self.stack_var_count.set(self.stack_var_count.get() + 1);
 +        self.current_func().new_local(None, aligned_type, &format!("stack_var_{}", self.stack_var_count.get())).get_address(None)
 +    }
 +
 +    fn dynamic_alloca(&mut self, _ty: Type<'gcc>, _align: Align) -> RValue<'gcc> {
 +        unimplemented!();
 +        /*unsafe {
 +            let alloca = llvm::LLVMBuildAlloca(self.llbuilder, ty, UNNAMED);
 +            llvm::LLVMSetAlignment(alloca, align.bytes() as c_uint);
 +            alloca
 +        }*/
 +    }
 +
 +    fn array_alloca(&mut self, _ty: Type<'gcc>, _len: RValue<'gcc>, _align: Align) -> RValue<'gcc> {
 +        unimplemented!();
 +        /*unsafe {
 +            let alloca = llvm::LLVMBuildArrayAlloca(self.llbuilder, ty, len, UNNAMED);
 +            llvm::LLVMSetAlignment(alloca, align.bytes() as c_uint);
 +            alloca
 +        }*/
 +    }
 +
 +    fn load(&mut self, _ty: Type<'gcc>, ptr: RValue<'gcc>, _align: Align) -> RValue<'gcc> {
 +        // TODO: use ty.
 +        let block = self.llbb();
 +        let function = block.get_function();
 +        // NOTE: instead of returning the dereference here, we have to assign it to a variable in
 +        // the current basic block. Otherwise, it could be used in another basic block, causing a
 +        // dereference after a drop, for instance.
 +        // TODO: handle align.
 +        let deref = ptr.dereference(None).to_rvalue();
 +        let value_type = deref.get_type();
 +        unsafe { RETURN_VALUE_COUNT += 1 };
 +        let loaded_value = function.new_local(None, value_type, &format!("loadedValue{}", unsafe { RETURN_VALUE_COUNT }));
 +        block.add_assignment(None, loaded_value, deref);
 +        loaded_value.to_rvalue()
 +    }
 +
 +    fn volatile_load(&mut self, _ty: Type<'gcc>, ptr: RValue<'gcc>) -> RValue<'gcc> {
 +        // TODO: use ty.
 +        //println!("5: volatile load: {:?} to {:?}", ptr, ptr.get_type().make_volatile());
 +        let ptr = self.context.new_cast(None, ptr, ptr.get_type().make_volatile());
 +        //println!("6");
 +        ptr.dereference(None).to_rvalue()
 +    }
 +
 +    fn atomic_load(&mut self, _ty: Type<'gcc>, ptr: RValue<'gcc>, order: AtomicOrdering, size: Size) -> RValue<'gcc> {
 +        // TODO: use ty.
 +        // TODO: handle alignment.
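 +        // The load goes through the __atomic_load_<size> builtin: the pointer is cast to a
 +        // volatile const void pointer and the ordering is passed as an integer constant.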
 +        let atomic_load = self.context.get_builtin_function(&format!("__atomic_load_{}", size.bytes()));
 +        let ordering = self.context.new_rvalue_from_int(self.i32_type, order.to_gcc());
 +
 +        let volatile_const_void_ptr_type = self.context.new_type::<*mut ()>().make_const().make_volatile();
 +        let ptr = self.context.new_cast(None, ptr, volatile_const_void_ptr_type);
 +        self.context.new_call(None, atomic_load, &[ptr, ordering])
 +    }
 +
 +    fn load_operand(&mut self, place: PlaceRef<'tcx, RValue<'gcc>>) -> OperandRef<'tcx, RValue<'gcc>> {
 +        //debug!("PlaceRef::load: {:?}", place);
 +
 +        assert_eq!(place.llextra.is_some(), place.layout.is_unsized());
 +
 +        if place.layout.is_zst() {
 +            return OperandRef::new_zst(self, place.layout);
 +        }
 +
 +        fn scalar_load_metadata<'a, 'gcc, 'tcx>(bx: &mut Builder<'a, 'gcc, 'tcx>, load: RValue<'gcc>, scalar: &abi::Scalar) {
 +            let vr = scalar.valid_range.clone();
 +            match scalar.value {
 +                abi::Int(..) => {
 +                    let range = scalar.valid_range_exclusive(bx);
 +                    if range.start != range.end {
 +                        bx.range_metadata(load, range);
 +                    }
 +                }
 +                abi::Pointer if vr.start() < vr.end() && !vr.contains(&0) => {
 +                    bx.nonnull_metadata(load);
 +                }
 +                _ => {}
 +            }
 +        }
 +
 +        let val =
 +            if let Some(llextra) = place.llextra {
 +                OperandValue::Ref(place.llval, Some(llextra), place.align)
 +            }
 +            else if place.layout.is_gcc_immediate() {
 +                let const_llval = None;
 +                /*unsafe {
 +                    if let Some(global) = llvm::LLVMIsAGlobalVariable(place.llval) {
 +                        if llvm::LLVMIsGlobalConstant(global) == llvm::True {
 +                            const_llval = llvm::LLVMGetInitializer(global);
 +                        }
 +                    }
 +                }*/
 +                let llval = const_llval.unwrap_or_else(|| {
 +                    let load = self.load(place.llval.get_type(), place.llval, place.align);
 +                    if let abi::Abi::Scalar(ref scalar) = place.layout.abi {
 +                        scalar_load_metadata(self, load, scalar);
 +                    }
 +                    load
 +                });
 +                OperandValue::Immediate(self.to_immediate(llval, place.layout))
 +            }
 +            else if let abi::Abi::ScalarPair(ref a, ref b) = place.layout.abi {
 +                let b_offset = a.value.size(self).align_to(b.value.align(self).abi);
++                let pair_type = place.layout.gcc_type(self, false);
 +
 +                let mut load = |i, scalar: &abi::Scalar, align| {
-                     let llptr = self.struct_gep(place.llval, i as u64);
++                    let llptr = self.struct_gep(pair_type, place.llval, i as u64);
 +                    let load = self.load(llptr.get_type(), llptr, align);
 +                    scalar_load_metadata(self, load, scalar);
 +                    if scalar.is_bool() { self.trunc(load, self.type_i1()) } else { load }
 +                };
 +
 +                OperandValue::Pair(
 +                    load(0, a, place.align),
 +                    load(1, b, place.align.restrict_for_offset(b_offset)),
 +                )
 +            }
 +            else {
 +                OperandValue::Ref(place.llval, None, place.align)
 +            };
 +
 +        OperandRef { val, layout: place.layout }
 +    }
 +
 +    fn write_operand_repeatedly(mut self, cg_elem: OperandRef<'tcx, RValue<'gcc>>, count: u64, dest: PlaceRef<'tcx, RValue<'gcc>>) -> Self {
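 +        // Build a loop over the destination: the header block checks whether the current
 +        // pointer reached the end, the body stores the element and advances the pointer, and
 +        // the "next" block is returned as the new insertion point.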
 +        let zero = self.const_usize(0);
 +        let count = self.const_usize(count);
 +        let start = dest.project_index(&mut self, zero).llval;
 +        let end = dest.project_index(&mut self, count).llval;
 +
 +        let mut header_bx = self.build_sibling_block("repeat_loop_header");
 +        let mut body_bx = self.build_sibling_block("repeat_loop_body");
 +        let next_bx = self.build_sibling_block("repeat_loop_next");
 +
 +        let ptr_type = start.get_type();
 +        let current = self.llbb().get_function().new_local(None, ptr_type, "loop_var");
 +        let current_val = current.to_rvalue();
 +        self.assign(current, start);
 +
 +        self.br(header_bx.llbb());
 +
 +        let keep_going = header_bx.icmp(IntPredicate::IntNE, current_val, end);
 +        header_bx.cond_br(keep_going, body_bx.llbb(), next_bx.llbb());
 +
 +        let align = dest.align.restrict_for_offset(dest.layout.field(self.cx(), 0).size);
 +        cg_elem.val.store(&mut body_bx, PlaceRef::new_sized_aligned(current_val, cg_elem.layout, align));
 +
-         let next = body_bx.inbounds_gep(current.to_rvalue(), &[self.const_usize(1)]);
++        let next = body_bx.inbounds_gep(self.backend_type(cg_elem.layout), current.to_rvalue(), &[self.const_usize(1)]);
 +        body_bx.llbb().add_assignment(None, current, next);
 +        body_bx.br(header_bx.llbb());
 +
 +        next_bx
 +    }
 +
 +    fn range_metadata(&mut self, _load: RValue<'gcc>, _range: Range<u128>) {
 +        // TODO
 +        /*if self.sess().target.target.arch == "amdgpu" {
 +            // amdgpu/LLVM does something weird and thinks a i64 value is
 +            // split into a v2i32, halving the bitwidth LLVM expects,
 +            // tripping an assertion. So, for now, just disable this
 +            // optimization.
 +            return;
 +        }
 +
 +        unsafe {
 +            let llty = self.cx.val_ty(load);
 +            let v = [
 +                self.cx.const_uint_big(llty, range.start),
 +                self.cx.const_uint_big(llty, range.end),
 +            ];
 +
 +            llvm::LLVMSetMetadata(
 +                load,
 +                llvm::MD_range as c_uint,
 +                llvm::LLVMMDNodeInContext(self.cx.llcx, v.as_ptr(), v.len() as c_uint),
 +            );
 +        }*/
 +    }
 +
 +    fn nonnull_metadata(&mut self, _load: RValue<'gcc>) {
 +        // TODO
 +        /*unsafe {
 +            llvm::LLVMSetMetadata(
 +                load,
 +                llvm::MD_nonnull as c_uint,
 +                llvm::LLVMMDNodeInContext(self.cx.llcx, ptr::null(), 0),
 +            );
 +        }*/
 +    }
 +
 +    fn store(&mut self, val: RValue<'gcc>, ptr: RValue<'gcc>, align: Align) -> RValue<'gcc> {
 +        self.store_with_flags(val, ptr, align, MemFlags::empty())
 +    }
 +
 +    fn store_with_flags(&mut self, val: RValue<'gcc>, ptr: RValue<'gcc>, _align: Align, _flags: MemFlags) -> RValue<'gcc> {
 +        //debug!("Store {:?} -> {:?} ({:?})", val, ptr, flags);
 +        let ptr = self.check_store(val, ptr);
 +        self.llbb().add_assignment(None, ptr.dereference(None), val);
 +        /*let align =
 +            if flags.contains(MemFlags::UNALIGNED) { 1 } else { align.bytes() as c_uint };
 +        llvm::LLVMSetAlignment(store, align);
 +        if flags.contains(MemFlags::VOLATILE) {
 +            llvm::LLVMSetVolatile(store, llvm::True);
 +        }
 +        if flags.contains(MemFlags::NONTEMPORAL) {
 +            // According to LLVM [1] building a nontemporal store must
 +            // *always* point to a metadata value of the integer 1.
 +            //
 +            // [1]: http://llvm.org/docs/LangRef.html#store-instruction
 +            let one = self.cx.const_i32(1);
 +            let node = llvm::LLVMMDNodeInContext(self.cx.llcx, &one, 1);
 +            llvm::LLVMSetMetadata(store, llvm::MD_nontemporal as c_uint, node);
 +        }*/
 +        // NOTE: dummy value here since it's never used. FIXME: API should not return a value here?
 +        self.cx.context.new_rvalue_zero(self.type_i32())
 +    }
 +
 +    fn atomic_store(&mut self, value: RValue<'gcc>, ptr: RValue<'gcc>, order: AtomicOrdering, size: Size) {
 +        // TODO: handle alignment.
 +        let atomic_store = self.context.get_builtin_function(&format!("__atomic_store_{}", size.bytes()));
 +        let ordering = self.context.new_rvalue_from_int(self.i32_type, order.to_gcc());
 +        let volatile_const_void_ptr_type = self.context.new_type::<*mut ()>().make_const().make_volatile();
 +        let ptr = self.context.new_cast(None, ptr, volatile_const_void_ptr_type);
 +
 +        // FIXME: fix libgccjit to allow comparing an integer type with an aligned integer type because
 +        // the following cast is required to avoid this error:
 +        // gcc_jit_context_new_call: mismatching types for argument 2 of function "__atomic_store_4": assignment to param arg1 (type: int) from loadedValue3577 (type: unsigned int  __attribute__((aligned(4))))
 +        let int_type = atomic_store.get_param(1).to_rvalue().get_type();
 +        let value = self.context.new_cast(None, value, int_type);
 +        self.llbb()
 +            .add_eval(None, self.context.new_call(None, atomic_store, &[ptr, value, ordering]));
 +    }
 +
-     fn gep(&mut self, ptr: RValue<'gcc>, indices: &[RValue<'gcc>]) -> RValue<'gcc> {
++    fn gep(&mut self, _typ: Type<'gcc>, ptr: RValue<'gcc>, indices: &[RValue<'gcc>]) -> RValue<'gcc> {
 +        let mut result = ptr;
 +        for index in indices {
 +            result = self.context.new_array_access(None, result, *index).get_address(None).to_rvalue();
 +        }
 +        result
 +    }
 +
-     fn inbounds_gep(&mut self, ptr: RValue<'gcc>, indices: &[RValue<'gcc>]) -> RValue<'gcc> {
++    fn inbounds_gep(&mut self, _typ: Type<'gcc>, ptr: RValue<'gcc>, indices: &[RValue<'gcc>]) -> RValue<'gcc> {
 +        // FIXME: would be safer if doing the same thing (loop) as gep.
 +        // TODO: specify inbounds somehow.
 +        match indices.len() {
 +            1 => {
 +                self.context.new_array_access(None, ptr, indices[0]).get_address(None)
 +            },
 +            2 => {
 +                let array = ptr.dereference(None); // TODO: assert that first index is 0?
 +                self.context.new_array_access(None, array, indices[1]).get_address(None)
 +            },
 +            _ => unimplemented!(),
 +        }
 +    }
 +
-     fn struct_gep(&mut self, ptr: RValue<'gcc>, idx: u64) -> RValue<'gcc> {
++    fn struct_gep(&mut self, value_type: Type<'gcc>, ptr: RValue<'gcc>, idx: u64) -> RValue<'gcc> {
 +        // FIXME: it would be better if the API only called this on struct, not on arrays.
 +        assert_eq!(idx as usize as u64, idx);
 +        let value = ptr.dereference(None).to_rvalue();
-         let value_type = value.get_type();
 +
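 +        // Dispatch on the pointee type: arrays and vectors are indexed with an array access,
 +        // structs with a field access; any other type is unexpected here.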
 +        if value_type.is_array().is_some() {
 +            let index = self.context.new_rvalue_from_long(self.u64_type, i64::try_from(idx).expect("i64::try_from"));
 +            let element = self.context.new_array_access(None, value, index);
 +            element.get_address(None)
 +        }
 +        else if let Some(vector_type) = value_type.is_vector() {
 +            let array_type = vector_type.get_element_type().make_pointer();
 +            let array = self.bitcast(ptr, array_type);
 +            let index = self.context.new_rvalue_from_long(self.u64_type, i64::try_from(idx).expect("i64::try_from"));
 +            let element = self.context.new_array_access(None, array, index);
 +            element.get_address(None)
 +        }
 +        else if let Some(struct_type) = value_type.is_struct() {
 +            ptr.dereference_field(None, struct_type.get_field(idx as i32)).get_address(None)
 +        }
 +        else {
 +            panic!("Unexpected type {:?}", value_type);
 +        }
 +    }
 +
 +    /* Casts */
 +    fn trunc(&mut self, value: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc> {
 +        // TODO: check that it indeed truncates the value.
 +        //println!("trunc: {:?} -> {:?}", value, dest_ty);
 +        self.context.new_cast(None, value, dest_ty)
 +    }
 +
 +    fn sext(&mut self, value: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc> {
 +        // TODO: check that it indeed sign extends the value.
 +        //println!("Sext {:?} to {:?}", value, dest_ty);
 +        //if let Some(vector_type) = value.get_type().is_vector() {
 +        if dest_ty.is_vector().is_some() {
 +            // TODO: nothing to do as it is only for LLVM?
 +            return value;
 +            /*let dest_type = self.context.new_vector_type(dest_ty, vector_type.get_num_units() as u64);
 +            println!("Casting {:?} to {:?}", value, dest_type);
 +            return self.context.new_cast(None, value, dest_type);*/
 +        }
 +        self.context.new_cast(None, value, dest_ty)
 +    }
 +
 +    fn fptoui(&mut self, value: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc> {
 +        //println!("7: fptoui: {:?} to {:?}", value, dest_ty);
 +        let ret = self.context.new_cast(None, value, dest_ty);
 +        //println!("8");
 +        ret
 +        //unsafe { llvm::LLVMBuildFPToUI(self.llbuilder, val, dest_ty, UNNAMED) }
 +    }
 +
 +    fn fptosi(&mut self, value: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc> {
 +        self.context.new_cast(None, value, dest_ty)
 +    }
 +
 +    fn uitofp(&mut self, value: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc> {
 +        //println!("1: uitofp: {:?} -> {:?}", value, dest_ty);
 +        let ret = self.context.new_cast(None, value, dest_ty);
 +        //println!("2");
 +        ret
 +    }
 +
 +    fn sitofp(&mut self, value: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc> {
 +        //println!("3: sitofp: {:?} -> {:?}", value, dest_ty);
 +        let ret = self.context.new_cast(None, value, dest_ty);
 +        //println!("4");
 +        ret
 +    }
 +
 +    fn fptrunc(&mut self, value: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc> {
 +        // TODO: make sure it truncates.
 +        self.context.new_cast(None, value, dest_ty)
 +    }
 +
 +    fn fpext(&mut self, value: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc> {
 +        self.context.new_cast(None, value, dest_ty)
 +    }
 +
 +    fn ptrtoint(&mut self, value: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc> {
 +        self.cx.ptrtoint(self.block.expect("block"), value, dest_ty)
 +    }
 +
 +    fn inttoptr(&mut self, value: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc> {
 +        self.cx.inttoptr(self.block.expect("block"), value, dest_ty)
 +    }
 +
 +    fn bitcast(&mut self, value: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc> {
 +        self.cx.const_bitcast(value, dest_ty)
 +    }
 +
 +    fn intcast(&mut self, value: RValue<'gcc>, dest_typ: Type<'gcc>, _is_signed: bool) -> RValue<'gcc> {
 +        // NOTE: is_signed is for value, not dest_typ.
 +        //println!("intcast: {:?} ({:?}) -> {:?}", value, value.get_type(), dest_typ);
 +        self.cx.context.new_cast(None, value, dest_typ)
 +    }
 +
 +    fn pointercast(&mut self, value: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc> {
 +        //println!("pointercast: {:?} ({:?}) -> {:?}", value, value.get_type(), dest_ty);
 +        let val_type = value.get_type();
 +        match (type_is_pointer(val_type), type_is_pointer(dest_ty)) {
 +            (false, true) => {
 +                // NOTE: Projecting a field of a pointer type will attempt a cast from a signed char to
 +                // a pointer, which is not supported by gccjit.
 +                return self.cx.context.new_cast(None, self.inttoptr(value, val_type.make_pointer()), dest_ty);
 +            },
 +            (false, false) => {
 +                // When they are not pointers, we want a transmute (or reinterpret_cast).
 +                //self.cx.context.new_cast(None, value, dest_ty)
 +                self.bitcast(value, dest_ty)
 +            },
 +            (true, true) => self.cx.context.new_cast(None, value, dest_ty),
 +            (true, false) => unimplemented!(),
 +        }
 +    }
 +
 +    /* Comparisons */
 +    fn icmp(&mut self, op: IntPredicate, lhs: RValue<'gcc>, mut rhs: RValue<'gcc>) -> RValue<'gcc> {
 +        if lhs.get_type() != rhs.get_type() {
 +            // NOTE: hack because we try to cast a vector type to the same vector type.
 +            if format!("{:?}", lhs.get_type()) != format!("{:?}", rhs.get_type()) {
 +                rhs = self.context.new_cast(None, rhs, lhs.get_type());
 +            }
 +        }
 +        self.context.new_comparison(None, op.to_gcc_comparison(), lhs, rhs)
 +    }
 +
 +    fn fcmp(&mut self, op: RealPredicate, lhs: RValue<'gcc>, rhs: RValue<'gcc>) -> RValue<'gcc> {
 +        self.context.new_comparison(None, op.to_gcc_comparison(), lhs, rhs)
 +    }
 +
 +    /* Miscellaneous instructions */
 +    fn memcpy(&mut self, dst: RValue<'gcc>, dst_align: Align, src: RValue<'gcc>, src_align: Align, size: RValue<'gcc>, flags: MemFlags) {
 +        if flags.contains(MemFlags::NONTEMPORAL) {
 +            // HACK(nox): This is inefficient but there is no nontemporal memcpy.
 +            let val = self.load(src.get_type(), src, src_align);
 +            let ptr = self.pointercast(dst, self.type_ptr_to(self.val_ty(val)));
 +            self.store_with_flags(val, ptr, dst_align, flags);
 +            return;
 +        }
 +        let size = self.intcast(size, self.type_size_t(), false);
 +        let _is_volatile = flags.contains(MemFlags::VOLATILE);
 +        let dst = self.pointercast(dst, self.type_i8p());
 +        let src = self.pointercast(src, self.type_ptr_to(self.type_void()));
 +        let memcpy = self.context.get_builtin_function("memcpy");
 +        let block = self.block.expect("block");
 +        // TODO: handle aligns and is_volatile.
 +        block.add_eval(None, self.context.new_call(None, memcpy, &[dst, src, size]));
 +    }
 +
 +    fn memmove(&mut self, dst: RValue<'gcc>, dst_align: Align, src: RValue<'gcc>, src_align: Align, size: RValue<'gcc>, flags: MemFlags) {
 +        if flags.contains(MemFlags::NONTEMPORAL) {
 +            // HACK(nox): This is inefficient but there is no nontemporal memmove.
 +            let val = self.load(src.get_type(), src, src_align);
 +            let ptr = self.pointercast(dst, self.type_ptr_to(self.val_ty(val)));
 +            self.store_with_flags(val, ptr, dst_align, flags);
 +            return;
 +        }
 +        let size = self.intcast(size, self.type_size_t(), false);
 +        let _is_volatile = flags.contains(MemFlags::VOLATILE);
 +        let dst = self.pointercast(dst, self.type_i8p());
 +        let src = self.pointercast(src, self.type_ptr_to(self.type_void()));
 +
 +        let memmove = self.context.get_builtin_function("memmove");
 +        let block = self.block.expect("block");
 +        // TODO: handle is_volatile.
 +        block.add_eval(None, self.context.new_call(None, memmove, &[dst, src, size]));
 +    }
 +
 +    fn memset(&mut self, ptr: RValue<'gcc>, fill_byte: RValue<'gcc>, size: RValue<'gcc>, _align: Align, flags: MemFlags) {
 +        let _is_volatile = flags.contains(MemFlags::VOLATILE);
 +        let ptr = self.pointercast(ptr, self.type_i8p());
 +        let memset = self.context.get_builtin_function("memset");
 +        let block = self.block.expect("block");
 +        // TODO: handle aligns and is_volatile.
 +        //println!("memset: {:?} -> {:?}", fill_byte, self.i32_type);
 +        let fill_byte = self.context.new_cast(None, fill_byte, self.i32_type);
 +        let size = self.intcast(size, self.type_size_t(), false);
 +        block.add_eval(None, self.context.new_call(None, memset, &[ptr, fill_byte, size]));
 +    }
 +
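 +    // NOTE: `select` is lowered to an explicit if/else: `then_val` and `else_val` are assigned
 +    // to a fresh local in separate blocks, and codegen continues in a new `after` block.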
 +    fn select(&mut self, cond: RValue<'gcc>, then_val: RValue<'gcc>, mut else_val: RValue<'gcc>) -> RValue<'gcc> {
 +        let func = self.current_func();
 +        let variable = func.new_local(None, then_val.get_type(), "selectVar");
 +        let then_block = func.new_block("then");
 +        let else_block = func.new_block("else");
 +        let after_block = func.new_block("after");
 +        self.llbb().end_with_conditional(None, cond, then_block, else_block);
 +
 +        then_block.add_assignment(None, variable, then_val);
 +        then_block.end_with_jump(None, after_block);
 +
 +        if then_val.get_type() != else_val.get_type() {
 +            else_val = self.context.new_cast(None, else_val, then_val.get_type());
 +        }
 +        else_block.add_assignment(None, variable, else_val);
 +        else_block.end_with_jump(None, after_block);
 +
 +        // NOTE: since jumps were added in a place rustc does not expect, the current blocks in the
 +        // state need to be updated.
 +        self.block = Some(after_block);
 +        *self.cx.current_block.borrow_mut() = Some(after_block);
 +
 +        variable.to_rvalue()
 +    }
 +
 +    #[allow(dead_code)]
 +    fn va_arg(&mut self, _list: RValue<'gcc>, _ty: Type<'gcc>) -> RValue<'gcc> {
 +        unimplemented!();
 +        //unsafe { llvm::LLVMBuildVAArg(self.llbuilder, list, ty, UNNAMED) }
 +    }
 +
 +    fn extract_element(&mut self, _vec: RValue<'gcc>, _idx: RValue<'gcc>) -> RValue<'gcc> {
 +        unimplemented!();
 +        //unsafe { llvm::LLVMBuildExtractElement(self.llbuilder, vec, idx, UNNAMED) }
 +    }
 +
 +    fn vector_splat(&mut self, _num_elts: usize, _elt: RValue<'gcc>) -> RValue<'gcc> {
 +        unimplemented!();
 +        /*unsafe {
 +            let elt_ty = self.cx.val_ty(elt);
 +            let undef = llvm::LLVMGetUndef(self.type_vector(elt_ty, num_elts as u64));
 +            let vec = self.insert_element(undef, elt, self.cx.const_i32(0));
 +            let vec_i32_ty = self.type_vector(self.type_i32(), num_elts as u64);
 +            self.shuffle_vector(vec, undef, self.const_null(vec_i32_ty))
 +        }*/
 +    }
 +
 +    fn extract_value(&mut self, aggregate_value: RValue<'gcc>, idx: u64) -> RValue<'gcc> {
 +        // FIXME: it would be better if the API only called this on structs, not on arrays.
 +        assert_eq!(idx as usize as u64, idx);
 +        let value_type = aggregate_value.get_type();
 +
 +        if value_type.is_array().is_some() {
 +            let index = self.context.new_rvalue_from_long(self.u64_type, i64::try_from(idx).expect("i64::try_from"));
 +            let element = self.context.new_array_access(None, aggregate_value, index);
 +            element.get_address(None)
 +        }
 +        else if value_type.is_vector().is_some() {
 +            panic!();
 +        }
 +        else if let Some(pointer_type) = value_type.get_pointee() {
 +            if let Some(struct_type) = pointer_type.is_struct() {
 +                // NOTE: hack to work around a limitation of the rustc API: see comment on
 +                // CodegenCx.structs_as_pointer
 +                aggregate_value.dereference_field(None, struct_type.get_field(idx as i32)).to_rvalue()
 +            }
 +            else {
 +                panic!("Unexpected type {:?}", value_type);
 +            }
 +        }
 +        else if let Some(struct_type) = value_type.is_struct() {
 +            aggregate_value.access_field(None, struct_type.get_field(idx as i32)).to_rvalue()
 +        }
 +        else {
 +            panic!("Unexpected type {:?}", value_type);
 +        }
 +        /*assert_eq!(idx as c_uint as u64, idx);
 +        unsafe { llvm::LLVMBuildExtractValue(self.llbuilder, agg_val, idx as c_uint, UNNAMED) }*/
 +    }
 +
 +    fn insert_value(&mut self, aggregate_value: RValue<'gcc>, value: RValue<'gcc>, idx: u64) -> RValue<'gcc> {
 +        // FIXME: it would be better if the API only called this on structs, not on arrays.
 +        assert_eq!(idx as usize as u64, idx);
 +        let value_type = aggregate_value.get_type();
 +
 +        let lvalue =
 +            if value_type.is_array().is_some() {
 +                let index = self.context.new_rvalue_from_long(self.u64_type, i64::try_from(idx).expect("i64::try_from"));
 +                self.context.new_array_access(None, aggregate_value, index)
 +            }
 +            else if value_type.is_vector().is_some() {
 +                panic!();
 +            }
 +            else if let Some(pointer_type) = value_type.get_pointee() {
 +                if let Some(struct_type) = pointer_type.is_struct() {
 +                    // NOTE: hack to work around a limitation of the rustc API: see comment on
 +                    // CodegenCx.structs_as_pointer
 +                    aggregate_value.dereference_field(None, struct_type.get_field(idx as i32))
 +                }
 +                else {
 +                    panic!("Unexpected type {:?}", value_type);
 +                }
 +            }
 +            else {
 +                panic!("Unexpected type {:?}", value_type);
 +            };
 +        self.llbb().add_assignment(None, lvalue, value);
 +
 +        aggregate_value
 +    }
 +
 +    fn landing_pad(&mut self, _ty: Type<'gcc>, _pers_fn: RValue<'gcc>, _num_clauses: usize) -> RValue<'gcc> {
-         unimplemented!();
++        let field1 = self.context.new_field(None, self.u8_type, "landing_pad_field_1");
++        let field2 = self.context.new_field(None, self.i32_type, "landing_pad_field_2");
++        let struct_type = self.context.new_struct_type(None, "landing_pad", &[field1, field2]);
++        self.current_func().new_local(None, struct_type.as_type(), "landing_pad")
++            .to_rvalue()
++        // TODO
 +        /*unsafe {
 +            llvm::LLVMBuildLandingPad(self.llbuilder, ty, pers_fn, num_clauses as c_uint, UNNAMED)
 +        }*/
 +    }
 +
 +    fn set_cleanup(&mut self, _landing_pad: RValue<'gcc>) {
-         unimplemented!();
++        // TODO
 +        /*unsafe {
 +            llvm::LLVMSetCleanup(landing_pad, llvm::True);
 +        }*/
 +    }
 +
 +    fn resume(&mut self, _exn: RValue<'gcc>) -> RValue<'gcc> {
 +        unimplemented!();
 +        //unsafe { llvm::LLVMBuildResume(self.llbuilder, exn) }
 +    }
 +
 +    fn cleanup_pad(&mut self, _parent: Option<RValue<'gcc>>, _args: &[RValue<'gcc>]) -> Funclet {
 +        unimplemented!();
 +        /*let name = const_cstr!("cleanuppad");
 +        let ret = unsafe {
 +            llvm::LLVMRustBuildCleanupPad(
 +                self.llbuilder,
 +                parent,
 +                args.len() as c_uint,
 +                args.as_ptr(),
 +                name.as_ptr(),
 +            )
 +        };
 +        Funclet::new(ret.expect("LLVM does not have support for cleanuppad"))*/
 +    }
 +
 +    fn cleanup_ret(&mut self, _funclet: &Funclet, _unwind: Option<Block<'gcc>>) -> RValue<'gcc> {
 +        unimplemented!();
 +        /*let ret =
 +            unsafe { llvm::LLVMRustBuildCleanupRet(self.llbuilder, funclet.cleanuppad(), unwind) };
 +        ret.expect("LLVM does not have support for cleanupret")*/
 +    }
 +
 +    fn catch_pad(&mut self, _parent: RValue<'gcc>, _args: &[RValue<'gcc>]) -> Funclet {
 +        unimplemented!();
 +        /*let name = const_cstr!("catchpad");
 +        let ret = unsafe {
 +            llvm::LLVMRustBuildCatchPad(
 +                self.llbuilder,
 +                parent,
 +                args.len() as c_uint,
 +                args.as_ptr(),
 +                name.as_ptr(),
 +            )
 +        };
 +        Funclet::new(ret.expect("LLVM does not have support for catchpad"))*/
 +    }
 +
 +    fn catch_switch(&mut self, _parent: Option<RValue<'gcc>>, _unwind: Option<Block<'gcc>>, _num_handlers: usize) -> RValue<'gcc> {
 +        unimplemented!();
 +        /*let name = const_cstr!("catchswitch");
 +        let ret = unsafe {
 +            llvm::LLVMRustBuildCatchSwitch(
 +                self.llbuilder,
 +                parent,
 +                unwind,
 +                num_handlers as c_uint,
 +                name.as_ptr(),
 +            )
 +        };
 +        ret.expect("LLVM does not have support for catchswitch")*/
 +    }
 +
 +    fn add_handler(&mut self, _catch_switch: RValue<'gcc>, _handler: Block<'gcc>) {
 +        unimplemented!();
 +        /*unsafe {
 +            llvm::LLVMRustAddHandler(catch_switch, handler);
 +        }*/
 +    }
 +
 +    fn set_personality_fn(&mut self, _personality: RValue<'gcc>) {
-     fn call(&mut self, func: RValue<'gcc>, args: &[RValue<'gcc>], funclet: Option<&Funclet>) -> RValue<'gcc> {
++        // TODO
 +        /*unsafe {
 +            llvm::LLVMSetPersonalityFn(self.llfn(), personality);
 +        }*/
 +    }
 +
 +    // Atomic Operations
 +    fn atomic_cmpxchg(&mut self, dst: RValue<'gcc>, cmp: RValue<'gcc>, src: RValue<'gcc>, order: AtomicOrdering, failure_order: AtomicOrdering, weak: bool) -> RValue<'gcc> {
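 +        // Perform the exchange through `compare_exchange` and repack the result into the
 +        // (value, success) pair that rustc expects; `expected` is updated by the exchange on failure.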
 +        let expected = self.current_func().new_local(None, cmp.get_type(), "expected");
 +        self.llbb().add_assignment(None, expected, cmp);
 +        let success = self.compare_exchange(dst, expected, src, order, failure_order, weak);
 +
 +        let pair_type = self.cx.type_struct(&[src.get_type(), self.bool_type], false);
 +        let result = self.current_func().new_local(None, pair_type, "atomic_cmpxchg_result");
 +        let align = Align::from_bits(64).expect("align"); // TODO: use good align.
 +
 +        let value_type = result.to_rvalue().get_type();
 +        if let Some(struct_type) = value_type.is_struct() {
 +            self.store(success, result.access_field(None, struct_type.get_field(1)).get_address(None), align);
 +            // NOTE: since success contains the call to the intrinsic, it must be stored before
 +            // expected so that we store expected after the call.
 +            self.store(expected.to_rvalue(), result.access_field(None, struct_type.get_field(0)).get_address(None), align);
 +        }
 +        // TODO: handle when value is not a struct.
 +
 +        result.to_rvalue()
 +    }
 +
 +    fn atomic_rmw(&mut self, op: AtomicRmwBinOp, dst: RValue<'gcc>, src: RValue<'gcc>, order: AtomicOrdering) -> RValue<'gcc> {
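 +        // The operand size in bytes selects the matching GCC builtin, e.g. a 32-bit operand
 +        // maps to `__atomic_fetch_add_4`.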
 +        let size = self.cx.int_width(src.get_type()) / 8;
 +        let name =
 +            match op {
 +                AtomicRmwBinOp::AtomicXchg => format!("__atomic_exchange_{}", size),
 +                AtomicRmwBinOp::AtomicAdd => format!("__atomic_fetch_add_{}", size),
 +                AtomicRmwBinOp::AtomicSub => format!("__atomic_fetch_sub_{}", size),
 +                AtomicRmwBinOp::AtomicAnd => format!("__atomic_fetch_and_{}", size),
 +                AtomicRmwBinOp::AtomicNand => format!("__atomic_fetch_nand_{}", size),
 +                AtomicRmwBinOp::AtomicOr => format!("__atomic_fetch_or_{}", size),
 +                AtomicRmwBinOp::AtomicXor => format!("__atomic_fetch_xor_{}", size),
 +                AtomicRmwBinOp::AtomicMax => return self.atomic_extremum(ExtremumOperation::Max, dst, src, order),
 +                AtomicRmwBinOp::AtomicMin => return self.atomic_extremum(ExtremumOperation::Min, dst, src, order),
 +                AtomicRmwBinOp::AtomicUMax => return self.atomic_extremum(ExtremumOperation::Max, dst, src, order),
 +                AtomicRmwBinOp::AtomicUMin => return self.atomic_extremum(ExtremumOperation::Min, dst, src, order),
 +            };
 +
 +
 +        let atomic_function = self.context.get_builtin_function(name);
 +        let order = self.context.new_rvalue_from_int(self.i32_type, order.to_gcc());
 +
 +        let void_ptr_type = self.context.new_type::<*mut ()>();
 +        let volatile_void_ptr_type = void_ptr_type.make_volatile();
 +        let dst = self.context.new_cast(None, dst, volatile_void_ptr_type);
 +        // NOTE: not sure why, but we have the wrong type here.
 +        let new_src_type = atomic_function.get_param(1).to_rvalue().get_type();
 +        let src = self.context.new_cast(None, src, new_src_type);
 +        let res = self.context.new_call(None, atomic_function, &[dst, src, order]);
 +        self.context.new_cast(None, res, src.get_type())
 +    }
 +
 +    fn atomic_fence(&mut self, order: AtomicOrdering, scope: SynchronizationScope) {
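 +        // `__atomic_signal_fence` only orders against code on the current thread (e.g. signal
 +        // handlers), acting as a compiler barrier; `__atomic_thread_fence` orders across threads.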
 +        let name =
 +            match scope {
 +                SynchronizationScope::SingleThread => "__atomic_signal_fence",
 +                SynchronizationScope::CrossThread => "__atomic_thread_fence",
 +            };
 +        let thread_fence = self.context.get_builtin_function(name);
 +        let order = self.context.new_rvalue_from_int(self.i32_type, order.to_gcc());
 +        self.llbb().add_eval(None, self.context.new_call(None, thread_fence, &[order]));
 +    }
 +
 +    fn set_invariant_load(&mut self, load: RValue<'gcc>) {
 +        // NOTE: Hack to consider vtable function pointers as non-global-variable function pointers.
 +        self.normal_function_addresses.borrow_mut().insert(load);
 +        // TODO
 +        /*unsafe {
 +            llvm::LLVMSetMetadata(
 +                load,
 +                llvm::MD_invariant_load as c_uint,
 +                llvm::LLVMMDNodeInContext(self.cx.llcx, ptr::null(), 0),
 +            );
 +        }*/
 +    }
 +
 +    fn lifetime_start(&mut self, _ptr: RValue<'gcc>, _size: Size) {
 +        // TODO
 +        //self.call_lifetime_intrinsic("llvm.lifetime.start.p0i8", ptr, size);
 +    }
 +
 +    fn lifetime_end(&mut self, _ptr: RValue<'gcc>, _size: Size) {
 +        // TODO
 +        //self.call_lifetime_intrinsic("llvm.lifetime.end.p0i8", ptr, size);
 +    }
 +
++    fn call(&mut self, _typ: Type<'gcc>, func: RValue<'gcc>, args: &[RValue<'gcc>], funclet: Option<&Funclet>) -> RValue<'gcc> {
 +        // FIXME: remove once there is a proper API.
 +        let gcc_func = unsafe { std::mem::transmute(func) };
 +        if self.functions.borrow().values().find(|value| **value == gcc_func).is_some() {
 +            self.function_call(func, args, funclet)
 +        }
 +        else {
 +            // If it's not a function that was defined, it's a function pointer.
 +            self.function_ptr_call(func, args, funclet)
 +        }
 +    }
 +
 +    fn zext(&mut self, value: RValue<'gcc>, dest_typ: Type<'gcc>) -> RValue<'gcc> {
 +        // FIXME: this does not zero-extend.
 +        if value.get_type().is_bool() && dest_typ.is_i8(&self.cx) {
 +            // FIXME: hack because base::from_immediate converts i1 to i8.
 +            // Fix the code in codegen_ssa::base::from_immediate.
 +            return value;
 +        }
 +        //println!("zext: {:?} -> {:?}", value, dest_typ);
 +        self.context.new_cast(None, value, dest_typ)
 +    }
 +
 +    fn cx(&self) -> &CodegenCx<'gcc, 'tcx> {
 +        self.cx
 +    }
 +
 +    fn do_not_inline(&mut self, _llret: RValue<'gcc>) {
 +        unimplemented!();
 +        //llvm::Attribute::NoInline.apply_callsite(llvm::AttributePlace::Function, llret);
 +    }
 +
 +    fn set_span(&mut self, _span: Span) {}
 +
 +    fn from_immediate(&mut self, val: Self::Value) -> Self::Value {
 +        if self.cx().val_ty(val) == self.cx().type_i1() {
 +            self.zext(val, self.cx().type_i8())
 +        }
 +        else {
 +            val
 +        }
 +    }
 +
 +    fn to_immediate_scalar(&mut self, val: Self::Value, scalar: &abi::Scalar) -> Self::Value {
 +        if scalar.is_bool() {
 +            return self.trunc(val, self.cx().type_i1());
 +        }
 +        val
 +    }
 +
 +    fn fptoui_sat(&mut self, _val: RValue<'gcc>, _dest_ty: Type<'gcc>) -> Option<RValue<'gcc>> {
 +        None
 +    }
 +
 +    fn fptosi_sat(&mut self, _val: RValue<'gcc>, _dest_ty: Type<'gcc>) -> Option<RValue<'gcc>> {
 +        None
 +    }
 +
 +    fn instrprof_increment(&mut self, _fn_name: RValue<'gcc>, _hash: RValue<'gcc>, _num_counters: RValue<'gcc>, _index: RValue<'gcc>) {
 +        unimplemented!();
 +        /*debug!(
 +            "instrprof_increment() with args ({:?}, {:?}, {:?}, {:?})",
 +            fn_name, hash, num_counters, index
 +        );
 +
 +        let llfn = unsafe { llvm::LLVMRustGetInstrProfIncrementIntrinsic(self.cx().llmod) };
 +        let args = &[fn_name, hash, num_counters, index];
 +        let args = self.check_call("call", llfn, args);
 +
 +        unsafe {
 +            let _ = llvm::LLVMRustBuildCall(
 +                self.llbuilder,
 +                llfn,
 +                args.as_ptr() as *const &llvm::Value,
 +                args.len() as c_uint,
 +                None,
 +            );
 +        }*/
 +    }
 +}
 +
 +impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
 +    pub fn shuffle_vector(&mut self, v1: RValue<'gcc>, v2: RValue<'gcc>, mask: RValue<'gcc>) -> RValue<'gcc> {
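 +        // NOTE: this simply declares and calls an extern `_mm_shuffle_epi8`, so only the
 +        // x86 (SSSE3) byte-shuffle case is covered for now.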
 +        let return_type = v1.get_type();
 +        let params = [
 +            self.context.new_parameter(None, return_type, "v1"),
 +            self.context.new_parameter(None, return_type, "v2"),
 +            self.context.new_parameter(None, mask.get_type(), "mask"),
 +        ];
 +        let shuffle = self.context.new_function(None, FunctionType::Extern, return_type, &params, "_mm_shuffle_epi8", false);
 +        self.context.new_call(None, shuffle, &[v1, v2, mask])
 +    }
 +}
 +
 +impl<'a, 'gcc, 'tcx> StaticBuilderMethods for Builder<'a, 'gcc, 'tcx> {
 +    fn get_static(&mut self, def_id: DefId) -> RValue<'gcc> {
 +        // Forward to the `get_static` method of `CodegenCx`
 +        self.cx().get_static(def_id)
 +    }
 +}
 +
 +impl<'tcx> HasParamEnv<'tcx> for Builder<'_, '_, 'tcx> {
 +    fn param_env(&self) -> ParamEnv<'tcx> {
 +        self.cx.param_env()
 +    }
 +}
 +
 +impl<'tcx> HasTargetSpec for Builder<'_, '_, 'tcx> {
 +    fn target_spec(&self) -> &Target {
 +        &self.cx.target_spec()
 +    }
 +}
 +
 +trait ToGccComp {
 +    fn to_gcc_comparison(&self) -> ComparisonOp;
 +}
 +
 +impl ToGccComp for IntPredicate {
 +    fn to_gcc_comparison(&self) -> ComparisonOp {
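 +        // NOTE: the signed and unsigned predicates map to the same GCC comparison operator;
 +        // the signedness of the comparison comes from the operand types.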
 +        match *self {
 +            IntPredicate::IntEQ => ComparisonOp::Equals,
 +            IntPredicate::IntNE => ComparisonOp::NotEquals,
 +            IntPredicate::IntUGT => ComparisonOp::GreaterThan,
 +            IntPredicate::IntUGE => ComparisonOp::GreaterThanEquals,
 +            IntPredicate::IntULT => ComparisonOp::LessThan,
 +            IntPredicate::IntULE => ComparisonOp::LessThanEquals,
 +            IntPredicate::IntSGT => ComparisonOp::GreaterThan,
 +            IntPredicate::IntSGE => ComparisonOp::GreaterThanEquals,
 +            IntPredicate::IntSLT => ComparisonOp::LessThan,
 +            IntPredicate::IntSLE => ComparisonOp::LessThanEquals,
 +        }
 +    }
 +}
 +
 +impl ToGccComp for RealPredicate {
 +    fn to_gcc_comparison(&self) -> ComparisonOp {
 +        // TODO: check that ordered vs non-ordered is respected.
 +        match *self {
 +            RealPredicate::RealPredicateFalse => unreachable!(),
 +            RealPredicate::RealOEQ => ComparisonOp::Equals,
 +            RealPredicate::RealOGT => ComparisonOp::GreaterThan,
 +            RealPredicate::RealOGE => ComparisonOp::GreaterThanEquals,
 +            RealPredicate::RealOLT => ComparisonOp::LessThan,
 +            RealPredicate::RealOLE => ComparisonOp::LessThanEquals,
 +            RealPredicate::RealONE => ComparisonOp::NotEquals,
 +            RealPredicate::RealORD => unreachable!(),
 +            RealPredicate::RealUNO => unreachable!(),
 +            RealPredicate::RealUEQ => ComparisonOp::Equals,
 +            RealPredicate::RealUGT => ComparisonOp::GreaterThan,
 +            RealPredicate::RealUGE => ComparisonOp::GreaterThan,
 +            RealPredicate::RealULT => ComparisonOp::LessThan,
 +            RealPredicate::RealULE => ComparisonOp::LessThan,
 +            RealPredicate::RealUNE => ComparisonOp::NotEquals,
 +            RealPredicate::RealPredicateTrue => unreachable!(),
 +        }
 +    }
 +}
 +
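 +// These discriminants match GCC's `__ATOMIC_*` memory-order constants (RELAXED = 0 up to
 +// SEQ_CST = 5), so they can be passed directly to the `__atomic_*` builtins.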
 +#[repr(C)]
 +#[allow(non_camel_case_types)]
 +enum MemOrdering {
 +    __ATOMIC_RELAXED,
 +    __ATOMIC_CONSUME,
 +    __ATOMIC_ACQUIRE,
 +    __ATOMIC_RELEASE,
 +    __ATOMIC_ACQ_REL,
 +    __ATOMIC_SEQ_CST,
 +}
 +
 +trait ToGccOrdering {
 +    fn to_gcc(self) -> i32;
 +}
 +
 +impl ToGccOrdering for AtomicOrdering {
 +    fn to_gcc(self) -> i32 {
 +        use MemOrdering::*;
 +
 +        let ordering =
 +            match self {
 +                AtomicOrdering::NotAtomic => __ATOMIC_RELAXED, // TODO: check if that's the same.
 +                AtomicOrdering::Unordered => __ATOMIC_RELAXED,
 +                AtomicOrdering::Monotonic => __ATOMIC_RELAXED, // TODO: check if that's the same.
 +                AtomicOrdering::Acquire => __ATOMIC_ACQUIRE,
 +                AtomicOrdering::Release => __ATOMIC_RELEASE,
 +                AtomicOrdering::AcquireRelease => __ATOMIC_ACQ_REL,
 +                AtomicOrdering::SequentiallyConsistent => __ATOMIC_SEQ_CST,
 +            };
 +        ordering as i32
 +    }
 +}
index 9cbbee772c5fb345f101daed695e7c0a973d1724,0000000000000000000000000000000000000000..19243b0cbce5d3bb2533164f885b719450c22e09
mode 100644,000000..100644
--- /dev/null
@@@ -1,491 -1,0 +1,493 @@@
-     BaseTypeMethods,
 +use std::cell::{Cell, RefCell};
 +
 +use gccjit::{
 +    Block,
 +    Context,
 +    CType,
 +    Function,
 +    FunctionType,
 +    LValue,
 +    RValue,
 +    Struct,
 +    Type,
 +};
 +use rustc_codegen_ssa::base::wants_msvc_seh;
 +use rustc_codegen_ssa::traits::{
 +    BackendTypes,
-                 let name = if wants_msvc_seh(self.sess()) {
 +    MiscMethods,
 +};
 +use rustc_data_structures::base_n;
 +use rustc_data_structures::fx::{FxHashMap, FxHashSet};
 +use rustc_middle::bug;
 +use rustc_middle::mir::mono::CodegenUnit;
 +use rustc_middle::ty::{self, Instance, ParamEnv, PolyExistentialTraitRef, Ty, TyCtxt};
 +use rustc_middle::ty::layout::{HasParamEnv, HasTyCtxt, LayoutError, TyAndLayout};
 +use rustc_session::Session;
 +use rustc_span::{Span, Symbol, DUMMY_SP};
 +use rustc_target::abi::{HasDataLayout, LayoutOf, PointeeInfo, Size, TargetDataLayout, VariantIdx};
 +use rustc_target::spec::{HasTargetSpec, Target, TlsModel};
 +
 +use crate::callee::get_fn;
 +use crate::declare::mangle_name;
 +
 +#[derive(Clone)]
 +pub struct FuncSig<'gcc> {
 +    pub params: Vec<Type<'gcc>>,
 +    pub return_type: Type<'gcc>,
 +}
 +
 +pub struct CodegenCx<'gcc, 'tcx> {
 +    pub check_overflow: bool,
 +    pub codegen_unit: &'tcx CodegenUnit<'tcx>,
 +    pub context: &'gcc Context<'gcc>,
 +
 +    // TODO: First set it to a dummy block to avoid using Option?
 +    pub current_block: RefCell<Option<Block<'gcc>>>,
 +    pub current_func: RefCell<Option<Function<'gcc>>>,
 +    pub normal_function_addresses: RefCell<FxHashSet<RValue<'gcc>>>,
 +
 +    /// The function where globals are initialized.
 +    pub global_init_func: Function<'gcc>,
 +    pub global_init_block: Block<'gcc>,
 +
 +    pub functions: RefCell<FxHashMap<String, Function<'gcc>>>,
 +
 +    pub tls_model: gccjit::TlsModel,
 +
 +    pub bool_type: Type<'gcc>,
 +    pub i8_type: Type<'gcc>,
 +    pub i16_type: Type<'gcc>,
 +    pub i32_type: Type<'gcc>,
 +    pub i64_type: Type<'gcc>,
 +    pub i128_type: Type<'gcc>,
 +    pub isize_type: Type<'gcc>,
 +
 +    pub u8_type: Type<'gcc>,
 +    pub u16_type: Type<'gcc>,
 +    pub u32_type: Type<'gcc>,
 +    pub u64_type: Type<'gcc>,
 +    pub u128_type: Type<'gcc>,
 +    pub usize_type: Type<'gcc>,
 +
 +    pub int_type: Type<'gcc>,
 +    pub uint_type: Type<'gcc>,
 +    pub long_type: Type<'gcc>,
 +    pub ulong_type: Type<'gcc>,
 +    pub ulonglong_type: Type<'gcc>,
 +    pub sizet_type: Type<'gcc>,
 +
 +    pub float_type: Type<'gcc>,
 +    pub double_type: Type<'gcc>,
 +
 +    pub linkage: Cell<FunctionType>,
 +    pub scalar_types: RefCell<FxHashMap<Ty<'tcx>, Type<'gcc>>>,
 +    pub types: RefCell<FxHashMap<(Ty<'tcx>, Option<VariantIdx>), Type<'gcc>>>,
 +    pub tcx: TyCtxt<'tcx>,
 +
 +    pub struct_types: RefCell<FxHashMap<Vec<Type<'gcc>>, Type<'gcc>>>,
 +
 +    pub types_with_fields_to_set: RefCell<FxHashMap<Type<'gcc>, (Struct<'gcc>, TyAndLayout<'tcx>)>>,
 +
 +    /// Cache instances of monomorphic and polymorphic items
 +    pub instances: RefCell<FxHashMap<Instance<'tcx>, RValue<'gcc>>>,
 +    /// Cache generated vtables
 +    pub vtables: RefCell<FxHashMap<(Ty<'tcx>, Option<ty::PolyExistentialTraitRef<'tcx>>), RValue<'gcc>>>,
 +
 +    /// Cache of emitted const globals (value -> global)
 +    pub const_globals: RefCell<FxHashMap<RValue<'gcc>, RValue<'gcc>>>,
 +
 +    pub init_argv_var: RefCell<String>,
 +    pub argv_initialized: Cell<bool>,
 +
 +    /// Cache of constant strings.
 +    pub const_cstr_cache: RefCell<FxHashMap<Symbol, LValue<'gcc>>>,
 +
 +    /// Cache of globals.
 +    pub globals: RefCell<FxHashMap<String, RValue<'gcc>>>,
 +    // TODO: remove global_names.
 +    pub global_names: RefCell<FxHashMap<RValue<'gcc>, String>>,
 +
 +    /// A counter that is used for generating local symbol names
 +    local_gen_sym_counter: Cell<usize>,
 +    pub global_gen_sym_counter: Cell<usize>,
 +
 +    eh_personality: Cell<Option<RValue<'gcc>>>,
 +
 +    pub pointee_infos: RefCell<FxHashMap<(Ty<'tcx>, Size), Option<PointeeInfo>>>,
 +
 +    /// NOTE: a hack is used because the rustc API is not suited to libgccjit: `const_undef()`
 +    /// returns structs as pointers so that they can later be assigned a value.
 +    /// This set remembers which of these pointers were returned by that function so that
 +    /// they can be dereferenced later.
 +    /// FIXME: fix the rustc API to avoid having this hack.
 +    pub structs_as_pointer: RefCell<FxHashSet<RValue<'gcc>>>,
 +
 +    /// Store pointers of different types for safety checks.
 +    /// When casting the values back to their original types, check against these sets that they
 +    /// are indeed of that type.
 +    /// FIXME: remove when the API supports more types.
 +    #[cfg(debug_assertions)]
 +    lvalues: RefCell<FxHashSet<LValue<'gcc>>>,
 +}
 +
 +impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> {
 +    pub fn new(context: &'gcc Context<'gcc>, codegen_unit: &'tcx CodegenUnit<'tcx>, tcx: TyCtxt<'tcx>) -> Self {
 +        let check_overflow = tcx.sess.overflow_checks();
 +        // TODO: fix this mess. libgccjit seems to return a random type when using new_int_type().
 +        //let isize_type = context.new_int_type((tcx.data_layout.pointer_size.bits() / 8) as i32, true);
 +        let isize_type = context.new_c_type(CType::LongLong);
 +        //let usize_type = context.new_int_type((tcx.data_layout.pointer_size.bits() / 8) as i32, false);
 +        let usize_type = context.new_c_type(CType::ULongLong);
 +        let bool_type = context.new_type::<bool>();
 +        let i8_type = context.new_type::<i8>();
 +        let i16_type = context.new_type::<i16>();
 +        let i32_type = context.new_type::<i32>();
 +        let i64_type = context.new_c_type(CType::LongLong);
 +        let i128_type = context.new_c_type(CType::Int128t).get_aligned(8); // TODO: should this be hard-coded?
 +        let u8_type = context.new_type::<u8>();
 +        let u16_type = context.new_type::<u16>();
 +        let u32_type = context.new_type::<u32>();
 +        let u64_type = context.new_c_type(CType::ULongLong);
 +        let u128_type = context.new_c_type(CType::UInt128t).get_aligned(8); // TODO: should this be hard-coded?
 +
 +        let tls_model = to_gcc_tls_mode(tcx.sess.tls_model());
 +
 +        let float_type = context.new_type::<f32>();
 +        let double_type = context.new_type::<f64>();
 +
 +        let int_type = context.new_c_type(CType::Int);
 +        let uint_type = context.new_c_type(CType::UInt);
 +        let long_type = context.new_c_type(CType::Long);
 +        let ulong_type = context.new_c_type(CType::ULong);
 +        let ulonglong_type = context.new_c_type(CType::ULongLong);
 +        let sizet_type = context.new_c_type(CType::SizeT);
 +
 +        assert_eq!(isize_type, i64_type);
 +        assert_eq!(usize_type, u64_type);
 +
 +        let mut functions = FxHashMap::default();
 +        let builtins = [
 +            "__builtin_unreachable", "abort", "__builtin_expect", "__builtin_add_overflow", "__builtin_mul_overflow",
 +            "__builtin_saddll_overflow", /*"__builtin_sadd_overflow",*/ "__builtin_smulll_overflow", /*"__builtin_smul_overflow",*/
 +            "__builtin_ssubll_overflow", /*"__builtin_ssub_overflow",*/ "__builtin_sub_overflow", "__builtin_uaddll_overflow",
 +            "__builtin_uadd_overflow", "__builtin_umulll_overflow", "__builtin_umul_overflow", "__builtin_usubll_overflow",
 +            "__builtin_usub_overflow", "sqrtf", "sqrt", "__builtin_powif", "__builtin_powi", "sinf", "sin", "cosf", "cos",
 +            "powf", "pow", "expf", "exp", "exp2f", "exp2", "logf", "log", "log10f", "log10", "log2f", "log2", "fmaf",
 +            "fma", "fabsf", "fabs", "fminf", "fmin", "fmaxf", "fmax", "copysignf", "copysign", "floorf", "floor", "ceilf",
 +            "ceil", "truncf", "trunc", "rintf", "rint", "nearbyintf", "nearbyint", "roundf", "round",
 +            "__builtin_expect_with_probability",
 +        ];
 +
 +        for builtin in builtins.iter() {
 +            functions.insert(builtin.to_string(), context.get_builtin_function(builtin));
 +        }
 +
 +        let global_init_func = context.new_function(None, FunctionType::Exported, context.new_type::<()>(), &[],
 +            &format!("__gccGlobalInit{}", unit_name(&codegen_unit)), false);
 +        let global_init_block = global_init_func.new_block("initial");
 +
 +        Self {
 +            check_overflow,
 +            codegen_unit,
 +            context,
 +            current_block: RefCell::new(None),
 +            current_func: RefCell::new(None),
 +            normal_function_addresses: Default::default(),
 +            functions: RefCell::new(functions),
 +            global_init_func,
 +            global_init_block,
 +
 +            tls_model,
 +
 +            bool_type,
 +            i8_type,
 +            i16_type,
 +            i32_type,
 +            i64_type,
 +            i128_type,
 +            isize_type,
 +            usize_type,
 +            u8_type,
 +            u16_type,
 +            u32_type,
 +            u64_type,
 +            u128_type,
 +            int_type,
 +            uint_type,
 +            long_type,
 +            ulong_type,
 +            ulonglong_type,
 +            sizet_type,
 +
 +            float_type,
 +            double_type,
 +
 +            linkage: Cell::new(FunctionType::Internal),
 +            #[cfg(debug_assertions)]
 +            lvalues: Default::default(),
 +            instances: Default::default(),
 +            vtables: Default::default(),
 +            const_globals: Default::default(),
 +            init_argv_var: RefCell::new(String::new()),
 +            argv_initialized: Cell::new(false),
 +            const_cstr_cache: Default::default(),
 +            global_names: Default::default(),
 +            globals: Default::default(),
 +            scalar_types: Default::default(),
 +            types: Default::default(),
 +            tcx,
 +            struct_types: Default::default(),
 +            types_with_fields_to_set: Default::default(),
 +            local_gen_sym_counter: Cell::new(0),
 +            global_gen_sym_counter: Cell::new(0),
 +            eh_personality: Cell::new(None),
 +            pointee_infos: Default::default(),
 +            structs_as_pointer: Default::default(),
 +        }
 +    }
 +
 +    pub fn lvalue_to_rvalue(&self, value: LValue<'gcc>) -> RValue<'gcc> {
 +        #[cfg(debug_assertions)]
 +        self.lvalues.borrow_mut().insert(value);
 +        unsafe { std::mem::transmute(value) }
 +    }
 +
 +    pub fn rvalue_as_function(&self, value: RValue<'gcc>) -> Function<'gcc> {
 +        let function: Function<'gcc> = unsafe { std::mem::transmute(value) };
 +        debug_assert!(self.functions.borrow().values().find(|value| **value == function).is_some(),
 +            "{:?} ({:?}) is not a function", value, value.get_type());
 +        function
 +    }
 +
 +    pub fn rvalue_as_lvalue(&self, value: RValue<'gcc>) -> LValue<'gcc> {
 +        let lvalue: LValue<'gcc> = unsafe { std::mem::transmute(value) };
 +        //debug_assert!(self.lvalues.borrow().contains(&lvalue), "{:?} is not an lvalue", value);
 +        lvalue
 +    }
 +
 +    pub fn sess(&self) -> &Session {
 +        &self.tcx.sess
 +    }
 +}
 +
 +impl<'gcc, 'tcx> BackendTypes for CodegenCx<'gcc, 'tcx> {
 +    type Value = RValue<'gcc>;
 +    type Function = RValue<'gcc>;
 +
 +    type BasicBlock = Block<'gcc>;
 +    type Type = Type<'gcc>;
 +    type Funclet = (); // TODO
 +
 +    type DIScope = (); // TODO
 +    type DILocation = (); // TODO
 +    type DIVariable = (); // TODO
 +}
 +
 +impl<'gcc, 'tcx> MiscMethods<'tcx> for CodegenCx<'gcc, 'tcx> {
 +    fn vtables(&self) -> &RefCell<FxHashMap<(Ty<'tcx>, Option<PolyExistentialTraitRef<'tcx>>), RValue<'gcc>>> {
 +        &self.vtables
 +    }
 +
 +    fn get_fn(&self, instance: Instance<'tcx>) -> RValue<'gcc> {
 +        let func = get_fn(self, instance);
 +        *self.current_func.borrow_mut() = Some(self.rvalue_as_function(func));
 +        func
 +    }
 +
 +    fn get_fn_addr(&self, instance: Instance<'tcx>) -> RValue<'gcc> {
 +        //let symbol = self.tcx.symbol_name(instance).name;
 +
 +        let func = get_fn(self, instance);
 +        let func = self.rvalue_as_function(func);
 +        let ptr = func.get_address(None);
 +
 +        // TODO: don't do this twice: i.e. in declare_fn and here.
 +        //let fn_abi = FnAbi::of_instance(self, instance, &[]);
 +        //let (return_type, params, _) = fn_abi.gcc_type(self);
 +        // FIXME: the rustc API seems to call get_fn_addr() when not needed (e.g. for FFI).
 +        //let pointer_type = ptr.get_type();
 +
 +        self.normal_function_addresses.borrow_mut().insert(ptr);
 +
 +        ptr
 +    }
 +
 +    fn eh_personality(&self) -> RValue<'gcc> {
 +        // The exception handling personality function.
 +        //
 +        // If our compilation unit has the `eh_personality` lang item somewhere
 +        // within it, then we just need to codegen that. Otherwise, we're
 +        // building an rlib which will depend on some upstream implementation of
 +        // this function, so we just codegen a generic reference to it. We don't
 +        // specify any of the types for the function, we just make it a symbol
 +        // that LLVM can later use.
 +        //
 +        // Note that MSVC is a little special here in that we don't use the
 +        // `eh_personality` lang item at all. Currently LLVM has support for
 +        // both Dwarf and SEH unwind mechanisms for MSVC targets and uses the
 +        // *name of the personality function* to decide what kind of unwind side
 +        // tables/landing pads to emit. It looks like Dwarf is used by default,
 +        // injecting a dependency on the `_Unwind_Resume` symbol for resuming
 +        // an "exception", but for MSVC we want to force SEH. This means that we
 +        // can't actually have the personality function be our standard
 +        // `rust_eh_personality` function, but rather we wired it up to the
 +        // CRT's custom personality function, which forces LLVM to consider
 +        // landing pads as "landing pads for SEH".
 +        if let Some(llpersonality) = self.eh_personality.get() {
 +            return llpersonality;
 +        }
 +        let tcx = self.tcx;
 +        let llfn = match tcx.lang_items().eh_personality() {
 +            Some(def_id) if !wants_msvc_seh(self.sess()) => self.get_fn_addr(
 +                ty::Instance::resolve(
 +                    tcx,
 +                    ty::ParamEnv::reveal_all(),
 +                    def_id,
 +                    tcx.intern_substs(&[]),
 +                )
 +                .unwrap().unwrap(),
 +            ),
 +            _ => {
-                 self.declare_func(name, self.type_i32(), &[], true)
++                let _name = if wants_msvc_seh(self.sess()) {
 +                    "__CxxFrameHandler3"
 +                } else {
 +                    "rust_eh_personality"
 +                };
++                //let func = self.declare_func(name, self.type_i32(), &[], true);
++                // FIXME: this hack should not be needed. It will probably be removed when
++                // unwinding support is added.
++                self.context.new_rvalue_from_int(self.int_type, 0)
 +            }
 +        };
 +        //attributes::apply_target_cpu_attr(self, llfn);
 +        self.eh_personality.set(Some(llfn));
 +        llfn
 +    }
 +
 +    fn sess(&self) -> &Session {
 +        &self.tcx.sess
 +    }
 +
 +    fn check_overflow(&self) -> bool {
 +        self.check_overflow
 +    }
 +
 +    fn codegen_unit(&self) -> &'tcx CodegenUnit<'tcx> {
 +        self.codegen_unit
 +    }
 +
 +    fn used_statics(&self) -> &RefCell<Vec<RValue<'gcc>>> {
 +        unimplemented!();
 +        //&self.used_statics
 +    }
 +
 +    fn set_frame_pointer_type(&self, _llfn: RValue<'gcc>) {
 +        // TODO
 +        //attributes::set_frame_pointer_type(self, llfn)
 +    }
 +
 +    fn apply_target_cpu_attr(&self, _llfn: RValue<'gcc>) {
 +        // TODO
 +        //attributes::apply_target_cpu_attr(self, llfn)
 +    }
 +
 +    fn create_used_variable(&self) {
 +        unimplemented!();
 +        /*let name = const_cstr!("llvm.used");
 +        let section = const_cstr!("llvm.metadata");
 +        let array =
 +            self.const_array(&self.type_ptr_to(self.type_i8()), &*self.used_statics.borrow());
 +
 +        unsafe {
 +            let g = llvm::LLVMAddGlobal(self.llmod, self.val_ty(array), name.as_ptr());
 +            llvm::LLVMSetInitializer(g, array);
 +            llvm::LLVMRustSetLinkage(g, llvm::Linkage::AppendingLinkage);
 +            llvm::LLVMSetSection(g, section.as_ptr());
 +        }*/
 +    }
 +
 +    fn declare_c_main(&self, fn_type: Self::Type) -> Option<Self::Function> {
 +        if self.get_declared_value("main").is_none() {
 +            Some(self.declare_cfn("main", fn_type))
 +        }
 +        else {
 +            // If the symbol already exists, it is an error: for example, the user wrote
 +            // #[no_mangle] extern "C" fn main(..) {..}
 +            // instead of #[start]
 +            None
 +        }
 +    }
 +}
 +
 +impl<'gcc, 'tcx> HasTyCtxt<'tcx> for CodegenCx<'gcc, 'tcx> {
 +    fn tcx(&self) -> TyCtxt<'tcx> {
 +        self.tcx
 +    }
 +}
 +
 +impl<'gcc, 'tcx> HasDataLayout for CodegenCx<'gcc, 'tcx> {
 +    fn data_layout(&self) -> &TargetDataLayout {
 +        &self.tcx.data_layout
 +    }
 +}
 +
 +impl<'gcc, 'tcx> HasTargetSpec for CodegenCx<'gcc, 'tcx> {
 +    fn target_spec(&self) -> &Target {
 +        &self.tcx.sess.target
 +    }
 +}
 +
 +impl<'gcc, 'tcx> LayoutOf for CodegenCx<'gcc, 'tcx> {
 +    type Ty = Ty<'tcx>;
 +    type TyAndLayout = TyAndLayout<'tcx>;
 +
 +    fn layout_of(&self, ty: Ty<'tcx>) -> Self::TyAndLayout {
 +        self.spanned_layout_of(ty, DUMMY_SP)
 +    }
 +
 +    fn spanned_layout_of(&self, ty: Ty<'tcx>, span: Span) -> Self::TyAndLayout {
 +        self.tcx.layout_of(ParamEnv::reveal_all().and(ty)).unwrap_or_else(|e| {
 +            if let LayoutError::SizeOverflow(_) = e {
 +                self.sess().span_fatal(span, &e.to_string())
 +            } else {
 +                bug!("failed to get layout for `{}`: {}", ty, e)
 +            }
 +        })
 +    }
 +}
 +
 +impl<'tcx, 'gcc> HasParamEnv<'tcx> for CodegenCx<'gcc, 'tcx> {
 +    fn param_env(&self) -> ParamEnv<'tcx> {
 +        ParamEnv::reveal_all()
 +    }
 +}
 +
 +impl<'b, 'tcx> CodegenCx<'b, 'tcx> {
 +    /// Generates a new symbol name with the given prefix. This symbol name must
 +    /// only be used for definitions with `internal` or `private` linkage.
 +    pub fn generate_local_symbol_name(&self, prefix: &str) -> String {
 +        let idx = self.local_gen_sym_counter.get();
 +        self.local_gen_sym_counter.set(idx + 1);
 +        // Include a '.' character, so there can be no accidental conflicts with
 +        // user-defined names.
 +        let mut name = String::with_capacity(prefix.len() + 6);
 +        name.push_str(prefix);
 +        name.push_str(".");
 +        base_n::push_str(idx as u128, base_n::ALPHANUMERIC_ONLY, &mut name);
 +        name
 +    }
 +}
 +
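 +/// Mangle the codegen unit name, replacing dashes with underscores, so it can be used in
 +/// generated symbol names (e.g. the `__gccGlobalInit*` functions).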
 +pub fn unit_name<'tcx>(codegen_unit: &CodegenUnit<'tcx>) -> String {
 +    let name = &codegen_unit.name().to_string();
 +    mangle_name(&name.replace('-', "_"))
 +}
 +
 +fn to_gcc_tls_mode(tls_model: TlsModel) -> gccjit::TlsModel {
 +    match tls_model {
 +        TlsModel::GeneralDynamic => gccjit::TlsModel::GlobalDynamic,
 +        TlsModel::LocalDynamic => gccjit::TlsModel::LocalDynamic,
 +        TlsModel::InitialExec => gccjit::TlsModel::InitialExec,
 +        TlsModel::LocalExec => gccjit::TlsModel::LocalExec,
 +    }
 +}
index 083f7e01c80d07bbf0d21dae5d2f53771ec3dd75,0000000000000000000000000000000000000000..ad6dfbffbac96ebb055e1f118d885cfa3900b09c
mode 100644,000000..100644
--- /dev/null
@@@ -1,1286 -1,0 +1,1286 @@@
-                     self.call(func, &args.iter().map(|arg| arg.immediate()).collect::<Vec<_>>(), None)
 +pub mod llvm;
 +mod simd;
 +
 +use gccjit::{ComparisonOp, Function, RValue, ToRValue, Type, UnaryOp};
 +use rustc_codegen_ssa::MemFlags;
 +use rustc_codegen_ssa::base::wants_msvc_seh;
 +use rustc_codegen_ssa::common::{IntPredicate, span_invalid_monomorphization_error};
 +use rustc_codegen_ssa::mir::operand::{OperandRef, OperandValue};
 +use rustc_codegen_ssa::mir::place::PlaceRef;
 +use rustc_codegen_ssa::traits::{ArgAbiMethods, BaseTypeMethods, BuilderMethods, ConstMethods, IntrinsicCallMethods};
 +use rustc_middle::bug;
 +use rustc_middle::ty::{self, Instance, Ty};
 +use rustc_span::{Span, Symbol, symbol::kw, sym};
 +use rustc_target::abi::{HasDataLayout, LayoutOf};
 +use rustc_target::abi::call::{ArgAbi, FnAbi, PassMode};
 +use rustc_target::spec::PanicStrategy;
 +
 +use crate::abi::GccType;
 +use crate::builder::Builder;
 +use crate::common::TypeReflection;
 +use crate::context::CodegenCx;
 +use crate::type_of::LayoutGccExt;
 +use crate::intrinsic::simd::generic_simd_intrinsic;
 +
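 +/// Map rustc's "simple" float/math intrinsics to the name of the corresponding libm function
 +/// or GCC builtin.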
 +fn get_simple_intrinsic<'gcc, 'tcx>(cx: &CodegenCx<'gcc, 'tcx>, name: Symbol) -> Option<Function<'gcc>> {
 +    let gcc_name = match name {
 +        sym::sqrtf32 => "sqrtf",
 +        sym::sqrtf64 => "sqrt",
 +        sym::powif32 => "__builtin_powif",
 +        sym::powif64 => "__builtin_powi",
 +        sym::sinf32 => "sinf",
 +        sym::sinf64 => "sin",
 +        sym::cosf32 => "cosf",
 +        sym::cosf64 => "cos",
 +        sym::powf32 => "powf",
 +        sym::powf64 => "pow",
 +        sym::expf32 => "expf",
 +        sym::expf64 => "exp",
 +        sym::exp2f32 => "exp2f",
 +        sym::exp2f64 => "exp2",
 +        sym::logf32 => "logf",
 +        sym::logf64 => "log",
 +        sym::log10f32 => "log10f",
 +        sym::log10f64 => "log10",
 +        sym::log2f32 => "log2f",
 +        sym::log2f64 => "log2",
 +        sym::fmaf32 => "fmaf",
 +        sym::fmaf64 => "fma",
 +        sym::fabsf32 => "fabsf",
 +        sym::fabsf64 => "fabs",
 +        sym::minnumf32 => "fminf",
 +        sym::minnumf64 => "fmin",
 +        sym::maxnumf32 => "fmaxf",
 +        sym::maxnumf64 => "fmax",
 +        sym::copysignf32 => "copysignf",
 +        sym::copysignf64 => "copysign",
 +        sym::floorf32 => "floorf",
 +        sym::floorf64 => "floor",
 +        sym::ceilf32 => "ceilf",
 +        sym::ceilf64 => "ceil",
 +        sym::truncf32 => "truncf",
 +        sym::truncf64 => "trunc",
 +        sym::rintf32 => "rintf",
 +        sym::rintf64 => "rint",
 +        sym::nearbyintf32 => "nearbyintf",
 +        sym::nearbyintf64 => "nearbyint",
 +        sym::roundf32 => "roundf",
 +        sym::roundf64 => "round",
 +        sym::abort => "abort",
 +        _ => return None,
 +    };
 +    Some(cx.context.get_builtin_function(&gcc_name))
 +}
 +
 +impl<'a, 'gcc, 'tcx> IntrinsicCallMethods<'tcx> for Builder<'a, 'gcc, 'tcx> {
 +    fn codegen_intrinsic_call(&mut self, instance: Instance<'tcx>, fn_abi: &FnAbi<'tcx, Ty<'tcx>>, args: &[OperandRef<'tcx, RValue<'gcc>>], llresult: RValue<'gcc>, span: Span) {
 +        let tcx = self.tcx;
 +        let callee_ty = instance.ty(tcx, ty::ParamEnv::reveal_all());
 +
 +        let (def_id, substs) = match *callee_ty.kind() {
 +            ty::FnDef(def_id, substs) => (def_id, substs),
 +            _ => bug!("expected fn item type, found {}", callee_ty),
 +        };
 +
 +        let sig = callee_ty.fn_sig(tcx);
 +        let sig = tcx.normalize_erasing_late_bound_regions(ty::ParamEnv::reveal_all(), sig);
 +        let arg_tys = sig.inputs();
 +        let ret_ty = sig.output();
 +        let name = tcx.item_name(def_id);
 +        let name_str = &*name.as_str();
 +
 +        let llret_ty = self.layout_of(ret_ty).gcc_type(self, true);
 +        let result = PlaceRef::new_sized(llresult, fn_abi.ret.layout);
 +
 +        let simple = get_simple_intrinsic(self, name);
 +        let llval =
 +            match name {
 +                _ if simple.is_some() => {
 +                    // FIXME: remove this cast when the API supports functions.
 +                    let func = unsafe { std::mem::transmute(simple.expect("simple")) };
-         self.call(func, &[], None);
++                    self.call(self.type_void(), func, &args.iter().map(|arg| arg.immediate()).collect::<Vec<_>>(), None)
 +                },
 +                sym::likely => {
 +                    self.expect(args[0].immediate(), true)
 +                }
 +                sym::unlikely => {
 +                    self.expect(args[0].immediate(), false)
 +                }
 +                kw::Try => {
 +                    try_intrinsic(
 +                        self,
 +                        args[0].immediate(),
 +                        args[1].immediate(),
 +                        args[2].immediate(),
 +                        llresult,
 +                    );
 +                    return;
 +                }
 +                sym::breakpoint => {
 +                    unimplemented!();
 +                    /*let llfn = self.get_intrinsic(&("llvm.debugtrap"));
 +                    self.call(llfn, &[], None)*/
 +                }
 +                sym::va_copy => {
 +                    unimplemented!();
 +                    /*let intrinsic = self.cx().get_intrinsic(&("llvm.va_copy"));
 +                    self.call(intrinsic, &[args[0].immediate(), args[1].immediate()], None)*/
 +                }
 +                sym::va_arg => {
 +                    unimplemented!();
 +                    /*match fn_abi.ret.layout.abi {
 +                        abi::Abi::Scalar(ref scalar) => {
 +                            match scalar.value {
 +                                Primitive::Int(..) => {
 +                                    if self.cx().size_of(ret_ty).bytes() < 4 {
 +                                        // `va_arg` should not be called on an integer type
 +                                        // less than 4 bytes in length. If it is, promote
 +                                        // the integer to a `i32` and truncate the result
 +                                        // back to the smaller type.
 +                                        let promoted_result = emit_va_arg(self, args[0], tcx.types.i32);
 +                                        self.trunc(promoted_result, llret_ty)
 +                                    } else {
 +                                        emit_va_arg(self, args[0], ret_ty)
 +                                    }
 +                                }
 +                                Primitive::F64 | Primitive::Pointer => {
 +                                    emit_va_arg(self, args[0], ret_ty)
 +                                }
 +                                // `va_arg` should never be used with the return type f32.
 +                                Primitive::F32 => bug!("the va_arg intrinsic does not work with `f32`"),
 +                            }
 +                        }
 +                        _ => bug!("the va_arg intrinsic does not work with non-scalar types"),
 +                    }*/
 +                }
 +
 +                sym::volatile_load | sym::unaligned_volatile_load => {
 +                    let tp_ty = substs.type_at(0);
 +                    let mut ptr = args[0].immediate();
 +                    if let PassMode::Cast(ty) = fn_abi.ret.mode {
 +                        ptr = self.pointercast(ptr, self.type_ptr_to(ty.gcc_type(self)));
 +                    }
 +                    let load = self.volatile_load(ptr.get_type(), ptr);
 +                    // TODO
 +                    /*let align = if name == sym::unaligned_volatile_load {
 +                        1
 +                    } else {
 +                        self.align_of(tp_ty).bytes() as u32
 +                    };
 +                    unsafe {
 +                      llvm::LLVMSetAlignment(load, align);
 +                      }*/
 +                    self.to_immediate(load, self.layout_of(tp_ty))
 +                }
 +                sym::volatile_store => {
 +                    let dst = args[0].deref(self.cx());
 +                    args[1].val.volatile_store(self, dst);
 +                    return;
 +                }
 +                sym::unaligned_volatile_store => {
 +                    let dst = args[0].deref(self.cx());
 +                    args[1].val.unaligned_volatile_store(self, dst);
 +                    return;
 +                }
 +                sym::prefetch_read_data
 +                    | sym::prefetch_write_data
 +                    | sym::prefetch_read_instruction
 +                    | sym::prefetch_write_instruction => {
 +                        unimplemented!();
 +                        /*let expect = self.get_intrinsic(&("llvm.prefetch"));
 +                        let (rw, cache_type) = match name {
 +                            sym::prefetch_read_data => (0, 1),
 +                            sym::prefetch_write_data => (1, 1),
 +                            sym::prefetch_read_instruction => (0, 0),
 +                            sym::prefetch_write_instruction => (1, 0),
 +                            _ => bug!(),
 +                        };
 +                        self.call(
 +                            expect,
 +                            &[
 +                            args[0].immediate(),
 +                            self.const_i32(rw),
 +                            args[1].immediate(),
 +                            self.const_i32(cache_type),
 +                            ],
 +                            None,
 +                        )*/
 +                    }
 +                sym::ctlz
 +                    | sym::ctlz_nonzero
 +                    | sym::cttz
 +                    | sym::cttz_nonzero
 +                    | sym::ctpop
 +                    | sym::bswap
 +                    | sym::bitreverse
 +                    | sym::rotate_left
 +                    | sym::rotate_right
 +                    | sym::saturating_add
 +                    | sym::saturating_sub => {
 +                        let ty = arg_tys[0];
 +                        match int_type_width_signed(ty, self) {
 +                            Some((width, signed)) => match name {
 +                                sym::ctlz | sym::cttz => {
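 +                                    // ctlz/cttz are defined for a zero argument (the result is
 +                                    // the bit width), so zero is handled in a separate branch
 +                                    // before calling the zero-counting helpers.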
 +                                    let func = self.current_func.borrow().expect("func");
 +                                    let then_block = func.new_block("then");
 +                                    let else_block = func.new_block("else");
 +                                    let after_block = func.new_block("after");
 +
 +                                    let arg = args[0].immediate();
 +                                    let result = func.new_local(None, arg.get_type(), "zeros");
 +                                    let zero = self.cx.context.new_rvalue_zero(arg.get_type());
 +                                    let cond = self.cx.context.new_comparison(None, ComparisonOp::Equals, arg, zero);
 +                                    self.block.expect("block").end_with_conditional(None, cond, then_block, else_block);
 +
 +                                    let zero_result = self.cx.context.new_rvalue_from_long(arg.get_type(), width as i64);
 +                                    then_block.add_assignment(None, result, zero_result);
 +                                    then_block.end_with_jump(None, after_block);
 +
 +                                    // NOTE: since jumps were added in a place
 +                                    // count_leading_zeroes() does not expect, the current blocks
 +                                    // in the state need to be updated.
 +                                    *self.current_block.borrow_mut() = Some(else_block);
 +                                    self.block = Some(else_block);
 +
 +                                    let zeros =
 +                                        match name {
 +                                            sym::ctlz => self.count_leading_zeroes(width, arg),
 +                                            sym::cttz => self.count_trailing_zeroes(width, arg),
 +                                            _ => unreachable!(),
 +                                        };
 +                                    else_block.add_assignment(None, result, zeros);
 +                                    else_block.end_with_jump(None, after_block);
 +
 +                                    // NOTE: since jumps were added in a place rustc does not
 +                                    // expect, the current blocks in the state need to be updated.
 +                                    *self.current_block.borrow_mut() = Some(after_block);
 +                                    self.block = Some(after_block);
 +
 +                                    result.to_rvalue()
 +
 +                                    /*let y = self.const_bool(false);
 +                                    let llfn = self.get_intrinsic(&format!("llvm.{}.i{}", name, width));
 +                                    self.call(llfn, &[args[0].immediate(), y], None)*/
 +                                }
 +                                sym::ctlz_nonzero => {
 +                                    self.count_leading_zeroes(width, args[0].immediate())
 +                                },
 +                                sym::cttz_nonzero => {
 +                                    self.count_trailing_zeroes(width, args[0].immediate())
 +                                }
 +                                sym::ctpop => self.pop_count(args[0].immediate()),
 +                                sym::bswap => {
 +                                    if width == 8 {
 +                                        args[0].immediate() // byte swapping a u8/i8 is a no-op
 +                                    }
 +                                    else {
 +                                        // TODO: check if it's faster to use string literals and a
 +                                        // match instead of format!.
 +                                        let bswap = self.cx.context.get_builtin_function(&format!("__builtin_bswap{}", width));
 +                                        let mut arg = args[0].immediate();
 +                                        // FIXME: this cast should not be necessary. Remove
 +                                        // when having proper sized integer types.
 +                                        let param_type = bswap.get_param(0).to_rvalue().get_type();
 +                                        if param_type != arg.get_type() {
 +                                            arg = self.bitcast(arg, param_type);
 +                                        }
 +                                        self.cx.context.new_call(None, bswap, &[arg])
 +                                    }
 +                                },
 +                                sym::bitreverse => self.bit_reverse(width, args[0].immediate()),
 +                                sym::rotate_left | sym::rotate_right => {
 +                                    // TODO: implement using algorithm from:
 +                                    // https://blog.regehr.org/archives/1063
 +                                    // for other platforms.
 +                                    let is_left = name == sym::rotate_left;
 +                                    let val = args[0].immediate();
 +                                    let raw_shift = args[1].immediate();
 +                                    if is_left {
 +                                        self.rotate_left(val, raw_shift, width)
 +                                    }
 +                                    else {
 +                                        self.rotate_right(val, raw_shift, width)
 +                                    }
 +                                },
 +                                sym::saturating_add => {
 +                                    self.saturating_add(args[0].immediate(), args[1].immediate(), signed, width)
 +                                },
 +                                sym::saturating_sub => {
 +                                    self.saturating_sub(args[0].immediate(), args[1].immediate(), signed, width)
 +                                },
 +                                _ => bug!(),
 +                            },
 +                            None => {
 +                                span_invalid_monomorphization_error(
 +                                    tcx.sess,
 +                                    span,
 +                                    &format!(
 +                                        "invalid monomorphization of `{}` intrinsic: \
 +                                      expected basic integer type, found `{}`",
 +                                      name, ty
 +                                    ),
 +                                );
 +                                return;
 +                            }
 +                        }
 +                    }
 +
 +                sym::raw_eq => {
 +                    use rustc_target::abi::Abi::*;
 +                    let tp_ty = substs.type_at(0);
 +                    let layout = self.layout_of(tp_ty).layout;
 +                    let use_integer_compare = match layout.abi {
 +                        Scalar(_) | ScalarPair(_, _) => true,
 +                        Uninhabited | Vector { .. } => false,
 +                        Aggregate { .. } => {
 +                            // For rusty ABIs, small aggregates are actually passed
 +                            // as `RegKind::Integer` (see `FnAbi::adjust_for_abi`),
 +                            // so we re-use that same threshold here.
 +                            layout.size <= self.data_layout().pointer_size * 2
 +                        }
 +                    };
 +
 +                    let a = args[0].immediate();
 +                    let b = args[1].immediate();
 +                    if layout.size.bytes() == 0 {
 +                        self.const_bool(true)
 +                    }
 +                    /*else if use_integer_compare {
 +                        let integer_ty = self.type_ix(layout.size.bits()); // FIXME: LLVM creates an integer of 96 bits for [i32; 3], but gcc doesn't support this, so it creates an integer of 128 bits.
 +                        let ptr_ty = self.type_ptr_to(integer_ty);
 +                        let a_ptr = self.bitcast(a, ptr_ty);
 +                        let a_val = self.load(integer_ty, a_ptr, layout.align.abi);
 +                        let b_ptr = self.bitcast(b, ptr_ty);
 +                        let b_val = self.load(integer_ty, b_ptr, layout.align.abi);
 +                        self.icmp(IntPredicate::IntEQ, a_val, b_val)
 +                    }*/
 +                    else {
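 +                        // Fall back to comparing the raw bytes with the memcmp builtin and
 +                        // checking that it returned 0.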
 +                        let void_ptr_type = self.context.new_type::<*const ()>();
 +                        let a_ptr = self.bitcast(a, void_ptr_type);
 +                        let b_ptr = self.bitcast(b, void_ptr_type);
 +                        let n = self.context.new_cast(None, self.const_usize(layout.size.bytes()), self.sizet_type);
 +                        let builtin = self.context.get_builtin_function("memcmp");
 +                        let cmp = self.context.new_call(None, builtin, &[a_ptr, b_ptr, n]);
 +                        self.icmp(IntPredicate::IntEQ, cmp, self.const_i32(0))
 +                    }
 +                }
 +
 +                _ if name_str.starts_with("simd_") => {
 +                    match generic_simd_intrinsic(self, name, callee_ty, args, ret_ty, llret_ty, span) {
 +                        Ok(llval) => llval,
 +                        Err(()) => return,
 +                    }
 +                }
 +
 +                _ => bug!("unknown intrinsic '{}'", name),
 +            };
 +
 +        if !fn_abi.ret.is_ignore() {
 +            if let PassMode::Cast(ty) = fn_abi.ret.mode {
 +                let ptr_llty = self.type_ptr_to(ty.gcc_type(self));
 +                let ptr = self.pointercast(result.llval, ptr_llty);
 +                self.store(llval, ptr, result.align);
 +            }
 +            else {
 +                OperandRef::from_immediate_or_packed_pair(self, llval, result.layout)
 +                    .val
 +                    .store(self, result);
 +            }
 +        }
 +    }
 +
 +    fn abort(&mut self) {
 +        let func = self.context.get_builtin_function("abort");
 +        let func: RValue<'gcc> = unsafe { std::mem::transmute(func) };
-         bx.call(try_func, &[data], None);
++        self.call(self.type_void(), func, &[], None);
 +    }
 +
 +    fn assume(&mut self, value: Self::Value) {
 +        // TODO: switch to assume when it exists.
 +        // Or use something like this:
 +        // #define __assume(cond) do { if (!(cond)) __builtin_unreachable(); } while (0)
 +        self.expect(value, true);
 +    }
 +
 +    fn expect(&mut self, cond: Self::Value, _expected: bool) -> Self::Value {
 +        // TODO
 +        /*let expect = self.context.get_builtin_function("__builtin_expect");
 +        let expect: RValue<'gcc> = unsafe { std::mem::transmute(expect) };
 +        self.call(expect, &[cond, self.const_bool(expected)], None)*/
 +        cond
 +    }
 +
 +    fn sideeffect(&mut self) {
 +        // TODO
 +        /*if self.tcx().sess.opts.debugging_opts.insert_sideeffect {
 +            let fnname = self.get_intrinsic(&("llvm.sideeffect"));
 +            self.call(fnname, &[], None);
 +        }*/
 +    }
 +
 +    fn va_start(&mut self, _va_list: RValue<'gcc>) -> RValue<'gcc> {
 +        unimplemented!();
 +        /*let intrinsic = self.cx().get_intrinsic("llvm.va_start");
 +        self.call(intrinsic, &[va_list], None)*/
 +    }
 +
 +    fn va_end(&mut self, _va_list: RValue<'gcc>) -> RValue<'gcc> {
 +        unimplemented!();
 +        /*let intrinsic = self.cx().get_intrinsic("llvm.va_end");
 +        self.call(intrinsic, &[va_list], None)*/
 +    }
 +}
 +
 +impl<'a, 'gcc, 'tcx> ArgAbiMethods<'tcx> for Builder<'a, 'gcc, 'tcx> {
 +    fn store_fn_arg(&mut self, arg_abi: &ArgAbi<'tcx, Ty<'tcx>>, idx: &mut usize, dst: PlaceRef<'tcx, Self::Value>) {
 +        arg_abi.store_fn_arg(self, idx, dst)
 +    }
 +
 +    fn store_arg(&mut self, arg_abi: &ArgAbi<'tcx, Ty<'tcx>>, val: RValue<'gcc>, dst: PlaceRef<'tcx, RValue<'gcc>>) {
 +        arg_abi.store(self, val, dst)
 +    }
 +
 +    fn arg_memory_ty(&self, arg_abi: &ArgAbi<'tcx, Ty<'tcx>>) -> Type<'gcc> {
 +        arg_abi.memory_ty(self)
 +    }
 +}
 +
 +pub trait ArgAbiExt<'gcc, 'tcx> {
 +    fn memory_ty(&self, cx: &CodegenCx<'gcc, 'tcx>) -> Type<'gcc>;
 +    fn store(&self, bx: &mut Builder<'_, 'gcc, 'tcx>, val: RValue<'gcc>, dst: PlaceRef<'tcx, RValue<'gcc>>);
 +    fn store_fn_arg(&self, bx: &mut Builder<'_, 'gcc, 'tcx>, idx: &mut usize, dst: PlaceRef<'tcx, RValue<'gcc>>);
 +}
 +
 +impl<'gcc, 'tcx> ArgAbiExt<'gcc, 'tcx> for ArgAbi<'tcx, Ty<'tcx>> {
 +    /// Gets the GCC type for a place of the original Rust type of
 +    /// this argument/return, i.e., the result of `type_of::type_of`.
 +    fn memory_ty(&self, cx: &CodegenCx<'gcc, 'tcx>) -> Type<'gcc> {
 +        self.layout.gcc_type(cx, true)
 +    }
 +
 +    /// Stores a direct/indirect value described by this ArgAbi into a
 +    /// place for the original Rust type of this argument/return.
 +    /// Can be used both for storing formal arguments into Rust variables
 +    /// and for storing the results of call/invoke instructions into their destinations.
 +    fn store(&self, bx: &mut Builder<'_, 'gcc, 'tcx>, val: RValue<'gcc>, dst: PlaceRef<'tcx, RValue<'gcc>>) {
 +        if self.is_ignore() {
 +            return;
 +        }
 +        if self.is_sized_indirect() {
 +            OperandValue::Ref(val, None, self.layout.align.abi).store(bx, dst)
 +        }
 +        else if self.is_unsized_indirect() {
 +            bug!("unsized `ArgAbi` must be handled through `store_fn_arg`");
 +        }
 +        else if let PassMode::Cast(cast) = self.mode {
 +            // FIXME(eddyb): Figure out when the simpler Store is safe, clang
 +            // uses it for i16 -> {i8, i8}, but not for i24 -> {i8, i8, i8}.
 +            let can_store_through_cast_ptr = false;
 +            if can_store_through_cast_ptr {
 +                let cast_ptr_llty = bx.type_ptr_to(cast.gcc_type(bx));
 +                let cast_dst = bx.pointercast(dst.llval, cast_ptr_llty);
 +                bx.store(val, cast_dst, self.layout.align.abi);
 +            }
 +            else {
 +                // The actual return type is a struct, but the ABI
 +                // adaptation code has cast it into some scalar type.  The
 +                // code that follows is the only reliable way I have
 +                // found to do a transform like i64 -> {i32,i32}.
 +                // Basically we dump the data onto the stack then memcpy it.
 +                //
 +                // Other approaches I tried:
 +                // - Casting rust ret pointer to the foreign type and using Store
 +                //   is (a) unsafe if size of foreign type > size of rust type and
 +                //   (b) runs afoul of strict aliasing rules, yielding invalid
 +                //   assembly under -O (specifically, the store gets removed).
 +                // - Truncating foreign type to correct integral type and then
 +                //   bitcasting to the struct type yields invalid cast errors.
 +
 +                // We instead thus allocate some scratch space...
 +                let scratch_size = cast.size(bx);
 +                let scratch_align = cast.align(bx);
 +                let llscratch = bx.alloca(cast.gcc_type(bx), scratch_align);
 +                bx.lifetime_start(llscratch, scratch_size);
 +
 +                // ... where we first store the value...
 +                bx.store(val, llscratch, scratch_align);
 +
 +                // ... and then memcpy it to the intended destination.
 +                bx.memcpy(
 +                    dst.llval,
 +                    self.layout.align.abi,
 +                    llscratch,
 +                    scratch_align,
 +                    bx.const_usize(self.layout.size.bytes()),
 +                    MemFlags::empty(),
 +                );
 +
 +                bx.lifetime_end(llscratch, scratch_size);
 +            }
 +        }
 +        else {
 +            OperandValue::Immediate(val).store(bx, dst);
 +        }
 +    }
 +
 +    fn store_fn_arg<'a>(&self, bx: &mut Builder<'a, 'gcc, 'tcx>, idx: &mut usize, dst: PlaceRef<'tcx, RValue<'gcc>>) {
 +        let mut next = || {
 +            let val = bx.current_func().get_param(*idx as i32);
 +            *idx += 1;
 +            val.to_rvalue()
 +        };
 +        match self.mode {
 +            PassMode::Ignore => {}
 +            PassMode::Pair(..) => {
 +                OperandValue::Pair(next(), next()).store(bx, dst);
 +            }
 +            PassMode::Indirect { extra_attrs: Some(_), .. } => {
 +                OperandValue::Ref(next(), Some(next()), self.layout.align.abi).store(bx, dst);
 +            }
 +            PassMode::Direct(_) | PassMode::Indirect { extra_attrs: None, .. } | PassMode::Cast(_) => {
 +                let next_arg = next();
 +                self.store(bx, next_arg.to_rvalue(), dst);
 +            }
 +        }
 +    }
 +}
 +
 +fn int_type_width_signed<'gcc, 'tcx>(ty: Ty<'tcx>, cx: &CodegenCx<'gcc, 'tcx>) -> Option<(u64, bool)> {
 +    match ty.kind() {
 +        ty::Int(t) => Some((
 +            match t {
 +                rustc_middle::ty::IntTy::Isize => u64::from(cx.tcx.sess.target.pointer_width),
 +                rustc_middle::ty::IntTy::I8 => 8,
 +                rustc_middle::ty::IntTy::I16 => 16,
 +                rustc_middle::ty::IntTy::I32 => 32,
 +                rustc_middle::ty::IntTy::I64 => 64,
 +                rustc_middle::ty::IntTy::I128 => 128,
 +            },
 +            true,
 +        )),
 +        ty::Uint(t) => Some((
 +            match t {
 +                rustc_middle::ty::UintTy::Usize => u64::from(cx.tcx.sess.target.pointer_width),
 +                rustc_middle::ty::UintTy::U8 => 8,
 +                rustc_middle::ty::UintTy::U16 => 16,
 +                rustc_middle::ty::UintTy::U32 => 32,
 +                rustc_middle::ty::UintTy::U64 => 64,
 +                rustc_middle::ty::UintTy::U128 => 128,
 +            },
 +            false,
 +        )),
 +        _ => None,
 +    }
 +}
 +
 +impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
 +    fn bit_reverse(&mut self, width: u64, value: RValue<'gcc>) -> RValue<'gcc> {
 +        let typ = value.get_type();
 +        let context = &self.cx.context;
 +        match width {
 +            8 => {
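 +                // Reverse the byte by swapping successively smaller groups of bits:
 +                // first the two nibbles, then the bit pairs within each nibble,
 +                // then the adjacent bits.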
 +                // First step.
 +                let left = self.and(value, context.new_rvalue_from_int(typ, 0xF0));
 +                let left = self.lshr(left, context.new_rvalue_from_int(typ, 4));
 +                let right = self.and(value, context.new_rvalue_from_int(typ, 0x0F));
 +                let right = self.shl(right, context.new_rvalue_from_int(typ, 4));
 +                let step1 = self.or(left, right);
 +
 +                // Second step.
 +                let left = self.and(step1, context.new_rvalue_from_int(typ, 0xCC));
 +                let left = self.lshr(left, context.new_rvalue_from_int(typ, 2));
 +                let right = self.and(step1, context.new_rvalue_from_int(typ, 0x33));
 +                let right = self.shl(right, context.new_rvalue_from_int(typ, 2));
 +                let step2 = self.or(left, right);
 +
 +                // Third step.
 +                let left = self.and(step2, context.new_rvalue_from_int(typ, 0xAA));
 +                let left = self.lshr(left, context.new_rvalue_from_int(typ, 1));
 +                let right = self.and(step2, context.new_rvalue_from_int(typ, 0x55));
 +                let right = self.shl(right, context.new_rvalue_from_int(typ, 1));
 +                let step3 = self.or(left, right);
 +
 +                step3
 +            },
 +            16 => {
 +                // First step.
 +                let left = self.and(value, context.new_rvalue_from_int(typ, 0x5555));
 +                let left = self.shl(left, context.new_rvalue_from_int(typ, 1));
 +                let right = self.and(value, context.new_rvalue_from_int(typ, 0xAAAA));
 +                let right = self.lshr(right, context.new_rvalue_from_int(typ, 1));
 +                let step1 = self.or(left, right);
 +
 +                // Second step.
 +                let left = self.and(step1, context.new_rvalue_from_int(typ, 0x3333));
 +                let left = self.shl(left, context.new_rvalue_from_int(typ, 2));
 +                let right = self.and(step1, context.new_rvalue_from_int(typ, 0xCCCC));
 +                let right = self.lshr(right, context.new_rvalue_from_int(typ, 2));
 +                let step2 = self.or(left, right);
 +
 +                // Third step.
 +                let left = self.and(step2, context.new_rvalue_from_int(typ, 0x0F0F));
 +                let left = self.shl(left, context.new_rvalue_from_int(typ, 4));
 +                let right = self.and(step2, context.new_rvalue_from_int(typ, 0xF0F0));
 +                let right = self.lshr(right, context.new_rvalue_from_int(typ, 4));
 +                let step3 = self.or(left, right);
 +
 +                // Fourth step.
 +                let left = self.and(step3, context.new_rvalue_from_int(typ, 0x00FF));
 +                let left = self.shl(left, context.new_rvalue_from_int(typ, 8));
 +                let right = self.and(step3, context.new_rvalue_from_int(typ, 0xFF00));
 +                let right = self.lshr(right, context.new_rvalue_from_int(typ, 8));
 +                let step4 = self.or(left, right);
 +
 +                step4
 +            },
 +            32 => {
 +                // TODO: Refactor with other implementations.
 +                // First step.
 +                let left = self.and(value, context.new_rvalue_from_long(typ, 0x55555555));
 +                let left = self.shl(left, context.new_rvalue_from_long(typ, 1));
 +                let right = self.and(value, context.new_rvalue_from_long(typ, 0xAAAAAAAA));
 +                let right = self.lshr(right, context.new_rvalue_from_long(typ, 1));
 +                let step1 = self.or(left, right);
 +
 +                // Second step.
 +                let left = self.and(step1, context.new_rvalue_from_long(typ, 0x33333333));
 +                let left = self.shl(left, context.new_rvalue_from_long(typ, 2));
 +                let right = self.and(step1, context.new_rvalue_from_long(typ, 0xCCCCCCCC));
 +                let right = self.lshr(right, context.new_rvalue_from_long(typ, 2));
 +                let step2 = self.or(left, right);
 +
 +                // Third step.
 +                let left = self.and(step2, context.new_rvalue_from_long(typ, 0x0F0F0F0F));
 +                let left = self.shl(left, context.new_rvalue_from_long(typ, 4));
 +                let right = self.and(step2, context.new_rvalue_from_long(typ, 0xF0F0F0F0));
 +                let right = self.lshr(right, context.new_rvalue_from_long(typ, 4));
 +                let step3 = self.or(left, right);
 +
 +                // Fourth step.
 +                let left = self.and(step3, context.new_rvalue_from_long(typ, 0x00FF00FF));
 +                let left = self.shl(left, context.new_rvalue_from_long(typ, 8));
 +                let right = self.and(step3, context.new_rvalue_from_long(typ, 0xFF00FF00));
 +                let right = self.lshr(right, context.new_rvalue_from_long(typ, 8));
 +                let step4 = self.or(left, right);
 +
 +                // Fifth step.
 +                let left = self.and(step4, context.new_rvalue_from_long(typ, 0x0000FFFF));
 +                let left = self.shl(left, context.new_rvalue_from_long(typ, 16));
 +                let right = self.and(step4, context.new_rvalue_from_long(typ, 0xFFFF0000));
 +                let right = self.lshr(right, context.new_rvalue_from_long(typ, 16));
 +                let step5 = self.or(left, right);
 +
 +                step5
 +            },
 +            64 => {
 +                // First step.
 +                let left = self.shl(value, context.new_rvalue_from_long(typ, 32));
 +                let right = self.lshr(value, context.new_rvalue_from_long(typ, 32));
 +                let step1 = self.or(left, right);
 +
 +                // Second step.
 +                let left = self.and(step1, context.new_rvalue_from_long(typ, 0x0001FFFF0001FFFF));
 +                let left = self.shl(left, context.new_rvalue_from_long(typ, 15));
 +                let right = self.and(step1, context.new_rvalue_from_long(typ, 0xFFFE0000FFFE0000u64 as i64)); // TODO: transmute the number instead?
 +                let right = self.lshr(right, context.new_rvalue_from_long(typ, 17));
 +                let step2 = self.or(left, right);
 +
 +                // Third step.
 +                let left = self.lshr(step2, context.new_rvalue_from_long(typ, 10));
 +                let left = self.xor(step2, left);
 +                let temp = self.and(left, context.new_rvalue_from_long(typ, 0x003F801F003F801F));
 +
 +                let left = self.shl(temp, context.new_rvalue_from_long(typ, 10));
 +                let left = self.or(temp, left);
 +                let step3 = self.xor(left, step2);
 +
 +                // Fourth step.
 +                let left = self.lshr(step3, context.new_rvalue_from_long(typ, 4));
 +                let left = self.xor(step3, left);
 +                let temp = self.and(left, context.new_rvalue_from_long(typ, 0x0E0384210E038421));
 +
 +                let left = self.shl(temp, context.new_rvalue_from_long(typ, 4));
 +                let left = self.or(temp, left);
 +                let step4 = self.xor(left, step3);
 +
 +                // Fifth step.
 +                let left = self.lshr(step4, context.new_rvalue_from_long(typ, 2));
 +                let left = self.xor(step4, left);
 +                let temp = self.and(left, context.new_rvalue_from_long(typ, 0x2248884222488842));
 +
 +                let left = self.shl(temp, context.new_rvalue_from_long(typ, 2));
 +                let left = self.or(temp, left);
 +                let step5 = self.xor(left, step4);
 +
 +                step5
 +            },
 +            128 => {
 +                // TODO: find a more efficient implementation?
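 +                // Split the value into its two 64-bit halves, reverse each half
 +                // separately, then swap the reversed halves.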
 +                let sixty_four = self.context.new_rvalue_from_long(typ, 64);
 +                let high = self.context.new_cast(None, value >> sixty_four, self.u64_type);
 +                let low = self.context.new_cast(None, value, self.u64_type);
 +
 +                let reversed_high = self.bit_reverse(64, high);
 +                let reversed_low = self.bit_reverse(64, low);
 +
 +                let new_low = self.context.new_cast(None, reversed_high, typ);
 +                let new_high = self.context.new_cast(None, reversed_low, typ) << sixty_four;
 +
 +                new_low | new_high
 +            },
 +            _ => {
 +                panic!("cannot bit reverse with width = {}", width);
 +            },
 +        }
 +    }
 +
 +    fn count_leading_zeroes(&self, width: u64, arg: RValue<'gcc>) -> RValue<'gcc> {
 +        // TODO: use width?
 +        let arg_type = arg.get_type();
 +        let count_leading_zeroes =
 +            if arg_type.is_uint(&self.cx) {
 +                "__builtin_clz"
 +            }
 +            else if arg_type.is_ulong(&self.cx) {
 +                "__builtin_clzl"
 +            }
 +            else if arg_type.is_ulonglong(&self.cx) {
 +                "__builtin_clzll"
 +            }
 +            else if width == 128 {
 +                // Algorithm from: https://stackoverflow.com/a/28433850/389119
 +                let array_type = self.context.new_array_type(None, arg_type, 3);
 +                let result = self.current_func()
 +                    .new_local(None, array_type, "count_leading_zeroes_results");
 +
 +                let sixty_four = self.context.new_rvalue_from_long(arg_type, 64);
 +                let high = self.context.new_cast(None, arg >> sixty_four, self.u64_type);
 +                let low = self.context.new_cast(None, arg, self.u64_type);
 +
 +                let zero = self.context.new_rvalue_zero(self.usize_type);
 +                let one = self.context.new_rvalue_one(self.usize_type);
 +                let two = self.context.new_rvalue_from_long(self.usize_type, 2);
 +
 +                let clzll = self.context.get_builtin_function("__builtin_clzll");
 +
 +                let first_elem = self.context.new_array_access(None, result, zero);
 +                let first_value = self.context.new_cast(None, self.context.new_call(None, clzll, &[high]), arg_type);
 +                self.llbb()
 +                    .add_assignment(None, first_elem, first_value);
 +
 +                let second_elem = self.context.new_array_access(None, result, one);
 +                let second_value = self.context.new_cast(None, self.context.new_call(None, clzll, &[low]), arg_type) + sixty_four;
 +                self.llbb()
 +                    .add_assignment(None, second_elem, second_value);
 +
 +                let third_elem = self.context.new_array_access(None, result, two);
 +                let third_value = self.context.new_rvalue_from_long(arg_type, 128);
 +                self.llbb()
 +                    .add_assignment(None, third_elem, third_value);
 +
 +                let not_high = self.context.new_unary_op(None, UnaryOp::LogicalNegate, self.u64_type, high);
 +                let not_low = self.context.new_unary_op(None, UnaryOp::LogicalNegate, self.u64_type, low);
 +                let not_low_and_not_high = not_low & not_high;
 +                let index = not_high + not_low_and_not_high;
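 +                // `not_high` is 1 only when the high half is zero and
 +                // `not_low_and_not_high` is 1 only when both halves are zero, so `index`
 +                // selects the matching precomputed result: clzll(high), clzll(low) + 64,
 +                // or 128 for a zero argument.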
 +
 +                let res = self.context.new_array_access(None, result, index);
 +
 +                return self.context.new_cast(None, res, arg_type);
 +            }
 +            else {
 +                let count_leading_zeroes = self.context.get_builtin_function("__builtin_clz");
 +                let arg = self.context.new_cast(None, arg, self.uint_type);
 +                let diff = self.int_width(self.uint_type) - self.int_width(arg_type);
 +                let diff = self.context.new_rvalue_from_long(self.int_type, diff);
 +                let res = self.context.new_call(None, count_leading_zeroes, &[arg]) - diff;
 +                return self.context.new_cast(None, res, arg_type);
 +            };
 +        let count_leading_zeroes = self.context.get_builtin_function(count_leading_zeroes);
 +        let res = self.context.new_call(None, count_leading_zeroes, &[arg]);
 +        self.context.new_cast(None, res, arg_type)
 +    }
 +
 +    fn count_trailing_zeroes(&self, _width: u64, arg: RValue<'gcc>) -> RValue<'gcc> {
 +        let arg_type = arg.get_type();
 +        let (count_trailing_zeroes, expected_type) =
 +            if arg_type.is_uchar(&self.cx) || arg_type.is_ushort(&self.cx) || arg_type.is_uint(&self.cx) {
 +                // NOTE: we don't need to & 0xFF for uchar because the result is undefined on zero.
 +                ("__builtin_ctz", self.cx.uint_type)
 +            }
 +            else if arg_type.is_ulong(&self.cx) {
 +                ("__builtin_ctzl", self.cx.ulong_type)
 +            }
 +            else if arg_type.is_ulonglong(&self.cx) {
 +                ("__builtin_ctzll", self.cx.ulonglong_type)
 +            }
 +            else if arg_type.is_u128(&self.cx) {
 +                // Adapted from the algorithm to count leading zeroes from: https://stackoverflow.com/a/28433850/389119
 +                let array_type = self.context.new_array_type(None, arg_type, 3);
 +                let result = self.current_func()
 +                    .new_local(None, array_type, "count_trailing_zeroes_results");
 +
 +                let sixty_four = self.context.new_rvalue_from_long(arg_type, 64);
 +                let high = self.context.new_cast(None, arg >> sixty_four, self.u64_type);
 +                let low = self.context.new_cast(None, arg, self.u64_type);
 +
 +                let zero = self.context.new_rvalue_zero(self.usize_type);
 +                let one = self.context.new_rvalue_one(self.usize_type);
 +                let two = self.context.new_rvalue_from_long(self.usize_type, 2);
 +
 +                let ctzll = self.context.get_builtin_function("__builtin_ctzll");
 +
 +                let first_elem = self.context.new_array_access(None, result, zero);
 +                let first_value = self.context.new_cast(None, self.context.new_call(None, ctzll, &[low]), arg_type);
 +                self.llbb()
 +                    .add_assignment(None, first_elem, first_value);
 +
 +                let second_elem = self.context.new_array_access(None, result, one);
 +                let second_value = self.context.new_cast(None, self.context.new_call(None, ctzll, &[high]), arg_type) + sixty_four;
 +                self.llbb()
 +                    .add_assignment(None, second_elem, second_value);
 +
 +                let third_elem = self.context.new_array_access(None, result, two);
 +                let third_value = self.context.new_rvalue_from_long(arg_type, 128);
 +                self.llbb()
 +                    .add_assignment(None, third_elem, third_value);
 +
 +                let not_low = self.context.new_unary_op(None, UnaryOp::LogicalNegate, self.u64_type, low);
 +                let not_high = self.context.new_unary_op(None, UnaryOp::LogicalNegate, self.u64_type, high);
 +                let not_low_and_not_high = not_low & not_high;
 +                let index = not_low + not_low_and_not_high;
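 +                // `not_low` is 1 only when the low half is zero and `not_low_and_not_high`
 +                // is 1 only when both halves are zero, so `index` selects the matching
 +                // precomputed result: ctzll(low), ctzll(high) + 64, or 128 for a zero
 +                // argument.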
 +
 +                let res = self.context.new_array_access(None, result, index);
 +
 +                return self.context.new_cast(None, res, arg_type);
 +            }
 +            else {
 +                unimplemented!("count_trailing_zeroes for {:?}", arg_type);
 +            };
 +        let count_trailing_zeroes = self.context.get_builtin_function(count_trailing_zeroes);
 +        let arg =
 +            if arg_type != expected_type {
 +                self.context.new_cast(None, arg, expected_type)
 +            }
 +            else {
 +                arg
 +            };
 +        let res = self.context.new_call(None, count_trailing_zeroes, &[arg]);
 +        self.context.new_cast(None, res, arg_type)
 +    }
 +
 +    fn int_width(&self, typ: Type<'gcc>) -> i64 {
 +        self.cx.int_width(typ) as i64
 +    }
 +
 +    fn pop_count(&self, value: RValue<'gcc>) -> RValue<'gcc> {
 +        // TODO: use the optimized version with fewer operations.
 +        let value_type = value.get_type();
 +
 +        if value_type.is_u128(&self.cx) {
 +            // TODO: implement this with the normal algorithm below to get a more efficient
 +            // implementation (one that does not require a call to __popcountdi2).
 +            let popcount = self.context.get_builtin_function("__builtin_popcountll");
 +            let sixty_four = self.context.new_rvalue_from_long(value_type, 64);
 +            let high = self.context.new_cast(None, value >> sixty_four, self.cx.ulonglong_type);
 +            let high = self.context.new_call(None, popcount, &[high]);
 +            let low = self.context.new_cast(None, value, self.cx.ulonglong_type);
 +            let low = self.context.new_call(None, popcount, &[low]);
 +            return high + low;
 +        }
 +
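 +        // SWAR (SIMD within a register) bit count: adjacent bits are summed into 2-bit
 +        // fields, then 2-bit fields into 4-bit fields, and so on, doubling the field
 +        // width at each step until the whole value holds the population count.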
 +        // First step.
 +        let mask = self.context.new_rvalue_from_long(value_type, 0x5555555555555555);
 +        let left = value & mask;
 +        let shifted = value >> self.context.new_rvalue_from_int(value_type, 1);
 +        let right = shifted & mask;
 +        let value = left + right;
 +
 +        // Second step.
 +        let mask = self.context.new_rvalue_from_long(value_type, 0x3333333333333333);
 +        let left = value & mask;
 +        let shifted = value >> self.context.new_rvalue_from_int(value_type, 2);
 +        let right = shifted & mask;
 +        let value = left + right;
 +
 +        // Third step.
 +        let mask = self.context.new_rvalue_from_long(value_type, 0x0F0F0F0F0F0F0F0F);
 +        let left = value & mask;
 +        let shifted = value >> self.context.new_rvalue_from_int(value_type, 4);
 +        let right = shifted & mask;
 +        let value = left + right;
 +
 +        if value_type.is_u8(&self.cx) {
 +            return value;
 +        }
 +
 +        // Fourth step.
 +        let mask = self.context.new_rvalue_from_long(value_type, 0x00FF00FF00FF00FF);
 +        let left = value & mask;
 +        let shifted = value >> self.context.new_rvalue_from_int(value_type, 8);
 +        let right = shifted & mask;
 +        let value = left + right;
 +
 +        if value_type.is_u16(&self.cx) {
 +            return value;
 +        }
 +
 +        // Fifth step.
 +        let mask = self.context.new_rvalue_from_long(value_type, 0x0000FFFF0000FFFF);
 +        let left = value & mask;
 +        let shifted = value >> self.context.new_rvalue_from_int(value_type, 16);
 +        let right = shifted & mask;
 +        let value = left + right;
 +
 +        if value_type.is_u32(&self.cx) {
 +            return value;
 +        }
 +
 +        // Sixth step.
 +        let mask = self.context.new_rvalue_from_long(value_type, 0x00000000FFFFFFFF);
 +        let left = value & mask;
 +        let shifted = value >> self.context.new_rvalue_from_int(value_type, 32);
 +        let right = shifted & mask;
 +        let value = left + right;
 +
 +        value
 +    }
 +
 +    // Algorithm from: https://blog.regehr.org/archives/1063
 +    fn rotate_left(&mut self, value: RValue<'gcc>, shift: RValue<'gcc>, width: u64) -> RValue<'gcc> {
 +        let max = self.context.new_rvalue_from_long(shift.get_type(), width as i64);
 +        let shift = shift % max;
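 +        // Shift right by `(-shift) & (width - 1)` so that a shift amount of zero never
 +        // results in a shift by the full width, which would be undefined behavior.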
 +        let lhs = self.shl(value, shift);
 +        let result_and =
 +            self.and(
 +                self.context.new_unary_op(None, UnaryOp::Minus, shift.get_type(), shift),
 +                self.context.new_rvalue_from_long(shift.get_type(), width as i64 - 1),
 +            );
 +        let rhs = self.lshr(value, result_and);
 +        self.or(lhs, rhs)
 +    }
 +
 +    // Algorithm from: https://blog.regehr.org/archives/1063
 +    fn rotate_right(&mut self, value: RValue<'gcc>, shift: RValue<'gcc>, width: u64) -> RValue<'gcc> {
 +        let max = self.context.new_rvalue_from_long(shift.get_type(), width as i64);
 +        let shift = shift % max;
 +        let lhs = self.lshr(value, shift);
 +        let result_and =
 +            self.and(
 +                self.context.new_unary_op(None, UnaryOp::Minus, shift.get_type(), shift),
 +                self.context.new_rvalue_from_long(shift.get_type(), width as i64 - 1),
 +            );
 +        let rhs = self.shl(value, result_and);
 +        self.or(lhs, rhs)
 +    }
 +
 +    fn saturating_add(&mut self, lhs: RValue<'gcc>, rhs: RValue<'gcc>, signed: bool, width: u64) -> RValue<'gcc> {
 +        let func = self.current_func.borrow().expect("func");
 +
 +        if signed {
 +            // Algorithm from: https://stackoverflow.com/a/56531252/389119
 +            let after_block = func.new_block("after");
 +            let func_name =
 +                match width {
 +                    8 => "__builtin_add_overflow",
 +                    16 => "__builtin_add_overflow",
 +                    32 => "__builtin_sadd_overflow",
 +                    64 => "__builtin_saddll_overflow",
 +                    128 => "__builtin_add_overflow",
 +                    _ => unreachable!(),
 +                };
 +            let overflow_func = self.context.get_builtin_function(func_name);
 +            let result_type = lhs.get_type();
 +            let res = func.new_local(None, result_type, "saturating_sum");
 +            let overflow = self.overflow_call(overflow_func, &[lhs, rhs, res.get_address(None)], None);
 +
 +            let then_block = func.new_block("then");
 +
 +            let unsigned_type = self.context.new_int_type(width as i32 / 8, false);
 +            let shifted = self.context.new_cast(None, lhs, unsigned_type) >> self.context.new_rvalue_from_int(unsigned_type, width as i32 - 1);
 +            let uint_max = self.context.new_unary_op(None, UnaryOp::BitwiseNegate, unsigned_type,
 +                self.context.new_rvalue_from_int(unsigned_type, 0)
 +            );
 +            let int_max = uint_max >> self.context.new_rvalue_one(unsigned_type);
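 +            // `shifted` is the sign bit of `lhs`, so `shifted + int_max` is the saturation
 +            // value: INT_MAX on positive overflow and INT_MIN on negative overflow.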
 +            then_block.add_assignment(None, res, self.context.new_cast(None, shifted + int_max, result_type));
 +            then_block.end_with_jump(None, after_block);
 +
 +            self.block.expect("block").end_with_conditional(None, overflow, then_block, after_block);
 +
 +            // NOTE: since jumps were added in a place rustc does not
 +            // expect, the current blocks in the state need to be updated.
 +            *self.current_block.borrow_mut() = Some(after_block);
 +            self.block = Some(after_block);
 +
 +            res.to_rvalue()
 +        }
 +        else {
 +            // Algorithm from: http://locklessinc.com/articles/sat_arithmetic/
 +            let res = lhs + rhs;
 +            let res_type = res.get_type();
 +            let cond = self.context.new_comparison(None, ComparisonOp::LessThan, res, lhs);
 +            let value = self.context.new_unary_op(None, UnaryOp::Minus, res_type, self.context.new_cast(None, cond, res_type));
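 +            // `cond` is 1 when the addition wrapped; negating it yields an all-ones mask
 +            // in that case, so the OR saturates the result to the maximum value.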
 +            res | value
 +        }
 +    }
 +
 +    // Algorithm from: https://locklessinc.com/articles/sat_arithmetic/
 +    fn saturating_sub(&mut self, lhs: RValue<'gcc>, rhs: RValue<'gcc>, signed: bool, width: u64) -> RValue<'gcc> {
 +        if signed {
 +            // Also based on algorithm from: https://stackoverflow.com/a/56531252/389119
 +            let func_name =
 +                match width {
 +                    8 => "__builtin_sub_overflow",
 +                    16 => "__builtin_sub_overflow",
 +                    32 => "__builtin_ssub_overflow",
 +                    64 => "__builtin_ssubll_overflow",
 +                    128 => "__builtin_sub_overflow",
 +                    _ => unreachable!(),
 +                };
 +            let overflow_func = self.context.get_builtin_function(func_name);
 +            let result_type = lhs.get_type();
 +            let func = self.current_func.borrow().expect("func");
 +            let res = func.new_local(None, result_type, "saturating_diff");
 +            let overflow = self.overflow_call(overflow_func, &[lhs, rhs, res.get_address(None)], None);
 +
 +            let then_block = func.new_block("then");
 +            let after_block = func.new_block("after");
 +
 +            let unsigned_type = self.context.new_int_type(width as i32 / 8, false);
 +            let shifted = self.context.new_cast(None, lhs, unsigned_type) >> self.context.new_rvalue_from_int(unsigned_type, width as i32 - 1);
 +            let uint_max = self.context.new_unary_op(None, UnaryOp::BitwiseNegate, unsigned_type,
 +                self.context.new_rvalue_from_int(unsigned_type, 0)
 +            );
 +            let int_max = uint_max >> self.context.new_rvalue_one(unsigned_type);
 +            then_block.add_assignment(None, res, self.context.new_cast(None, shifted + int_max, result_type));
 +            then_block.end_with_jump(None, after_block);
 +
 +            self.block.expect("block").end_with_conditional(None, overflow, then_block, after_block);
 +
 +            // NOTE: since jumps were added in a place rustc does not
 +            // expect, the current blocks in the state need to be updated.
 +            *self.current_block.borrow_mut() = Some(after_block);
 +            self.block = Some(after_block);
 +
 +            res.to_rvalue()
 +        }
 +        else {
 +            let res = lhs - rhs;
 +            let comparison = self.context.new_comparison(None, ComparisonOp::LessThanEquals, res, lhs);
 +            let comparison = self.context.new_cast(None, comparison, lhs.get_type());
 +            let unary_op = self.context.new_unary_op(None, UnaryOp::Minus, comparison.get_type(), comparison);
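 +            // `comparison` is 1 when no wrap occurred (res <= lhs); negating it yields an
 +            // all-ones mask in that case and 0 on underflow, so the AND clamps an
 +            // underflowing result to 0.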
 +            self.and(res, unary_op)
 +        }
 +    }
 +}
 +
 +fn try_intrinsic<'gcc, 'tcx>(bx: &mut Builder<'_, 'gcc, 'tcx>, try_func: RValue<'gcc>, data: RValue<'gcc>, _catch_func: RValue<'gcc>, dest: RValue<'gcc>) {
 +    if bx.sess().panic_strategy() == PanicStrategy::Abort {
++        bx.call(bx.type_void(), try_func, &[data], None);
 +        // Return 0 unconditionally from the intrinsic call;
 +        // we can never unwind.
 +        let ret_align = bx.tcx.data_layout.i32_align.abi;
 +        bx.store(bx.const_i32(0), dest, ret_align);
 +    }
 +    else if wants_msvc_seh(bx.sess()) {
 +        unimplemented!();
 +        //codegen_msvc_try(bx, try_func, data, catch_func, dest);
 +    }
 +    else {
 +        unimplemented!();
 +        //codegen_gnu_try(bx, try_func, data, catch_func, dest);
 +    }
 +}
 +
 +// MSVC's definition of the `rust_try` function.
 +//
 +// This implementation uses the new exception handling instructions in LLVM
 +// that support SEH on MSVC targets. Although these instructions are meant to
 +// work for all targets, as of this writing LLVM does not recommend using them,
 +// as the old ones are still better optimized.
 +/*fn codegen_msvc_try<'a, 'gcc, 'tcx>(_bx: &mut Builder<'a, 'gcc, 'tcx>, _try_func: RValue<'gcc>, _data: RValue<'gcc>, _catch_func: RValue<'gcc>, _dest: RValue<'gcc>) {
 +    unimplemented!();
 +    /*let llfn = get_rust_try_fn(bx, &mut |mut bx| {
 +        bx.set_personality_fn(bx.eh_personality());
 +        bx.sideeffect();
 +
 +        let mut normal = bx.build_sibling_block("normal");
 +        let mut catchswitch = bx.build_sibling_block("catchswitch");
 +        let mut catchpad = bx.build_sibling_block("catchpad");
 +        let mut caught = bx.build_sibling_block("caught");
 +
 +        let try_func = llvm::get_param(bx.llfn(), 0);
 +        let data = llvm::get_param(bx.llfn(), 1);
 +        let catch_func = llvm::get_param(bx.llfn(), 2);
 +
 +        // We're generating an IR snippet that looks like:
 +        //
 +        //   declare i32 @rust_try(%try_func, %data, %catch_func) {
 +        //      %slot = alloca u8*
 +        //      invoke %try_func(%data) to label %normal unwind label %catchswitch
 +        //
 +        //   normal:
 +        //      ret i32 0
 +        //
 +        //   catchswitch:
 +        //      %cs = catchswitch within none [%catchpad] unwind to caller
 +        //
 +        //   catchpad:
 +        //      %tok = catchpad within %cs [%type_descriptor, 0, %slot]
 +        //      %ptr = load %slot
 +        //      call %catch_func(%data, %ptr)
 +        //      catchret from %tok to label %caught
 +        //
 +        //   caught:
 +        //      ret i32 1
 +        //   }
 +        //
 +        // This structure follows the basic usage of throw/try/catch in LLVM.
 +        // For example, compile this C++ snippet to see what LLVM generates:
 +        //
 +        //      #include <stdint.h>
 +        //
 +        //      struct rust_panic {
 +        //          rust_panic(const rust_panic&);
 +        //          ~rust_panic();
 +        //
 +        //          uint64_t x[2];
 +        //      };
 +        //
 +        //      int __rust_try(
 +        //          void (*try_func)(void*),
 +        //          void *data,
 +        //          void (*catch_func)(void*, void*) noexcept
 +        //      ) {
 +        //          try {
 +        //              try_func(data);
 +        //              return 0;
 +        //          } catch(rust_panic& a) {
 +        //              catch_func(data, &a);
 +        //              return 1;
 +        //          }
 +        //      }
 +        //
 +        // More information can be found in libstd's seh.rs implementation.
 +        let ptr_align = bx.tcx().data_layout.pointer_align.abi;
 +        let slot = bx.alloca(bx.type_i8p(), ptr_align);
 +        bx.invoke(try_func, &[data], normal.llbb(), catchswitch.llbb(), None);
 +
 +        normal.ret(bx.const_i32(0));
 +
 +        let cs = catchswitch.catch_switch(None, None, 1);
 +        catchswitch.add_handler(cs, catchpad.llbb());
 +
 +        // We can't use the TypeDescriptor defined in libpanic_unwind because it
 +        // might be in another DLL and the SEH encoding only supports specifying
 +        // a TypeDescriptor from the current module.
 +        //
 +        // However this isn't an issue since the MSVC runtime uses string
 +        // comparison on the type name to match TypeDescriptors rather than
 +        // pointer equality.
 +        //
 +        // So instead we generate a new TypeDescriptor in each module that uses
 +        // `try` and let the linker merge duplicate definitions in the same
 +        // module.
 +        //
 +        // When modifying, make sure that the type_name string exactly matches
 +        // the one used in src/libpanic_unwind/seh.rs.
 +        let type_info_vtable = bx.declare_global("??_7type_info@@6B@", bx.type_i8p());
 +        let type_name = bx.const_bytes(b"rust_panic\0");
 +        let type_info =
 +            bx.const_struct(&[type_info_vtable, bx.const_null(bx.type_i8p()), type_name], false);
 +        let tydesc = bx.declare_global("__rust_panic_type_info", bx.val_ty(type_info));
 +        unsafe {
 +            llvm::LLVMRustSetLinkage(tydesc, llvm::Linkage::LinkOnceODRLinkage);
 +            llvm::SetUniqueComdat(bx.llmod, tydesc);
 +            llvm::LLVMSetInitializer(tydesc, type_info);
 +        }
 +
 +        // The flag value of 8 indicates that we are catching the exception by
 +        // reference instead of by value. We can't use catch by value because
 +        // that requires copying the exception object, which we don't support
 +        // since our exception object effectively contains a Box.
 +        //
 +        // Source: MicrosoftCXXABI::getAddrOfCXXCatchHandlerType in clang
 +        let flags = bx.const_i32(8);
 +        let funclet = catchpad.catch_pad(cs, &[tydesc, flags, slot]);
 +        let ptr = catchpad.load(slot, ptr_align);
 +        catchpad.call(catch_func, &[data, ptr], Some(&funclet));
 +
 +        catchpad.catch_ret(&funclet, caught.llbb());
 +
 +        caught.ret(bx.const_i32(1));
 +    });
 +
 +    // Note that no invoke is used here because by definition this function
 +    // can't panic (that's what it's catching).
 +    let ret = bx.call(llfn, &[try_func, data, catch_func], None);
 +    let i32_align = bx.tcx().data_layout.i32_align.abi;
 +    bx.store(ret, dest, i32_align);*/
 +}*/
 +
 +// Definition of the standard `try` function for Rust using the GNU-like model
 +// of exceptions (e.g., the normal semantics of LLVM's `landingpad` and `invoke`
 +// instructions).
 +//
 +// This codegen is a little surprising because we always call a shim
 +// function instead of inlining the call to `invoke` manually here. This is done
 +// because in LLVM we're only allowed to have one personality per function
 +// definition. The call to the `try` intrinsic is being inlined into the
 +// function calling it, and that function may already have other personality
 +// functions in play. By calling a shim we're guaranteed that our shim will have
 +// the right personality function.
 +/*fn codegen_gnu_try<'a, 'gcc, 'tcx>(_bx: &mut Builder<'a, 'gcc, 'tcx>, _try_func: RValue<'gcc>, _data: RValue<'gcc>, _catch_func: RValue<'gcc>, _dest: RValue<'gcc>) {
 +    unimplemented!();
 +    /*let llfn = get_rust_try_fn(bx, &mut |mut bx| {
 +        // Codegens the shims described above:
 +        //
 +        //   bx:
 +        //      invoke %try_func(%data) normal %normal unwind %catch
 +        //
 +        //   normal:
 +        //      ret 0
 +        //
 +        //   catch:
 +        //      (%ptr, _) = landingpad
 +        //      call %catch_func(%data, %ptr)
 +        //      ret 1
 +
 +        bx.sideeffect();
 +
 +        let mut then = bx.build_sibling_block("then");
 +        let mut catch = bx.build_sibling_block("catch");
 +
 +        let try_func = llvm::get_param(bx.llfn(), 0);
 +        let data = llvm::get_param(bx.llfn(), 1);
 +        let catch_func = llvm::get_param(bx.llfn(), 2);
 +        bx.invoke(try_func, &[data], then.llbb(), catch.llbb(), None);
 +        then.ret(bx.const_i32(0));
 +
 +        // Type indicator for the exception being thrown.
 +        //
 +        // The first value in this tuple is a pointer to the exception object
 +        // being thrown.  The second value is a "selector" indicating which of
 +        // the landing pad clauses the exception's type had been matched to.
 +        // rust_try ignores the selector.
 +        let lpad_ty = bx.type_struct(&[bx.type_i8p(), bx.type_i32()], false);
 +        let vals = catch.landing_pad(lpad_ty, bx.eh_personality(), 1);
 +        let tydesc = match bx.tcx().lang_items().eh_catch_typeinfo() {
 +            Some(tydesc) => {
 +                let tydesc = bx.get_static(tydesc);
 +                bx.bitcast(tydesc, bx.type_i8p())
 +            }
 +            None => bx.const_null(bx.type_i8p()),
 +        };
 +        catch.add_clause(vals, tydesc);
 +        let ptr = catch.extract_value(vals, 0);
 +        catch.call(catch_func, &[data, ptr], None);
 +        catch.ret(bx.const_i32(1));
 +    });
 +
 +    // Note that no invoke is used here because by definition this function
 +    // can't panic (that's what it's catching).
 +    let ret = bx.call(llfn, &[try_func, data, catch_func], None);
 +    let i32_align = bx.tcx().data_layout.i32_align.abi;
 +    bx.store(ret, dest, i32_align);*/
 +}*/
index 90f7d9d9bbace8c7f6e0b5f91626c85934422180,0000000000000000000000000000000000000000..2ef90bca5ac20d7db69d6581ee1ff72e589874d1
mode 100644,000000..100644
--- /dev/null
@@@ -1,355 -1,0 +1,360 @@@
 +use std::convert::TryInto;
 +
 +use gccjit::{RValue, Struct, Type};
 +use rustc_codegen_ssa::traits::{BaseTypeMethods, DerivedTypeMethods};
 +use rustc_codegen_ssa::common::TypeKind;
 +use rustc_middle::bug;
 +use rustc_middle::ty::layout::TyAndLayout;
 +use rustc_target::abi::{AddressSpace, Align, Integer, Size};
 +
 +use crate::common::TypeReflection;
 +use crate::context::CodegenCx;
 +use crate::type_of::LayoutGccExt;
 +
 +impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> {
 +    pub fn type_ix(&self, num_bits: u64) -> Type<'gcc> {
 +        // Round the requested width up to the next power-of-two byte size; the context
 +        // only defines 1, 2, 4, 8 and 16-byte integer types.
 +        let bytes = (num_bits / 8).next_power_of_two() as i32;
 +        match bytes {
 +            1 => self.i8_type,
 +            2 => self.i16_type,
 +            4 => self.i32_type,
 +            8 => self.i64_type,
 +            16 => self.i128_type,
 +            _ => panic!("unexpected num_bits: {}", num_bits),
 +        }
 +        /*
 +        let bytes = (num_bits / 8).next_power_of_two() as i32;
 +        println!("num_bits: {}, bytes: {}", num_bits, bytes);
 +        self.context.new_int_type(bytes, true) // TODO: check if it is indeed a signed integer.
 +        */
 +    }
 +
 +    /*pub fn type_bool(&self) -> Type<'gcc> {
 +        self.bool_type
 +    }*/
 +
 +    pub fn type_void(&self) -> Type<'gcc> {
 +        self.context.new_type::<()>()
 +    }
 +
 +    pub fn type_size_t(&self) -> Type<'gcc> {
 +        self.context.new_type::<usize>()
 +    }
 +
 +    pub fn type_u8(&self) -> Type<'gcc> {
 +        self.u8_type
 +    }
 +
 +    pub fn type_u16(&self) -> Type<'gcc> {
 +        self.u16_type
 +    }
 +
 +    pub fn type_u32(&self) -> Type<'gcc> {
 +        self.u32_type
 +    }
 +
 +    pub fn type_u64(&self) -> Type<'gcc> {
 +        self.u64_type
 +    }
 +
 +    pub fn type_u128(&self) -> Type<'gcc> {
 +        self.u128_type
 +    }
 +
 +    pub fn type_pointee_for_align(&self, align: Align) -> Type<'gcc> {
 +        // FIXME(eddyb) We could find a better approximation if ity.align < align.
 +        let ity = Integer::approximate_align(self, align);
 +        self.type_from_integer(ity)
 +    }
 +
 +    /*pub fn type_int_from_ty(&self, t: ty::IntTy) -> Type<'gcc> {
 +        match t {
 +            ty::IntTy::Isize => self.type_isize(),
 +            ty::IntTy::I8 => self.type_i8(),
 +            ty::IntTy::I16 => self.type_i16(),
 +            ty::IntTy::I32 => self.type_i32(),
 +            ty::IntTy::I64 => self.type_i64(),
 +            ty::IntTy::I128 => self.type_i128(),
 +        }
 +    }
 +
 +    pub fn type_uint_from_ty(&self, t: ty::UintTy) -> Type<'gcc> {
 +        match t {
 +            ty::UintTy::Usize => self.type_isize(),
 +            ty::UintTy::U8 => self.type_i8(),
 +            ty::UintTy::U16 => self.type_i16(),
 +            ty::UintTy::U32 => self.type_i32(),
 +            ty::UintTy::U64 => self.type_i64(),
 +            ty::UintTy::U128 => self.type_i128(),
 +        }
 +    }
 +
 +    pub fn type_float_from_ty(&self, t: ty::FloatTy) -> Type<'gcc> {
 +        match t {
 +            ty::FloatTy::F32 => self.type_f32(),
 +            ty::FloatTy::F64 => self.type_f64(),
 +        }
 +    }
 +
 +    pub fn type_vector(&self, ty: Type<'gcc>, len: u64) -> Type<'gcc> {
 +        self.context.new_vector_type(ty, len)
 +    }*/
 +}
 +
 +impl<'gcc, 'tcx> BaseTypeMethods<'tcx> for CodegenCx<'gcc, 'tcx> {
 +    fn type_i1(&self) -> Type<'gcc> {
 +        self.bool_type
 +    }
 +
 +    fn type_i8(&self) -> Type<'gcc> {
 +        self.i8_type
 +    }
 +
 +    fn type_i16(&self) -> Type<'gcc> {
 +        self.i16_type
 +    }
 +
 +    fn type_i32(&self) -> Type<'gcc> {
 +        self.i32_type
 +    }
 +
 +    fn type_i64(&self) -> Type<'gcc> {
 +        self.i64_type
 +    }
 +
 +    fn type_i128(&self) -> Type<'gcc> {
 +        self.i128_type
 +    }
 +
 +    fn type_isize(&self) -> Type<'gcc> {
 +        self.isize_type
 +    }
 +
 +    fn type_f32(&self) -> Type<'gcc> {
 +        self.context.new_type::<f32>()
 +    }
 +
 +    fn type_f64(&self) -> Type<'gcc> {
 +        self.context.new_type::<f64>()
 +    }
 +
 +    fn type_func(&self, params: &[Type<'gcc>], return_type: Type<'gcc>) -> Type<'gcc> {
 +        self.context.new_function_pointer_type(None, return_type, params, false)
 +    }
 +
 +    fn type_struct(&self, fields: &[Type<'gcc>], _packed: bool) -> Type<'gcc> {
 +        let types = fields.to_vec();
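 +        // Struct types are cached by their list of field types, so requesting the same
 +        // field list again returns the previously created struct type.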
 +        if let Some(typ) = self.struct_types.borrow().get(fields) {
 +            return typ.clone();
 +        }
 +        let fields: Vec<_> = fields.iter().enumerate()
 +            .map(|(index, field)| self.context.new_field(None, *field, &format!("field{}_TODO", index)))
 +            .collect();
 +        // TODO: use packed.
 +        //let name = types.iter().map(|typ| format!("{:?}", typ)).collect::<Vec<_>>().join("_");
 +        //let typ = self.context.new_struct_type(None, format!("struct{}", name), &fields).as_type();
 +        let typ = self.context.new_struct_type(None, "struct", &fields).as_type();
 +        self.struct_types.borrow_mut().insert(types, typ);
 +        typ
 +    }
 +
 +    fn type_kind(&self, typ: Type<'gcc>) -> TypeKind {
 +        if typ.is_integral() {
 +            TypeKind::Integer
 +        }
 +        else if typ.is_vector().is_some() {
 +            TypeKind::Vector
 +        }
 +        else {
 +            // TODO
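 +            // NOTE: floats, pointers and structs currently fall through to this branch,
 +            // so they are all reported as `TypeKind::Void` for now.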
 +            TypeKind::Void
 +        }
 +    }
 +
 +    fn type_ptr_to(&self, ty: Type<'gcc>) -> Type<'gcc> {
 +        // TODO
 +        /*assert_ne!(self.type_kind(ty), TypeKind::Function,
 +            "don't call ptr_to on function types, use ptr_to_gcc_type on FnAbi instead"
 +        );*/
 +        ty.make_pointer()
 +    }
 +
 +    fn type_ptr_to_ext(&self, ty: Type<'gcc>, _address_space: AddressSpace) -> Type<'gcc> {
 +        // TODO: use address_space
 +        ty.make_pointer()
 +    }
 +
 +    fn element_type(&self, ty: Type<'gcc>) -> Type<'gcc> {
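 +        // For instance, an array type such as `i32[4]` gives `i32`, a vector type gives
 +        // its element type, and a pointer type gives its pointee.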
 +        if let Some(typ) = ty.is_array() {
 +            typ
 +        }
 +        else if let Some(vector_type) = ty.is_vector() {
 +            vector_type.get_element_type()
 +        }
 +        else if let Some(typ) = ty.get_pointee() {
 +            typ
 +        }
 +        else {
 +            unreachable!()
 +        }
 +    }
 +
 +    fn vector_length(&self, _ty: Type<'gcc>) -> usize {
 +        unimplemented!();
 +        //unsafe { llvm::LLVMGetVectorSize(ty) as usize }
 +    }
 +
 +    fn float_width(&self, typ: Type<'gcc>) -> usize {
 +        let f32 = self.context.new_type::<f32>();
 +        let f64 = self.context.new_type::<f64>();
 +        if typ == f32 {
 +            32
 +        }
 +        else if typ == f64 {
 +            64
 +        }
 +        else {
 +            panic!("Cannot get width of float type {:?}", typ);
 +        }
 +        // TODO: support other sizes.
 +        /*match self.type_kind(ty) {
 +            TypeKind::Float => 32,
 +            TypeKind::Double => 64,
 +            TypeKind::X86_FP80 => 80,
 +            TypeKind::FP128 | TypeKind::PPC_FP128 => 128,
 +            _ => bug!("llvm_float_width called on a non-float type"),
 +        }*/
 +    }
 +
 +    fn int_width(&self, typ: Type<'gcc>) -> u64 {
 +        if typ.is_i8(self) || typ.is_u8(self) {
 +            8
 +        }
 +        else if typ.is_i16(self) || typ.is_u16(self) {
 +            16
 +        }
 +        else if typ.is_i32(self) || typ.is_u32(self) {
 +            32
 +        }
 +        else if typ.is_i64(self) || typ.is_u64(self) {
 +            64
 +        }
 +        else if typ.is_i128(self) || typ.is_u128(self) {
 +            128
 +        }
 +        else {
 +            panic!("Cannot get width of int type {:?}", typ);
 +        }
 +    }
 +
 +    fn val_ty(&self, value: RValue<'gcc>) -> Type<'gcc> {
 +        value.get_type()
 +    }
 +}
 +
 +impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> {
 +    pub fn type_padding_filler(&self, size: Size, align: Align) -> Type<'gcc> {
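 +        // A rough example: 12 bytes of padding with an alignment of 4 approximates to
 +        // `Integer::I32`, so the filler is the equivalent of a 3-element array of 32-bit
 +        // integers (12 / 4). The assert below guarantees the size divides evenly.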
 +        let unit = Integer::approximate_align(self, align);
 +        let size = size.bytes();
 +        let unit_size = unit.size().bytes();
 +        assert_eq!(size % unit_size, 0);
 +        self.type_array(self.type_from_integer(unit), size / unit_size)
 +    }
 +
 +    pub fn set_struct_body(&self, typ: Struct<'gcc>, fields: &[Type<'gcc>], _packed: bool) {
 +        // TODO: use packed.
 +        let fields: Vec<_> = fields.iter().enumerate()
 +            .map(|(index, field)| self.context.new_field(None, *field, &format!("field_{}", index)))
 +            .collect();
 +        typ.set_fields(None, &fields);
 +    }
 +
 +    /*fn type_struct(&self, fields: &[Type<'gcc>], packed: bool) -> Type<'gcc> {
 +        // TODO: use packed.
 +        let fields: Vec<_> = fields.iter().enumerate()
 +            .map(|(index, field)| self.context.new_field(None, *field, &format!("field_{}", index)))
 +            .collect();
 +        return self.context.new_struct_type(None, "unnamedStruct", &fields).as_type();
 +    }*/
 +
 +    pub fn type_named_struct(&self, name: &str) -> Struct<'gcc> {
 +        self.context.new_opaque_struct_type(None, name)
 +    }
 +
 +    pub fn type_array(&self, ty: Type<'gcc>, mut len: u64) -> Type<'gcc> {
 +        if let Some(struct_type) = ty.is_struct() {
 +            if struct_type.get_field_count() == 0 {
 +                // NOTE: since gccjit only supports i32 for the array size and libcore's tests use a
 +                // size of usize::MAX in test_binary_search, we work around this by setting the size
 +                // to zero for ZSTs.
 +                // FIXME: fix gccjit API.
 +                len = 0;
 +            }
 +        }
 +
++        // NOTE: see note above. Some other test uses usize::MAX.
++        if len == u64::MAX {
++            len = 0;
++        }
++
 +        let len: i32 = len.try_into().expect("array len");
 +
 +        self.context.new_array_type(None, ty, len)
 +    }
 +}
 +
 +pub fn struct_fields<'gcc, 'tcx>(cx: &CodegenCx<'gcc, 'tcx>, layout: TyAndLayout<'tcx>) -> (Vec<Type<'gcc>>, bool) {
 +    //debug!("struct_fields: {:#?}", layout);
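 +    // As a rough illustration, a `#[repr(C)] struct Foo { a: u8, b: u32 }` (size 8,
 +    // align 4) produces the field list
 +    //     [filler(0 bytes), u8, filler(3 bytes), u32, filler(0 bytes)]
 +    // i.e. one padding filler before every field plus one trailing filler, which is why
 +    // the result always has `1 + field_count * 2` entries for sized types.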
 +    let field_count = layout.fields.count();
 +
 +    let mut packed = false;
 +    let mut offset = Size::ZERO;
 +    let mut prev_effective_align = layout.align.abi;
 +    let mut result: Vec<_> = Vec::with_capacity(1 + field_count * 2);
 +    for i in layout.fields.index_by_increasing_offset() {
 +        let target_offset = layout.fields.offset(i as usize);
 +        let field = layout.field(cx, i);
 +        let effective_field_align =
 +            layout.align.abi.min(field.align.abi).restrict_for_offset(target_offset);
 +        packed |= effective_field_align < field.align.abi;
 +
 +        /*debug!(
 +            "struct_fields: {}: {:?} offset: {:?} target_offset: {:?} \
 +                effective_field_align: {}",
 +            i,
 +            field,
 +            offset,
 +            target_offset,
 +            effective_field_align.bytes()
 +        );*/
 +        assert!(target_offset >= offset);
 +        let padding = target_offset - offset;
 +        let padding_align = prev_effective_align.min(effective_field_align);
 +        assert_eq!(offset.align_to(padding_align) + padding, target_offset);
 +        result.push(cx.type_padding_filler(padding, padding_align));
 +        //debug!("    padding before: {:?}", padding);
 +
 +        result.push(field.gcc_type(cx, !field.ty.is_any_ptr())); // FIXME: might need to check if the type is inside another, like Box<Type>.
 +        offset = target_offset + field.size;
 +        prev_effective_align = effective_field_align;
 +    }
 +    if !layout.is_unsized() && field_count > 0 {
 +        if offset > layout.size {
 +            bug!("layout: {:#?} stride: {:?} offset: {:?}", layout, layout.size, offset);
 +        }
 +        let padding = layout.size - offset;
 +        let padding_align = prev_effective_align;
 +        assert_eq!(offset.align_to(padding_align) + padding, layout.size);
 +        /*debug!(
 +            "struct_fields: pad_bytes: {:?} offset: {:?} stride: {:?}",
 +            padding, offset, layout.size
 +        );*/
 +        result.push(cx.type_padding_filler(padding, padding_align));
 +        assert_eq!(result.len(), 1 + field_count * 2);
 +    } else {
 +        //debug!("struct_fields: offset: {:?} stride: {:?}", offset, layout.size);
 +    }
 +
 +    (result, packed)
 +}
index c5db0a1b2e4599a62d98ea20d990ada0ba5062b5,0000000000000000000000000000000000000000..010805808d2a34cc49cc92ad668e04b17d0f0190
mode 100644,000000..100644
--- /dev/null
@@@ -1,366 -1,0 +1,372 @@@
 +use std::fmt::Write;
 +
 +use gccjit::{Struct, Type};
 +use crate::rustc_codegen_ssa::traits::{BaseTypeMethods, DerivedTypeMethods, LayoutTypeMethods};
 +use rustc_middle::bug;
 +use rustc_middle::ty::{self, Ty, TypeFoldable};
 +use rustc_middle::ty::layout::{FnAbiExt, TyAndLayout};
 +use rustc_middle::ty::print::with_no_trimmed_paths;
 +use rustc_target::abi::{self, Abi, F32, F64, FieldsShape, Int, Integer, LayoutOf, Pointer, PointeeInfo, Size, TyAndLayoutMethods, Variants};
 +use rustc_target::abi::call::{CastTarget, FnAbi, Reg};
 +
 +use crate::abi::{FnAbiGccExt, GccType};
 +use crate::context::CodegenCx;
 +use crate::type_::struct_fields;
 +
 +impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> {
 +    fn type_from_unsigned_integer(&self, i: Integer) -> Type<'gcc> {
 +        use Integer::*;
 +        match i {
 +            I8 => self.type_u8(),
 +            I16 => self.type_u16(),
 +            I32 => self.type_u32(),
 +            I64 => self.type_u64(),
 +            I128 => self.type_u128(),
 +        }
 +    }
 +}
 +
 +pub fn uncached_gcc_type<'gcc, 'tcx>(cx: &CodegenCx<'gcc, 'tcx>, layout: TyAndLayout<'tcx>, defer: &mut Option<(Struct<'gcc>, TyAndLayout<'tcx>)>) -> Type<'gcc> {
 +    match layout.abi {
 +        Abi::Scalar(_) => bug!("handled elsewhere"),
 +        Abi::Vector { ref element, count } => {
 +            let element = layout.scalar_gcc_type_at(cx, element, Size::ZERO);
 +            return cx.context.new_vector_type(element, count);
 +        },
 +        Abi::ScalarPair(..) => {
 +            return cx.type_struct(
 +                &[
 +                    layout.scalar_pair_element_gcc_type(cx, 0, false),
 +                    layout.scalar_pair_element_gcc_type(cx, 1, false),
 +                ],
 +                false,
 +            );
 +        }
 +        Abi::Uninhabited | Abi::Aggregate { .. } => {}
 +    }
 +
 +    let name = match layout.ty.kind() {
 +        // FIXME(eddyb) producing readable type names for trait objects can result
 +        // in problematically distinct types due to HRTB and subtyping (see #47638).
 +        // ty::Dynamic(..) |
 +        ty::Adt(..) | ty::Closure(..) | ty::Foreign(..) | ty::Generator(..) | ty::Str
 +            if !cx.sess().fewer_names() =>
 +        {
 +            let mut name = with_no_trimmed_paths(|| layout.ty.to_string());
 +            if let (&ty::Adt(def, _), &Variants::Single { index }) =
 +                (layout.ty.kind(), &layout.variants)
 +            {
 +                if def.is_enum() && !def.variants.is_empty() {
 +                    write!(&mut name, "::{}", def.variants[index].ident).unwrap();
 +                }
 +            }
 +            if let (&ty::Generator(_, _, _), &Variants::Single { index }) =
 +                (layout.ty.kind(), &layout.variants)
 +            {
 +                write!(&mut name, "::{}", ty::GeneratorSubsts::variant_name(index)).unwrap();
 +            }
 +            Some(name)
 +        }
 +        ty::Adt(..) => {
 +            // If `Some` is returned then a named struct is created in LLVM. Name collisions are
 +            // avoided by LLVM (with increasing suffixes). If rustc doesn't generate names then that
 +            // can improve perf.
 +            // FIXME: I don't think that's true for libgccjit.
 +            Some(String::new())
 +        }
 +        _ => None,
 +    };
 +
 +    match layout.fields {
 +        FieldsShape::Primitive | FieldsShape::Union(_) => {
 +            let fill = cx.type_padding_filler(layout.size, layout.align.abi);
 +            let packed = false;
 +            match name {
 +                None => cx.type_struct(&[fill], packed),
 +                Some(ref name) => {
 +                    let gcc_type = cx.type_named_struct(name);
 +                    cx.set_struct_body(gcc_type, &[fill], packed);
 +                    gcc_type.as_type()
 +                },
 +            }
 +        }
 +        FieldsShape::Array { count, .. } => cx.type_array(layout.field(cx, 0).gcc_type(cx, true), count),
 +        FieldsShape::Arbitrary { .. } =>
 +            match name {
 +                None => {
 +                    let (gcc_fields, packed) = struct_fields(cx, layout);
 +                    cx.type_struct(&gcc_fields, packed)
 +                },
 +                Some(ref name) => {
 +                    let gcc_type = cx.type_named_struct(name);
 +                    *defer = Some((gcc_type, layout));
 +                    gcc_type.as_type()
 +                },
 +            },
 +    }
 +}
 +
 +pub trait LayoutGccExt<'tcx> {
 +    fn is_gcc_immediate(&self) -> bool;
 +    fn is_gcc_scalar_pair(&self) -> bool;
 +    fn gcc_type<'gcc>(&self, cx: &CodegenCx<'gcc, 'tcx>, set_fields: bool) -> Type<'gcc>;
 +    fn immediate_gcc_type<'gcc>(&self, cx: &CodegenCx<'gcc, 'tcx>) -> Type<'gcc>;
 +    fn scalar_gcc_type_at<'gcc>(&self, cx: &CodegenCx<'gcc, 'tcx>, scalar: &abi::Scalar, offset: Size) -> Type<'gcc>;
 +    fn scalar_pair_element_gcc_type<'gcc>(&self, cx: &CodegenCx<'gcc, 'tcx>, index: usize, immediate: bool) -> Type<'gcc>;
 +    fn gcc_field_index(&self, index: usize) -> u64;
 +    fn pointee_info_at<'gcc>(&self, cx: &CodegenCx<'gcc, 'tcx>, offset: Size) -> Option<PointeeInfo>;
 +}
 +
 +impl<'tcx> LayoutGccExt<'tcx> for TyAndLayout<'tcx> {
 +    fn is_gcc_immediate(&self) -> bool {
 +        match self.abi {
 +            Abi::Scalar(_) | Abi::Vector { .. } => true,
 +            Abi::ScalarPair(..) => false,
 +            Abi::Uninhabited | Abi::Aggregate { .. } => self.is_zst(),
 +        }
 +    }
 +
 +    fn is_gcc_scalar_pair(&self) -> bool {
 +        match self.abi {
 +            Abi::ScalarPair(..) => true,
 +            Abi::Uninhabited | Abi::Scalar(_) | Abi::Vector { .. } | Abi::Aggregate { .. } => false,
 +        }
 +    }
 +
 +    /// Gets the GCC type corresponding to a Rust type, i.e., `rustc_middle::ty::Ty`.
 +    /// The pointee type of the pointer in `PlaceRef` is always this type.
 +    /// For sized types, it is also the right GCC type for an `alloca`
 +    /// containing a value of that type, and most immediates (except `bool`).
 +    /// Unsized types, however, are represented by a "minimal unit", e.g.
 +    /// `[T]` becomes `T`, while `str` and `Trait` turn into `i8` - this
 +    /// is useful for indexing slices, as `&[T]`'s data pointer is `T*`.
 +    /// If the type is an unsized struct, the regular layout is generated,
 +    /// with the inner-most trailing unsized field using the "minimal unit"
 +    /// of that field's type - this is useful for taking the address of
 +    /// that field and ensuring the struct has the right alignment.
 +    fn gcc_type<'gcc>(&self, cx: &CodegenCx<'gcc, 'tcx>, set_fields: bool) -> Type<'gcc> {
 +        if let Abi::Scalar(ref scalar) = self.abi {
 +            // Use a different cache for scalars because pointers to DSTs
 +            // can be either fat or thin (data pointers of fat pointers).
 +            if let Some(&ty) = cx.scalar_types.borrow().get(&self.ty) {
 +                return ty;
 +            }
 +            let ty =
 +                match *self.ty.kind() {
 +                    ty::Ref(_, ty, _) | ty::RawPtr(ty::TypeAndMut { ty, .. }) => {
 +                        cx.type_ptr_to(cx.layout_of(ty).gcc_type(cx, set_fields))
 +                    }
 +                    ty::Adt(def, _) if def.is_box() => {
 +                        cx.type_ptr_to(cx.layout_of(self.ty.boxed_ty()).gcc_type(cx, true))
 +                    }
 +                    ty::FnPtr(sig) => cx.fn_ptr_backend_type(&FnAbi::of_fn_ptr(cx, sig, &[])),
 +                    _ => self.scalar_gcc_type_at(cx, scalar, Size::ZERO),
 +                };
 +            cx.scalar_types.borrow_mut().insert(self.ty, ty);
 +            return ty;
 +        }
 +
 +        // Check the cache.
 +        let variant_index =
 +            match self.variants {
 +                Variants::Single { index } => Some(index),
 +                _ => None,
 +            };
 +        let cached_type = cx.types.borrow().get(&(self.ty, variant_index)).cloned();
 +        if let Some(ty) = cached_type {
 +            let type_to_set_fields = cx.types_with_fields_to_set.borrow_mut().remove(&ty);
 +            if let Some((struct_type, layout)) = type_to_set_fields {
 +                // Since we might be trying to generate a type containing another type which is not
 +                // completely generated yet, we deferred setting the fields until now.
 +                let (fields, packed) = struct_fields(cx, layout);
 +                cx.set_struct_body(struct_type, &fields, packed);
 +            }
 +            return ty;
 +        }
 +
 +        //debug!("gcc_type({:#?})", self);
 +
 +        assert!(!self.ty.has_escaping_bound_vars(), "{:?} has escaping bound vars", self.ty);
 +
 +        // Make sure lifetimes are erased, to avoid generating distinct GCC
 +        // types for Rust types that only differ in the choice of lifetimes.
 +        let normal_ty = cx.tcx.erase_regions(self.ty);
 +
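 +        // `defer` handles recursive types: for instance, for something like
 +        // `struct Node { next: *const Node }`, an opaque named struct is created first so
 +        // that the pointer field can refer to it, and its fields are only filled in below
 +        // once all field types have been generated.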
 +        let mut defer = None;
 +        let ty =
 +            if self.ty != normal_ty {
 +                let mut layout = cx.layout_of(normal_ty);
 +                if let Some(v) = variant_index {
 +                    layout = layout.for_variant(cx, v);
 +                }
 +                layout.gcc_type(cx, true)
 +            }
 +            else {
 +                uncached_gcc_type(cx, *self, &mut defer)
 +            };
 +        //debug!("--> mapped {:#?} to ty={:?}", self, ty);
 +
 +        cx.types.borrow_mut().insert((self.ty, variant_index), ty);
 +
 +        if let Some((ty, layout)) = defer {
 +            // TODO: do we still need these conditions and the set_fields parameter?
 +            //if set_fields {
 +                let (fields, packed) = struct_fields(cx, layout);
 +                cx.set_struct_body(ty, &fields, packed);
 +            /*}
 +            else {
 +                // Since we might be trying to generate a type containing another type which is not
 +                // completely generated yet, we don't set the fields right now, but we save the
 +                // type to set the fields later.
 +                cx.types_with_fields_to_set.borrow_mut().insert(ty.as_type(), (ty, layout));
 +            }*/
 +        }
 +
 +        ty
 +    }
 +
 +    fn immediate_gcc_type<'gcc>(&self, cx: &CodegenCx<'gcc, 'tcx>) -> Type<'gcc> {
 +        if let Abi::Scalar(ref scalar) = self.abi {
 +            if scalar.is_bool() {
 +                return cx.type_i1();
 +            }
 +        }
 +        self.gcc_type(cx, true)
 +    }
 +
 +    fn scalar_gcc_type_at<'gcc>(&self, cx: &CodegenCx<'gcc, 'tcx>, scalar: &abi::Scalar, offset: Size) -> Type<'gcc> {
 +        match scalar.value {
 +            Int(i, true) => cx.type_from_integer(i),
 +            Int(i, false) => cx.type_from_unsigned_integer(i),
 +            F32 => cx.type_f32(),
 +            F64 => cx.type_f64(),
 +            Pointer => {
 +                // If we know the alignment, pick something better than i8.
 +                let pointee =
 +                    if let Some(pointee) = self.pointee_info_at(cx, offset) {
 +                        cx.type_pointee_for_align(pointee.align)
 +                    }
 +                    else {
 +                        cx.type_i8()
 +                    };
 +                cx.type_ptr_to(pointee)
 +            }
 +        }
 +    }
 +
 +    fn scalar_pair_element_gcc_type<'gcc>(&self, cx: &CodegenCx<'gcc, 'tcx>, index: usize, immediate: bool) -> Type<'gcc> {
 +        // TODO: remove llvm hack:
 +        // HACK(eddyb) special-case fat pointers until LLVM removes
 +        // pointee types, to avoid bitcasting every `OperandRef::deref`.
 +        match self.ty.kind() {
 +            ty::Ref(..) | ty::RawPtr(_) => {
 +                return self.field(cx, index).gcc_type(cx, true);
 +            }
 +            ty::Adt(def, _) if def.is_box() => {
 +                let ptr_ty = cx.tcx.mk_mut_ptr(self.ty.boxed_ty());
 +                return cx.layout_of(ptr_ty).scalar_pair_element_gcc_type(cx, index, immediate);
 +            }
 +            _ => {}
 +        }
 +
 +        let (a, b) = match self.abi {
 +            Abi::ScalarPair(ref a, ref b) => (a, b),
 +            _ => bug!("TyAndLayout::scalar_pair_element_llty({:?}): not applicable", self),
 +        };
 +        let scalar = [a, b][index];
 +
 +        // Make sure to return the same type `immediate_gcc_type` would when
 +        // dealing with an immediate pair.  This means that `(bool, bool)` is
 +        // effectively represented as `{i8, i8}` in memory and two `i1`s as an
 +        // immediate, just like `bool` is typically `i8` in memory and only `i1`
 +        // when immediate.  We need to load/store `bool` as `i8` to avoid
 +        // crippling LLVM optimizations or triggering other LLVM bugs with `i1`.
 +        // TODO: these bugs certainly don't happen in this case since the bool type is used instead of i1.
 +        if /*immediate &&*/ scalar.is_bool() {
 +            return cx.type_i1();
 +        }
 +
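 +        // The second element starts at `a.size` rounded up to `b`'s alignment: e.g. if
 +        // element 0 is a 4-byte scalar and element 1 requires 8-byte alignment, element 1
 +        // is at offset 8.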
 +        let offset =
 +            if index == 0 {
 +                Size::ZERO
 +            }
 +            else {
 +                a.value.size(cx).align_to(b.value.align(cx).abi)
 +            };
 +        self.scalar_gcc_type_at(cx, scalar, offset)
 +    }
 +
 +    fn gcc_field_index(&self, index: usize) -> u64 {
 +        match self.abi {
 +            Abi::Scalar(_) | Abi::ScalarPair(..) => {
 +                bug!("TyAndLayout::gcc_field_index({:?}): not applicable", self)
 +            }
 +            _ => {}
 +        }
 +        match self.fields {
 +            FieldsShape::Primitive | FieldsShape::Union(_) => {
 +                bug!("TyAndLayout::gcc_field_index({:?}): not applicable", self)
 +            }
 +
 +            FieldsShape::Array { .. } => index as u64,
 +
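 +            // `struct_fields` interleaves padding fillers with the real fields
 +            // (filler, field, filler, field, ..., trailing filler), so memory index 0 maps
 +            // to GCC field 1, memory index 1 maps to GCC field 3, and so on.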
 +            FieldsShape::Arbitrary { .. } => 1 + (self.fields.memory_index(index) as u64) * 2,
 +        }
 +    }
 +
 +    fn pointee_info_at<'a>(&self, cx: &CodegenCx<'a, 'tcx>, offset: Size) -> Option<PointeeInfo> {
 +        if let Some(&pointee) = cx.pointee_infos.borrow().get(&(self.ty, offset)) {
 +            return pointee;
 +        }
 +
 +        let result = Ty::pointee_info_at(*self, cx, offset);
 +
 +        cx.pointee_infos.borrow_mut().insert((self.ty, offset), result);
 +        result
 +    }
 +}
 +
 +impl<'gcc, 'tcx> LayoutTypeMethods<'tcx> for CodegenCx<'gcc, 'tcx> {
 +    fn backend_type(&self, layout: TyAndLayout<'tcx>) -> Type<'gcc> {
 +        layout.gcc_type(self, true)
 +    }
 +
 +    fn immediate_backend_type(&self, layout: TyAndLayout<'tcx>) -> Type<'gcc> {
 +        layout.immediate_gcc_type(self)
 +    }
 +
 +    fn is_backend_immediate(&self, layout: TyAndLayout<'tcx>) -> bool {
 +        layout.is_gcc_immediate()
 +    }
 +
 +    fn is_backend_scalar_pair(&self, layout: TyAndLayout<'tcx>) -> bool {
 +        layout.is_gcc_scalar_pair()
 +    }
 +
 +    fn backend_field_index(&self, layout: TyAndLayout<'tcx>, index: usize) -> u64 {
 +        layout.gcc_field_index(index)
 +    }
 +
 +    fn scalar_pair_element_backend_type(&self, layout: TyAndLayout<'tcx>, index: usize, immediate: bool) -> Type<'gcc> {
 +        layout.scalar_pair_element_gcc_type(self, index, immediate)
 +    }
 +
 +    fn cast_backend_type(&self, ty: &CastTarget) -> Type<'gcc> {
 +        ty.gcc_type(self)
 +    }
 +
 +    fn fn_ptr_backend_type(&self, fn_abi: &FnAbi<'tcx, Ty<'tcx>>) -> Type<'gcc> {
 +        fn_abi.ptr_to_gcc_type(self)
 +    }
 +
 +    fn reg_backend_type(&self, _ty: &Reg) -> Type<'gcc> {
 +        unimplemented!();
 +        //ty.gcc_type(self)
 +    }
++
++    fn fn_decl_backend_type(&self, _fn_abi: &FnAbi<'tcx, Ty<'tcx>>) -> Type<'gcc> {
++        // FIXME: return correct type.
++        self.type_void()
++        //fn_abi.gcc_type(self)
++    }
 +}