Fully remove the intrinsic_match macro
src/intrinsics/mod.rs
index 86698460747494f7a95711fe58bfada5da0a34e0..49c0fa3434081c34eec3150857cc69b829fe5049 100644
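
This commit replaces the old `intrinsic_match!`/`call_intrinsic_match!` macro machinery with a small `macro_rules!` helper, `intrinsic_args!`, plus ordinary `match` arms on the intrinsic `Symbol`. Each arm names its operands once and the macro expands to a slice pattern over `args` with `codegen_operand` applied to every element; the float math intrinsics move into a separate `codegen_float_intrinsic_call` table. As a rough sketch of the expansion for a two-operand arm (taken from the `rotate_left` case further down; the comments show approximately what the macro call produces):

    sym::rotate_left => {
        intrinsic_args!(fx, args => (x, y); intrinsic);
        // The macro call above expands to roughly:
        //   let (x, y) = if let [x, y] = args {
        //       (codegen_operand(fx, x), codegen_operand(fx, y))
        //   } else {
        //       bug!("wrong number of args for intrinsic {}", intrinsic);
        //   };
        let y = y.load_scalar(fx);

        let layout = x.layout();
        let x = x.load_scalar(fx);
        let res = fx.bcx.ins().rotl(x, y);
        ret.write_cvalue(fx, CValue::by_val(res, layout));
    }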
@@ -1,6 +1,17 @@
 //! Codegen of intrinsics. This includes `extern "rust-intrinsic"`, `extern "platform-intrinsic"`
 //! and LLVM intrinsics that have symbol names starting with `llvm.`.
 
+macro_rules! intrinsic_args {
+    ($fx:expr, $args:expr => ($($arg:tt),*); $intrinsic:expr) => {
+        #[allow(unused_parens)]
+        let ($($arg),*) = if let [$($arg),*] = $args {
+            ($(codegen_operand($fx, $arg)),*)
+        } else {
+            bug!("wrong number of args for intrinsic {}", $intrinsic);
+        };
+    }
+}
+
 mod cpuid;
 mod llvm;
 mod simd;
 pub(crate) use llvm::codegen_llvm_intrinsic_call;
 
 use rustc_middle::ty::print::with_no_trimmed_paths;
-use rustc_span::symbol::{kw, sym};
+use rustc_middle::ty::subst::SubstsRef;
+use rustc_span::symbol::{kw, sym, Symbol};
 
 use crate::prelude::*;
 use cranelift_codegen::ir::AtomicRmwOp;
 
-macro intrinsic_pat {
-    (_) => {
-        _
-    },
-    ($name:ident) => {
-        sym::$name
-    },
-    (kw.$name:ident) => {
-        kw::$name
-    },
-    ($name:literal) => {
-        $name
-    },
-}
-
-macro intrinsic_arg {
-    (o $fx:expr, $arg:ident) => {
-        $arg
-    },
-    (c $fx:expr, $arg:ident) => {
-        codegen_operand($fx, $arg)
-    },
-    (v $fx:expr, $arg:ident) => {
-        codegen_operand($fx, $arg).load_scalar($fx)
-    }
-}
-
-macro intrinsic_substs {
-    ($substs:expr, $index:expr,) => {},
-    ($substs:expr, $index:expr, $first:ident $(,$rest:ident)*) => {
-        let $first = $substs.type_at($index);
-        intrinsic_substs!($substs, $index+1, $($rest),*);
-    }
-}
-
-macro intrinsic_match {
-    ($fx:expr, $intrinsic:expr, $substs:expr, $args:expr,
-    _ => $unknown:block;
-    $(
-        $($($name:tt).*)|+ $(if $cond:expr)?, $(<$($subst:ident),*>)? ($($a:ident $arg:ident),*) $content:block;
-    )*) => {
-        let _ = $substs; // Silence warning when substs is unused.
-        match $intrinsic {
-            $(
-                $(intrinsic_pat!($($name).*))|* $(if $cond)? => {
-                    #[allow(unused_parens, non_snake_case)]
-                    {
-                        $(
-                            intrinsic_substs!($substs, 0, $($subst),*);
-                        )?
-                        if let [$($arg),*] = $args {
-                            let ($($arg,)*) = (
-                                $(intrinsic_arg!($a $fx, $arg),)*
-                            );
-                            #[warn(unused_parens, non_snake_case)]
-                            {
-                                $content
-                            }
-                        } else {
-                            bug!("wrong number of args for intrinsic {:?}", $intrinsic);
-                        }
-                    }
-                }
-            )*
-            _ => $unknown,
-        }
-    }
-}
-
-macro call_intrinsic_match {
-    ($fx:expr, $intrinsic:expr, $substs:expr, $ret:expr, $destination:expr, $args:expr, $(
-        $name:ident($($arg:ident),*) -> $ty:ident => $func:ident,
-    )*) => {
-        match $intrinsic {
-            $(
-                sym::$name => {
-                    assert!($substs.is_noop());
-                    if let [$(ref $arg),*] = *$args {
-                        let ($($arg,)*) = (
-                            $(codegen_operand($fx, $arg),)*
-                        );
-                        let res = $fx.easy_call(stringify!($func), &[$($arg),*], $fx.tcx.types.$ty);
-                        $ret.write_cvalue($fx, res);
-
-                        if let Some((_, dest)) = $destination {
-                            let ret_block = $fx.get_block(dest);
-                            $fx.bcx.ins().jump(ret_block, &[]);
-                            return;
-                        } else {
-                            unreachable!();
-                        }
-                    } else {
-                        bug!("wrong number of args for intrinsic {:?}", $intrinsic);
-                    }
-                }
-            )*
-            _ => {}
-        }
-    }
-}
-
-macro validate_atomic_type($fx:ident, $intrinsic:ident, $span:ident, $ty:expr) {
-    match $ty.kind() {
-        ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
-        _ => {
-            $fx.tcx.sess.span_err(
-                $span,
-                &format!(
-                    "`{}` intrinsic: expected basic integer or raw pointer type, found `{:?}`",
-                    $intrinsic, $ty
-                ),
-            );
-            // Prevent verifier error
-            crate::trap::trap_unreachable($fx, "compilation should not have succeeded");
-            return;
-        }
-    }
-}
-
-macro validate_simd_type($fx:ident, $intrinsic:ident, $span:ident, $ty:expr) {
-    if !$ty.is_simd() {
-        $fx.tcx.sess.span_err($span, &format!("invalid monomorphization of `{}` intrinsic: expected SIMD input type, found non-SIMD `{}`", $intrinsic, $ty));
-        // Prevent verifier error
-        crate::trap::trap_unreachable($fx, "compilation should not have succeeded");
-        return;
-    }
+fn report_atomic_type_validation_error<'tcx>(
+    fx: &mut FunctionCx<'_, '_, 'tcx>,
+    intrinsic: Symbol,
+    span: Span,
+    ty: Ty<'tcx>,
+) {
+    fx.tcx.sess.span_err(
+        span,
+        &format!(
+            "`{}` intrinsic: expected basic integer or raw pointer type, found `{:?}`",
+            intrinsic, ty
+        ),
+    );
+    // Prevent verifier error
+    crate::trap::trap_unreachable(fx, "compilation should not have succeeded");
 }
 
 pub(crate) fn clif_vector_type<'tcx>(tcx: TyCtxt<'tcx>, layout: TyAndLayout<'tcx>) -> Option<Type> {
-    let (element, count) = match &layout.abi {
-        Abi::Vector { element, count } => (element.clone(), *count),
+    let (element, count) = match layout.abi {
+        Abi::Vector { element, count } => (element, count),
         _ => unreachable!(),
     };
 
@@ -159,12 +60,7 @@ fn simd_for_each_lane<'tcx>(
     fx: &mut FunctionCx<'_, '_, 'tcx>,
     val: CValue<'tcx>,
     ret: CPlace<'tcx>,
-    f: impl Fn(
-        &mut FunctionCx<'_, '_, 'tcx>,
-        TyAndLayout<'tcx>,
-        TyAndLayout<'tcx>,
-        Value,
-    ) -> CValue<'tcx>,
+    f: &dyn Fn(&mut FunctionCx<'_, '_, 'tcx>, Ty<'tcx>, Ty<'tcx>, Value) -> Value,
 ) {
     let layout = val.layout();
 
@@ -177,7 +73,8 @@ fn simd_for_each_lane<'tcx>(
     for lane_idx in 0..lane_count {
         let lane = val.value_lane(fx, lane_idx).load_scalar(fx);
 
-        let res_lane = f(fx, lane_layout, ret_lane_layout, lane);
+        let res_lane = f(fx, lane_layout.ty, ret_lane_layout.ty, lane);
+        let res_lane = CValue::by_val(res_lane, ret_lane_layout);
 
         ret.place_lane(fx, lane_idx).write_cvalue(fx, res_lane);
     }
@@ -188,13 +85,7 @@ fn simd_pair_for_each_lane<'tcx>(
     x: CValue<'tcx>,
     y: CValue<'tcx>,
     ret: CPlace<'tcx>,
-    f: impl Fn(
-        &mut FunctionCx<'_, '_, 'tcx>,
-        TyAndLayout<'tcx>,
-        TyAndLayout<'tcx>,
-        Value,
-        Value,
-    ) -> CValue<'tcx>,
+    f: &dyn Fn(&mut FunctionCx<'_, '_, 'tcx>, Ty<'tcx>, Ty<'tcx>, Value, Value) -> Value,
 ) {
     assert_eq!(x.layout(), y.layout());
     let layout = x.layout();
@@ -209,7 +100,8 @@ fn simd_pair_for_each_lane<'tcx>(
         let x_lane = x.value_lane(fx, lane_idx).load_scalar(fx);
         let y_lane = y.value_lane(fx, lane_idx).load_scalar(fx);
 
-        let res_lane = f(fx, lane_layout, ret_lane_layout, x_lane, y_lane);
+        let res_lane = f(fx, lane_layout.ty, ret_lane_layout.ty, x_lane, y_lane);
+        let res_lane = CValue::by_val(res_lane, ret_lane_layout);
 
         ret.place_lane(fx, lane_idx).write_cvalue(fx, res_lane);
     }
@@ -220,7 +112,7 @@ fn simd_reduce<'tcx>(
     val: CValue<'tcx>,
     acc: Option<Value>,
     ret: CPlace<'tcx>,
-    f: impl Fn(&mut FunctionCx<'_, '_, 'tcx>, TyAndLayout<'tcx>, Value, Value) -> Value,
+    f: &dyn Fn(&mut FunctionCx<'_, '_, 'tcx>, Ty<'tcx>, Value, Value) -> Value,
 ) {
     let (lane_count, lane_ty) = val.layout().ty.simd_size_and_type(fx.tcx);
     let lane_layout = fx.layout_of(lane_ty);
@@ -230,7 +122,7 @@ fn simd_reduce<'tcx>(
         if let Some(acc) = acc { (acc, 0) } else { (val.value_lane(fx, 0).load_scalar(fx), 1) };
     for lane_idx in start_lane..lane_count {
         let lane = val.value_lane(fx, lane_idx).load_scalar(fx);
-        res_val = f(fx, lane_layout, res_val, lane);
+        res_val = f(fx, lane_layout.ty, res_val, lane);
     }
     let res = CValue::by_val(res_val, lane_layout);
     ret.write_cvalue(fx, res);
@@ -241,7 +133,7 @@ fn simd_reduce_bool<'tcx>(
     fx: &mut FunctionCx<'_, '_, 'tcx>,
     val: CValue<'tcx>,
     ret: CPlace<'tcx>,
-    f: impl Fn(&mut FunctionCx<'_, '_, 'tcx>, Value, Value) -> Value,
+    f: &dyn Fn(&mut FunctionCx<'_, '_, 'tcx>, Value, Value) -> Value,
 ) {
     let (lane_count, _lane_ty) = val.layout().ty.simd_size_and_type(fx.tcx);
     assert!(ret.layout().ty.is_bool());
@@ -264,10 +156,10 @@ fn simd_reduce_bool<'tcx>(
 
 fn bool_to_zero_or_max_uint<'tcx>(
     fx: &mut FunctionCx<'_, '_, 'tcx>,
-    layout: TyAndLayout<'tcx>,
+    ty: Ty<'tcx>,
     val: Value,
-) -> CValue<'tcx> {
-    let ty = fx.clif_type(layout.ty).unwrap();
+) -> Value {
+    let ty = fx.clif_type(ty).unwrap();
 
     let int_ty = match ty {
         types::F32 => types::I32,
@@ -282,289 +174,265 @@ fn bool_to_zero_or_max_uint<'tcx>(
         res = fx.bcx.ins().bitcast(ty, res);
     }
 
-    CValue::by_val(res, layout)
-}
-
-macro simd_cmp {
-    ($fx:expr, $cc:ident|$cc_f:ident($x:ident, $y:ident) -> $ret:ident) => {
-        let vector_ty = clif_vector_type($fx.tcx, $x.layout());
-
-        if let Some(vector_ty) = vector_ty {
-            let x = $x.load_scalar($fx);
-            let y = $y.load_scalar($fx);
-            let val = if vector_ty.lane_type().is_float() {
-                $fx.bcx.ins().fcmp(FloatCC::$cc_f, x, y)
-            } else {
-                $fx.bcx.ins().icmp(IntCC::$cc, x, y)
-            };
-
-            // HACK This depends on the fact that icmp for vectors represents bools as 0 and !0, not 0 and 1.
-            let val = $fx.bcx.ins().raw_bitcast(vector_ty, val);
-
-            $ret.write_cvalue($fx, CValue::by_val(val, $ret.layout()));
-        } else {
-            simd_pair_for_each_lane(
-                $fx,
-                $x,
-                $y,
-                $ret,
-                |fx, lane_layout, res_lane_layout, x_lane, y_lane| {
-                    let res_lane = match lane_layout.ty.kind() {
-                        ty::Uint(_) | ty::Int(_) => fx.bcx.ins().icmp(IntCC::$cc, x_lane, y_lane),
-                        ty::Float(_) => fx.bcx.ins().fcmp(FloatCC::$cc_f, x_lane, y_lane),
-                        _ => unreachable!("{:?}", lane_layout.ty),
-                    };
-                    bool_to_zero_or_max_uint(fx, res_lane_layout, res_lane)
-                },
-            );
-        }
-    },
-    ($fx:expr, $cc_u:ident|$cc_s:ident|$cc_f:ident($x:ident, $y:ident) -> $ret:ident) => {
-        // FIXME use vector icmp when possible
-        simd_pair_for_each_lane(
-            $fx,
-            $x,
-            $y,
-            $ret,
-            |fx, lane_layout, res_lane_layout, x_lane, y_lane| {
-                let res_lane = match lane_layout.ty.kind() {
-                    ty::Uint(_) => fx.bcx.ins().icmp(IntCC::$cc_u, x_lane, y_lane),
-                    ty::Int(_) => fx.bcx.ins().icmp(IntCC::$cc_s, x_lane, y_lane),
-                    ty::Float(_) => fx.bcx.ins().fcmp(FloatCC::$cc_f, x_lane, y_lane),
-                    _ => unreachable!("{:?}", lane_layout.ty),
-                };
-                bool_to_zero_or_max_uint(fx, res_lane_layout, res_lane)
-            },
-        );
-    },
-}
-
-macro simd_int_binop {
-    ($fx:expr, $op:ident($x:ident, $y:ident) -> $ret:ident) => {
-        simd_int_binop!($fx, $op|$op($x, $y) -> $ret);
-    },
-    ($fx:expr, $op_u:ident|$op_s:ident($x:ident, $y:ident) -> $ret:ident) => {
-        simd_pair_for_each_lane(
-            $fx,
-            $x,
-            $y,
-            $ret,
-            |fx, lane_layout, ret_lane_layout, x_lane, y_lane| {
-                let res_lane = match lane_layout.ty.kind() {
-                    ty::Uint(_) => fx.bcx.ins().$op_u(x_lane, y_lane),
-                    ty::Int(_) => fx.bcx.ins().$op_s(x_lane, y_lane),
-                    _ => unreachable!("{:?}", lane_layout.ty),
-                };
-                CValue::by_val(res_lane, ret_lane_layout)
-            },
-        );
-    },
-}
-
-macro simd_int_flt_binop {
-    ($fx:expr, $op:ident|$op_f:ident($x:ident, $y:ident) -> $ret:ident) => {
-        simd_int_flt_binop!($fx, $op|$op|$op_f($x, $y) -> $ret);
-    },
-    ($fx:expr, $op_u:ident|$op_s:ident|$op_f:ident($x:ident, $y:ident) -> $ret:ident) => {
-        simd_pair_for_each_lane(
-            $fx,
-            $x,
-            $y,
-            $ret,
-            |fx, lane_layout, ret_lane_layout, x_lane, y_lane| {
-                let res_lane = match lane_layout.ty.kind() {
-                    ty::Uint(_) => fx.bcx.ins().$op_u(x_lane, y_lane),
-                    ty::Int(_) => fx.bcx.ins().$op_s(x_lane, y_lane),
-                    ty::Float(_) => fx.bcx.ins().$op_f(x_lane, y_lane),
-                    _ => unreachable!("{:?}", lane_layout.ty),
-                };
-                CValue::by_val(res_lane, ret_lane_layout)
-            },
-        );
-    },
-}
-
-macro simd_flt_binop($fx:expr, $op:ident($x:ident, $y:ident) -> $ret:ident) {
-    simd_pair_for_each_lane(
-        $fx,
-        $x,
-        $y,
-        $ret,
-        |fx, lane_layout, ret_lane_layout, x_lane, y_lane| {
-            let res_lane = match lane_layout.ty.kind() {
-                ty::Float(_) => fx.bcx.ins().$op(x_lane, y_lane),
-                _ => unreachable!("{:?}", lane_layout.ty),
-            };
-            CValue::by_val(res_lane, ret_lane_layout)
-        },
-    );
+    res
 }
 
 pub(crate) fn codegen_intrinsic_call<'tcx>(
     fx: &mut FunctionCx<'_, '_, 'tcx>,
     instance: Instance<'tcx>,
     args: &[mir::Operand<'tcx>],
-    destination: Option<(CPlace<'tcx>, BasicBlock)>,
-    span: Span,
+    destination: CPlace<'tcx>,
+    target: Option<BasicBlock>,
+    source_info: mir::SourceInfo,
 ) {
-    let def_id = instance.def_id();
+    let intrinsic = fx.tcx.item_name(instance.def_id());
     let substs = instance.substs;
 
-    let intrinsic = fx.tcx.item_name(def_id);
-
-    let ret = match destination {
-        Some((place, _)) => place,
-        None => {
-            // Insert non returning intrinsics here
-            match intrinsic {
-                sym::abort => {
-                    trap_abort(fx, "Called intrinsic::abort.");
-                }
-                sym::transmute => {
-                    crate::base::codegen_panic(fx, "Transmuting to uninhabited type.", span);
-                }
-                _ => unimplemented!("unsupported intrinsic {}", intrinsic),
+    let target = if let Some(target) = target {
+        target
+    } else {
+        // Insert non returning intrinsics here
+        match intrinsic {
+            sym::abort => {
+                fx.bcx.ins().trap(TrapCode::User(0));
+            }
+            sym::transmute => {
+                crate::base::codegen_panic(fx, "Transmuting to uninhabited type.", source_info);
             }
-            return;
+            _ => unimplemented!("unsupported intrinsic {}", intrinsic),
         }
+        return;
     };
 
     if intrinsic.as_str().starts_with("simd_") {
-        self::simd::codegen_simd_intrinsic_call(fx, instance, args, ret, span);
-        let ret_block = fx.get_block(destination.expect("SIMD intrinsics don't diverge").1);
+        self::simd::codegen_simd_intrinsic_call(
+            fx,
+            intrinsic,
+            substs,
+            args,
+            destination,
+            source_info.span,
+        );
+        let ret_block = fx.get_block(target);
         fx.bcx.ins().jump(ret_block, &[]);
-        return;
+    } else if codegen_float_intrinsic_call(fx, intrinsic, args, destination) {
+        let ret_block = fx.get_block(target);
+        fx.bcx.ins().jump(ret_block, &[]);
+    } else {
+        codegen_regular_intrinsic_call(
+            fx,
+            instance,
+            intrinsic,
+            substs,
+            args,
+            destination,
+            Some(target),
+            source_info,
+        );
     }
+}
 
-    let usize_layout = fx.layout_of(fx.tcx.types.usize);
+fn codegen_float_intrinsic_call<'tcx>(
+    fx: &mut FunctionCx<'_, '_, 'tcx>,
+    intrinsic: Symbol,
+    args: &[mir::Operand<'tcx>],
+    ret: CPlace<'tcx>,
+) -> bool {
+    let (name, arg_count, ty) = match intrinsic {
+        sym::expf32 => ("expf", 1, fx.tcx.types.f32),
+        sym::expf64 => ("exp", 1, fx.tcx.types.f64),
+        sym::exp2f32 => ("exp2f", 1, fx.tcx.types.f32),
+        sym::exp2f64 => ("exp2", 1, fx.tcx.types.f64),
+        sym::sqrtf32 => ("sqrtf", 1, fx.tcx.types.f32),
+        sym::sqrtf64 => ("sqrt", 1, fx.tcx.types.f64),
+        sym::powif32 => ("__powisf2", 2, fx.tcx.types.f32), // compiler-builtins
+        sym::powif64 => ("__powidf2", 2, fx.tcx.types.f64), // compiler-builtins
+        sym::powf32 => ("powf", 2, fx.tcx.types.f32),
+        sym::powf64 => ("pow", 2, fx.tcx.types.f64),
+        sym::logf32 => ("logf", 1, fx.tcx.types.f32),
+        sym::logf64 => ("log", 1, fx.tcx.types.f64),
+        sym::log2f32 => ("log2f", 1, fx.tcx.types.f32),
+        sym::log2f64 => ("log2", 1, fx.tcx.types.f64),
+        sym::log10f32 => ("log10f", 1, fx.tcx.types.f32),
+        sym::log10f64 => ("log10", 1, fx.tcx.types.f64),
+        sym::fabsf32 => ("fabsf", 1, fx.tcx.types.f32),
+        sym::fabsf64 => ("fabs", 1, fx.tcx.types.f64),
+        sym::fmaf32 => ("fmaf", 3, fx.tcx.types.f32),
+        sym::fmaf64 => ("fma", 3, fx.tcx.types.f64),
+        sym::copysignf32 => ("copysignf", 2, fx.tcx.types.f32),
+        sym::copysignf64 => ("copysign", 2, fx.tcx.types.f64),
+        sym::floorf32 => ("floorf", 1, fx.tcx.types.f32),
+        sym::floorf64 => ("floor", 1, fx.tcx.types.f64),
+        sym::ceilf32 => ("ceilf", 1, fx.tcx.types.f32),
+        sym::ceilf64 => ("ceil", 1, fx.tcx.types.f64),
+        sym::truncf32 => ("truncf", 1, fx.tcx.types.f32),
+        sym::truncf64 => ("trunc", 1, fx.tcx.types.f64),
+        sym::roundf32 => ("roundf", 1, fx.tcx.types.f32),
+        sym::roundf64 => ("round", 1, fx.tcx.types.f64),
+        sym::sinf32 => ("sinf", 1, fx.tcx.types.f32),
+        sym::sinf64 => ("sin", 1, fx.tcx.types.f64),
+        sym::cosf32 => ("cosf", 1, fx.tcx.types.f32),
+        sym::cosf64 => ("cos", 1, fx.tcx.types.f64),
+        _ => return false,
+    };
 
-    call_intrinsic_match! {
-        fx, intrinsic, substs, ret, destination, args,
-        expf32(flt) -> f32 => expf,
-        expf64(flt) -> f64 => exp,
-        exp2f32(flt) -> f32 => exp2f,
-        exp2f64(flt) -> f64 => exp2,
-        sqrtf32(flt) -> f32 => sqrtf,
-        sqrtf64(flt) -> f64 => sqrt,
-        powif32(a, x) -> f32 => __powisf2, // compiler-builtins
-        powif64(a, x) -> f64 => __powidf2, // compiler-builtins
-        powf32(a, x) -> f32 => powf,
-        powf64(a, x) -> f64 => pow,
-        logf32(flt) -> f32 => logf,
-        logf64(flt) -> f64 => log,
-        log2f32(flt) -> f32 => log2f,
-        log2f64(flt) -> f64 => log2,
-        log10f32(flt) -> f32 => log10f,
-        log10f64(flt) -> f64 => log10,
-        fabsf32(flt) -> f32 => fabsf,
-        fabsf64(flt) -> f64 => fabs,
-        fmaf32(x, y, z) -> f32 => fmaf,
-        fmaf64(x, y, z) -> f64 => fma,
-        copysignf32(x, y) -> f32 => copysignf,
-        copysignf64(x, y) -> f64 => copysign,
-
-        // rounding variants
-        // FIXME use clif insts
-        floorf32(flt) -> f32 => floorf,
-        floorf64(flt) -> f64 => floor,
-        ceilf32(flt) -> f32 => ceilf,
-        ceilf64(flt) -> f64 => ceil,
-        truncf32(flt) -> f32 => truncf,
-        truncf64(flt) -> f64 => trunc,
-        roundf32(flt) -> f32 => roundf,
-        roundf64(flt) -> f64 => round,
-
-        // trigonometry
-        sinf32(flt) -> f32 => sinf,
-        sinf64(flt) -> f64 => sin,
-        cosf32(flt) -> f32 => cosf,
-        cosf64(flt) -> f64 => cos,
+    if args.len() != arg_count {
+        bug!("wrong number of args for intrinsic {:?}", intrinsic);
     }
 
-    intrinsic_match! {
-        fx, intrinsic, substs, args,
-        _ => {
-            fx.tcx.sess.span_fatal(span, &format!("unsupported intrinsic {}", intrinsic));
-        };
+    let (a, b, c);
+    let args = match args {
+        [x] => {
+            a = [codegen_operand(fx, x)];
+            &a as &[_]
+        }
+        [x, y] => {
+            b = [codegen_operand(fx, x), codegen_operand(fx, y)];
+            &b
+        }
+        [x, y, z] => {
+            c = [codegen_operand(fx, x), codegen_operand(fx, y), codegen_operand(fx, z)];
+            &c
+        }
+        _ => unreachable!(),
+    };
+
+    let res = fx.easy_call(name, &args, ty);
+    ret.write_cvalue(fx, res);
+
+    true
+}
+
+fn codegen_regular_intrinsic_call<'tcx>(
+    fx: &mut FunctionCx<'_, '_, 'tcx>,
+    instance: Instance<'tcx>,
+    intrinsic: Symbol,
+    substs: SubstsRef<'tcx>,
+    args: &[mir::Operand<'tcx>],
+    ret: CPlace<'tcx>,
+    destination: Option<BasicBlock>,
+    source_info: mir::SourceInfo,
+) {
+    let usize_layout = fx.layout_of(fx.tcx.types.usize);
+
+    match intrinsic {
+        sym::assume => {
+            intrinsic_args!(fx, args => (_a); intrinsic);
+        }
+        sym::likely | sym::unlikely => {
+            intrinsic_args!(fx, args => (a); intrinsic);
 
-        assume, (c _a) {};
-        likely | unlikely, (c a) {
             ret.write_cvalue(fx, a);
-        };
-        breakpoint, () {
+        }
+        sym::breakpoint => {
+            intrinsic_args!(fx, args => (); intrinsic);
+
             fx.bcx.ins().debugtrap();
-        };
-        copy | copy_nonoverlapping, <elem_ty> (v src, v dst, v count) {
+        }
+        sym::copy | sym::copy_nonoverlapping => {
+            intrinsic_args!(fx, args => (src, dst, count); intrinsic);
+            let src = src.load_scalar(fx);
+            let dst = dst.load_scalar(fx);
+            let count = count.load_scalar(fx);
+
+            let elem_ty = substs.type_at(0);
             let elem_size: u64 = fx.layout_of(elem_ty).size.bytes();
             assert_eq!(args.len(), 3);
-            let byte_amount = if elem_size != 1 {
-                fx.bcx.ins().imul_imm(count, elem_size as i64)
-            } else {
-                count
-            };
+            let byte_amount =
+                if elem_size != 1 { fx.bcx.ins().imul_imm(count, elem_size as i64) } else { count };
 
             if intrinsic == sym::copy_nonoverlapping {
                 // FIXME emit_small_memcpy
-                fx.bcx.call_memcpy(fx.module.target_config(), dst, src, byte_amount);
+                fx.bcx.call_memcpy(fx.target_config, dst, src, byte_amount);
             } else {
                 // FIXME emit_small_memmove
-                fx.bcx.call_memmove(fx.module.target_config(), dst, src, byte_amount);
+                fx.bcx.call_memmove(fx.target_config, dst, src, byte_amount);
             }
-        };
-        // NOTE: the volatile variants have src and dst swapped
-        volatile_copy_memory | volatile_copy_nonoverlapping_memory, <elem_ty> (v dst, v src, v count) {
+        }
+        sym::volatile_copy_memory | sym::volatile_copy_nonoverlapping_memory => {
+            // NOTE: the volatile variants have src and dst swapped
+            intrinsic_args!(fx, args => (dst, src, count); intrinsic);
+            let dst = dst.load_scalar(fx);
+            let src = src.load_scalar(fx);
+            let count = count.load_scalar(fx);
+
+            let elem_ty = substs.type_at(0);
             let elem_size: u64 = fx.layout_of(elem_ty).size.bytes();
             assert_eq!(args.len(), 3);
-            let byte_amount = if elem_size != 1 {
-                fx.bcx.ins().imul_imm(count, elem_size as i64)
-            } else {
-                count
-            };
+            let byte_amount =
+                if elem_size != 1 { fx.bcx.ins().imul_imm(count, elem_size as i64) } else { count };
 
             // FIXME make the copy actually volatile when using emit_small_mem{cpy,move}
             if intrinsic == sym::volatile_copy_nonoverlapping_memory {
                 // FIXME emit_small_memcpy
-                fx.bcx.call_memcpy(fx.module.target_config(), dst, src, byte_amount);
+                fx.bcx.call_memcpy(fx.target_config, dst, src, byte_amount);
             } else {
                 // FIXME emit_small_memmove
-                fx.bcx.call_memmove(fx.module.target_config(), dst, src, byte_amount);
+                fx.bcx.call_memmove(fx.target_config, dst, src, byte_amount);
             }
-        };
-        size_of_val, <T> (c ptr) {
-            let layout = fx.layout_of(T);
-            let size = if layout.is_unsized() {
+        }
+        sym::size_of_val => {
+            intrinsic_args!(fx, args => (ptr); intrinsic);
+
+            let layout = fx.layout_of(substs.type_at(0));
+            // Note: Can't use is_unsized here as truly unsized types need to take the fixed size
+            // branch
+            let size = if let Abi::ScalarPair(_, _) = ptr.layout().abi {
                 let (_ptr, info) = ptr.load_scalar_pair(fx);
                 let (size, _align) = crate::unsize::size_and_align_of_dst(fx, layout, info);
                 size
             } else {
-                fx
-                    .bcx
-                    .ins()
-                    .iconst(fx.pointer_type, layout.size.bytes() as i64)
+                fx.bcx.ins().iconst(fx.pointer_type, layout.size.bytes() as i64)
             };
             ret.write_cvalue(fx, CValue::by_val(size, usize_layout));
-        };
-        min_align_of_val, <T> (c ptr) {
-            let layout = fx.layout_of(T);
-            let align = if layout.is_unsized() {
+        }
+        sym::min_align_of_val => {
+            intrinsic_args!(fx, args => (ptr); intrinsic);
+
+            let layout = fx.layout_of(substs.type_at(0));
+            // Note: Can't use is_unsized here as truly unsized types need to take the fixed size
+            // branch
+            let align = if let Abi::ScalarPair(_, _) = ptr.layout().abi {
                 let (_ptr, info) = ptr.load_scalar_pair(fx);
                 let (_size, align) = crate::unsize::size_and_align_of_dst(fx, layout, info);
                 align
             } else {
-                fx
-                    .bcx
-                    .ins()
-                    .iconst(fx.pointer_type, layout.align.abi.bytes() as i64)
+                fx.bcx.ins().iconst(fx.pointer_type, layout.align.abi.bytes() as i64)
             };
             ret.write_cvalue(fx, CValue::by_val(align, usize_layout));
-        };
+        }
+
+        sym::vtable_size => {
+            intrinsic_args!(fx, args => (vtable); intrinsic);
+            let vtable = vtable.load_scalar(fx);
+
+            let size = crate::vtable::size_of_obj(fx, vtable);
+            ret.write_cvalue(fx, CValue::by_val(size, usize_layout));
+        }
+
+        sym::vtable_align => {
+            intrinsic_args!(fx, args => (vtable); intrinsic);
+            let vtable = vtable.load_scalar(fx);
+
+            let align = crate::vtable::min_align_of_obj(fx, vtable);
+            ret.write_cvalue(fx, CValue::by_val(align, usize_layout));
+        }
+
+        sym::unchecked_add
+        | sym::unchecked_sub
+        | sym::unchecked_mul
+        | sym::unchecked_div
+        | sym::exact_div
+        | sym::unchecked_rem
+        | sym::unchecked_shl
+        | sym::unchecked_shr => {
+            intrinsic_args!(fx, args => (x, y); intrinsic);
 
-        unchecked_add | unchecked_sub | unchecked_div | exact_div | unchecked_rem
-        | unchecked_shl | unchecked_shr, (c x, c y) {
             // FIXME trap on overflow
             let bin_op = match intrinsic {
                 sym::unchecked_add => BinOp::Add,
                 sym::unchecked_sub => BinOp::Sub,
+                sym::unchecked_mul => BinOp::Mul,
                 sym::unchecked_div | sym::exact_div => BinOp::Div,
                 sym::unchecked_rem => BinOp::Rem,
                 sym::unchecked_shl => BinOp::Shl,
@@ -573,8 +441,10 @@ pub(crate) fn codegen_intrinsic_call<'tcx>(
             };
             let res = crate::num::codegen_int_binop(fx, bin_op, x, y);
             ret.write_cvalue(fx, res);
-        };
-        add_with_overflow | sub_with_overflow | mul_with_overflow, (c x, c y) {
+        }
+        sym::add_with_overflow | sym::sub_with_overflow | sym::mul_with_overflow => {
+            intrinsic_args!(fx, args => (x, y); intrinsic);
+
             assert_eq!(x.layout().ty, y.layout().ty);
             let bin_op = match intrinsic {
                 sym::add_with_overflow => BinOp::Add,
@@ -583,15 +453,12 @@ pub(crate) fn codegen_intrinsic_call<'tcx>(
                 _ => unreachable!(),
             };
 
-            let res = crate::num::codegen_checked_int_binop(
-                fx,
-                bin_op,
-                x,
-                y,
-            );
+            let res = crate::num::codegen_checked_int_binop(fx, bin_op, x, y);
             ret.write_cvalue(fx, res);
-        };
-        saturating_add | saturating_sub, <T> (c lhs, c rhs) {
+        }
+        sym::saturating_add | sym::saturating_sub => {
+            intrinsic_args!(fx, args => (lhs, rhs); intrinsic);
+
             assert_eq!(lhs.layout().ty, rhs.layout().ty);
             let bin_op = match intrinsic {
                 sym::saturating_add => BinOp::Add,
@@ -599,17 +466,12 @@ pub(crate) fn codegen_intrinsic_call<'tcx>(
                 _ => unreachable!(),
             };
 
-            let signed = type_sign(T);
+            let signed = type_sign(lhs.layout().ty);
 
-            let checked_res = crate::num::codegen_checked_int_binop(
-                fx,
-                bin_op,
-                lhs,
-                rhs,
-            );
+            let checked_res = crate::num::codegen_checked_int_binop(fx, bin_op, lhs, rhs);
 
             let (val, has_overflow) = checked_res.load_scalar_pair(fx);
-            let clif_ty = fx.clif_type(T).unwrap();
+            let clif_ty = fx.clif_type(lhs.layout().ty).unwrap();
 
             let (min, max) = type_min_max_value(&mut fx.bcx, clif_ty, signed);
 
@@ -618,37 +480,50 @@ pub(crate) fn codegen_intrinsic_call<'tcx>(
                 (sym::saturating_sub, false) => fx.bcx.ins().select(has_overflow, min, val),
                 (sym::saturating_add, true) => {
                     let rhs = rhs.load_scalar(fx);
-                    let rhs_ge_zero = fx.bcx.ins().icmp_imm(IntCC::SignedGreaterThanOrEqual, rhs, 0);
+                    let rhs_ge_zero =
+                        fx.bcx.ins().icmp_imm(IntCC::SignedGreaterThanOrEqual, rhs, 0);
                     let sat_val = fx.bcx.ins().select(rhs_ge_zero, max, min);
                     fx.bcx.ins().select(has_overflow, sat_val, val)
                 }
                 (sym::saturating_sub, true) => {
                     let rhs = rhs.load_scalar(fx);
-                    let rhs_ge_zero = fx.bcx.ins().icmp_imm(IntCC::SignedGreaterThanOrEqual, rhs, 0);
+                    let rhs_ge_zero =
+                        fx.bcx.ins().icmp_imm(IntCC::SignedGreaterThanOrEqual, rhs, 0);
                     let sat_val = fx.bcx.ins().select(rhs_ge_zero, min, max);
                     fx.bcx.ins().select(has_overflow, sat_val, val)
                 }
                 _ => unreachable!(),
             };
 
-            let res = CValue::by_val(val, fx.layout_of(T));
+            let res = CValue::by_val(val, lhs.layout());
 
             ret.write_cvalue(fx, res);
-        };
-        rotate_left, <T>(v x, v y) {
-            let layout = fx.layout_of(T);
+        }
+        sym::rotate_left => {
+            intrinsic_args!(fx, args => (x, y); intrinsic);
+            let y = y.load_scalar(fx);
+
+            let layout = x.layout();
+            let x = x.load_scalar(fx);
             let res = fx.bcx.ins().rotl(x, y);
             ret.write_cvalue(fx, CValue::by_val(res, layout));
-        };
-        rotate_right, <T>(v x, v y) {
-            let layout = fx.layout_of(T);
+        }
+        sym::rotate_right => {
+            intrinsic_args!(fx, args => (x, y); intrinsic);
+            let y = y.load_scalar(fx);
+
+            let layout = x.layout();
+            let x = x.load_scalar(fx);
             let res = fx.bcx.ins().rotr(x, y);
             ret.write_cvalue(fx, CValue::by_val(res, layout));
-        };
+        }
 
         // The only difference between offset and arith_offset is regarding UB. Because Cranelift
         // doesn't have UB both are codegen'ed the same way
-        offset | arith_offset, (c base, v offset) {
+        sym::offset | sym::arith_offset => {
+            intrinsic_args!(fx, args => (base, offset); intrinsic);
+            let offset = offset.load_scalar(fx);
+
             let pointee_ty = base.layout().ty.builtin_deref(true).unwrap().ty;
             let pointee_size = fx.layout_of(pointee_ty).size.bytes();
             let ptr_diff = if pointee_size != 1 {
@@ -659,12 +534,18 @@ pub(crate) fn codegen_intrinsic_call<'tcx>(
             let base_val = base.load_scalar(fx);
             let res = fx.bcx.ins().iadd(base_val, ptr_diff);
             ret.write_cvalue(fx, CValue::by_val(res, base.layout()));
-        };
+        }
+
+        sym::transmute => {
+            intrinsic_args!(fx, args => (from); intrinsic);
 
-        transmute, (c from) {
             ret.write_cvalue_transmute(fx, from);
-        };
-        write_bytes | volatile_set_memory, (c dst, v val, v count) {
+        }
+        sym::write_bytes | sym::volatile_set_memory => {
+            intrinsic_args!(fx, args => (dst, val, count); intrinsic);
+            let val = val.load_scalar(fx);
+            let count = count.load_scalar(fx);
+
             let pointee_ty = dst.layout().ty.builtin_deref(true).unwrap().ty;
             let pointee_size = fx.layout_of(pointee_ty).size.bytes();
             let count = if pointee_size != 1 {
@@ -675,31 +556,43 @@ pub(crate) fn codegen_intrinsic_call<'tcx>(
             let dst_ptr = dst.load_scalar(fx);
             // FIXME make the memset actually volatile when switching to emit_small_memset
             // FIXME use emit_small_memset
-            fx.bcx.call_memset(fx.module.target_config(), dst_ptr, val, count);
-        };
-        ctlz | ctlz_nonzero, <T> (v arg) {
+            fx.bcx.call_memset(fx.target_config, dst_ptr, val, count);
+        }
+        sym::ctlz | sym::ctlz_nonzero => {
+            intrinsic_args!(fx, args => (arg); intrinsic);
+            let val = arg.load_scalar(fx);
+
             // FIXME trap on `ctlz_nonzero` with zero arg.
-            let res = fx.bcx.ins().clz(arg);
-            let res = CValue::by_val(res, fx.layout_of(T));
+            let res = fx.bcx.ins().clz(val);
+            let res = CValue::by_val(res, arg.layout());
             ret.write_cvalue(fx, res);
-        };
-        cttz | cttz_nonzero, <T> (v arg) {
+        }
+        sym::cttz | sym::cttz_nonzero => {
+            intrinsic_args!(fx, args => (arg); intrinsic);
+            let val = arg.load_scalar(fx);
+
             // FIXME trap on `cttz_nonzero` with zero arg.
-            let res = fx.bcx.ins().ctz(arg);
-            let res = CValue::by_val(res, fx.layout_of(T));
+            let res = fx.bcx.ins().ctz(val);
+            let res = CValue::by_val(res, arg.layout());
             ret.write_cvalue(fx, res);
-        };
-        ctpop, <T> (v arg) {
-            let res = fx.bcx.ins().popcnt(arg);
-            let res = CValue::by_val(res, fx.layout_of(T));
+        }
+        sym::ctpop => {
+            intrinsic_args!(fx, args => (arg); intrinsic);
+            let val = arg.load_scalar(fx);
+
+            let res = fx.bcx.ins().popcnt(val);
+            let res = CValue::by_val(res, arg.layout());
             ret.write_cvalue(fx, res);
-        };
-        bitreverse, <T> (v arg) {
-            let res = fx.bcx.ins().bitrev(arg);
-            let res = CValue::by_val(res, fx.layout_of(T));
+        }
+        sym::bitreverse => {
+            intrinsic_args!(fx, args => (arg); intrinsic);
+            let val = arg.load_scalar(fx);
+
+            let res = fx.bcx.ins().bitrev(val);
+            let res = CValue::by_val(res, arg.layout());
             ret.write_cvalue(fx, res);
-        };
-        bswap, <T> (v arg) {
+        }
+        sym::bswap => {
             // FIXME(CraneStation/cranelift#794) add bswap instruction to cranelift
             fn swap(bcx: &mut FunctionBuilder<'_>, v: Value) -> Value {
                 match bcx.func.dfg.value_type(v) {
@@ -775,116 +668,217 @@ fn swap(bcx: &mut FunctionBuilder<'_>, v: Value) -> Value {
                     ty => unreachable!("bswap {}", ty),
                 }
             }
-            let res = CValue::by_val(swap(&mut fx.bcx, arg), fx.layout_of(T));
+            intrinsic_args!(fx, args => (arg); intrinsic);
+            let val = arg.load_scalar(fx);
+
+            let res = CValue::by_val(swap(&mut fx.bcx, val), arg.layout());
             ret.write_cvalue(fx, res);
-        };
-        assert_inhabited | assert_zero_valid | assert_uninit_valid, <T> () {
-            let layout = fx.layout_of(T);
+        }
+        sym::assert_inhabited | sym::assert_zero_valid | sym::assert_uninit_valid => {
+            intrinsic_args!(fx, args => (); intrinsic);
+
+            let layout = fx.layout_of(substs.type_at(0));
             if layout.abi.is_uninhabited() {
-                with_no_trimmed_paths(|| crate::base::codegen_panic(
-                    fx,
-                    &format!("attempted to instantiate uninhabited type `{}`", T),
-                    span,
-                ));
+                with_no_trimmed_paths!({
+                    crate::base::codegen_panic(
+                        fx,
+                        &format!("attempted to instantiate uninhabited type `{}`", layout.ty),
+                        source_info,
+                    )
+                });
                 return;
             }
 
-            if intrinsic == sym::assert_zero_valid && !layout.might_permit_raw_init(fx, /*zero:*/ true).unwrap() {
-                with_no_trimmed_paths(|| crate::base::codegen_panic(
-                    fx,
-                    &format!("attempted to zero-initialize type `{}`, which is invalid", T),
-                    span,
-                ));
+            if intrinsic == sym::assert_zero_valid && !fx.tcx.permits_zero_init(layout) {
+                with_no_trimmed_paths!({
+                    crate::base::codegen_panic(
+                        fx,
+                        &format!(
+                            "attempted to zero-initialize type `{}`, which is invalid",
+                            layout.ty
+                        ),
+                        source_info,
+                    );
+                });
                 return;
             }
 
-            if intrinsic == sym::assert_uninit_valid && !layout.might_permit_raw_init(fx, /*zero:*/ false).unwrap() {
-                with_no_trimmed_paths(|| crate::base::codegen_panic(
-                    fx,
-                    &format!("attempted to leave type `{}` uninitialized, which is invalid", T),
-                    span,
-                ));
+            if intrinsic == sym::assert_uninit_valid && !fx.tcx.permits_uninit_init(layout) {
+                with_no_trimmed_paths!({
+                    crate::base::codegen_panic(
+                        fx,
+                        &format!(
+                            "attempted to leave type `{}` uninitialized, which is invalid",
+                            layout.ty
+                        ),
+                        source_info,
+                    )
+                });
                 return;
             }
-        };
+        }
+
+        sym::volatile_load | sym::unaligned_volatile_load => {
+            intrinsic_args!(fx, args => (ptr); intrinsic);
 
-        volatile_load | unaligned_volatile_load, (c ptr) {
             // Cranelift treats loads as volatile by default
             // FIXME correctly handle unaligned_volatile_load
-            let inner_layout =
-                fx.layout_of(ptr.layout().ty.builtin_deref(true).unwrap().ty);
+            let inner_layout = fx.layout_of(ptr.layout().ty.builtin_deref(true).unwrap().ty);
             let val = CValue::by_ref(Pointer::new(ptr.load_scalar(fx)), inner_layout);
             ret.write_cvalue(fx, val);
-        };
-        volatile_store | unaligned_volatile_store, (v ptr, c val) {
+        }
+        sym::volatile_store | sym::unaligned_volatile_store => {
+            intrinsic_args!(fx, args => (ptr, val); intrinsic);
+            let ptr = ptr.load_scalar(fx);
+
             // Cranelift treats stores as volatile by default
             // FIXME correctly handle unaligned_volatile_store
             let dest = CPlace::for_ptr(Pointer::new(ptr), val.layout());
             dest.write_cvalue(fx, val);
-        };
+        }
+
+        sym::pref_align_of
+        | sym::needs_drop
+        | sym::type_id
+        | sym::type_name
+        | sym::variant_count => {
+            intrinsic_args!(fx, args => (); intrinsic);
 
-        pref_align_of | min_align_of | needs_drop | type_id | type_name | variant_count, () {
             let const_val =
                 fx.tcx.const_eval_instance(ParamEnv::reveal_all(), instance, None).unwrap();
-            let val = crate::constant::codegen_const_value(
-                fx,
-                const_val,
-                ret.layout().ty,
-            );
+            let val = crate::constant::codegen_const_value(fx, const_val, ret.layout().ty);
             ret.write_cvalue(fx, val);
-        };
+        }
 
-        ptr_offset_from, <T> (v ptr, v base) {
-            let isize_layout = fx.layout_of(fx.tcx.types.isize);
+        sym::ptr_offset_from | sym::ptr_offset_from_unsigned => {
+            intrinsic_args!(fx, args => (ptr, base); intrinsic);
+            let ptr = ptr.load_scalar(fx);
+            let base = base.load_scalar(fx);
+            let ty = substs.type_at(0);
 
-            let pointee_size: u64 = fx.layout_of(T).size.bytes();
-            let diff = fx.bcx.ins().isub(ptr, base);
+            let pointee_size: u64 = fx.layout_of(ty).size.bytes();
+            let diff_bytes = fx.bcx.ins().isub(ptr, base);
             // FIXME this can be an exact division.
-            let val = CValue::by_val(fx.bcx.ins().sdiv_imm(diff, pointee_size as i64), isize_layout);
+            let val = if intrinsic == sym::ptr_offset_from_unsigned {
+                let usize_layout = fx.layout_of(fx.tcx.types.usize);
+                // Because diff_bytes ULE isize::MAX, this would be fine as signed,
+                // but unsigned is slightly easier to codegen, so might as well.
+                CValue::by_val(fx.bcx.ins().udiv_imm(diff_bytes, pointee_size as i64), usize_layout)
+            } else {
+                let isize_layout = fx.layout_of(fx.tcx.types.isize);
+                CValue::by_val(fx.bcx.ins().sdiv_imm(diff_bytes, pointee_size as i64), isize_layout)
+            };
             ret.write_cvalue(fx, val);
-        };
+        }
+
+        sym::ptr_guaranteed_eq => {
+            intrinsic_args!(fx, args => (a, b); intrinsic);
 
-        ptr_guaranteed_eq, (c a, c b) {
             let val = crate::num::codegen_ptr_binop(fx, BinOp::Eq, a, b);
             ret.write_cvalue(fx, val);
-        };
+        }
+
+        sym::ptr_guaranteed_ne => {
+            intrinsic_args!(fx, args => (a, b); intrinsic);
 
-        ptr_guaranteed_ne, (c a, c b) {
             let val = crate::num::codegen_ptr_binop(fx, BinOp::Ne, a, b);
             ret.write_cvalue(fx, val);
-        };
+        }
+
+        sym::caller_location => {
+            intrinsic_args!(fx, args => (); intrinsic);
 
-        caller_location, () {
-            let caller_location = fx.get_caller_location(span);
+            let caller_location = fx.get_caller_location(source_info);
             ret.write_cvalue(fx, caller_location);
-        };
+        }
+
+        _ if intrinsic.as_str().starts_with("atomic_fence") => {
+            intrinsic_args!(fx, args => (); intrinsic);
 
-        _ if intrinsic.as_str().starts_with("atomic_fence"), () {
             fx.bcx.ins().fence();
-        };
-        _ if intrinsic.as_str().starts_with("atomic_singlethreadfence"), () {
+        }
+        _ if intrinsic.as_str().starts_with("atomic_singlethreadfence") => {
+            intrinsic_args!(fx, args => (); intrinsic);
+
             // FIXME use a compiler fence once Cranelift supports it
             fx.bcx.ins().fence();
-        };
-        _ if intrinsic.as_str().starts_with("atomic_load"), <T> (v ptr) {
-            validate_atomic_type!(fx, intrinsic, span, T);
-            let ty = fx.clif_type(T).unwrap();
+        }
+        _ if intrinsic.as_str().starts_with("atomic_load") => {
+            intrinsic_args!(fx, args => (ptr); intrinsic);
+            let ptr = ptr.load_scalar(fx);
+
+            let ty = substs.type_at(0);
+            match ty.kind() {
+                ty::Uint(UintTy::U128) | ty::Int(IntTy::I128) => {
+                    // FIXME implement 128bit atomics
+                    if fx.tcx.is_compiler_builtins(LOCAL_CRATE) {
+                        // special case for compiler-builtins to avoid having to patch it
+                        crate::trap::trap_unimplemented(fx, "128bit atomics not yet supported");
+                        let ret_block = fx.get_block(destination.unwrap());
+                        fx.bcx.ins().jump(ret_block, &[]);
+                        return;
+                    } else {
+                        fx.tcx
+                            .sess
+                            .span_fatal(source_info.span, "128bit atomics not yet supported");
+                    }
+                }
+                ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
+                _ => {
+                    report_atomic_type_validation_error(fx, intrinsic, source_info.span, ty);
+                    return;
+                }
+            }
+            let clif_ty = fx.clif_type(ty).unwrap();
 
-            let val = fx.bcx.ins().atomic_load(ty, MemFlags::trusted(), ptr);
+            let val = fx.bcx.ins().atomic_load(clif_ty, MemFlags::trusted(), ptr);
 
-            let val = CValue::by_val(val, fx.layout_of(T));
+            let val = CValue::by_val(val, fx.layout_of(ty));
             ret.write_cvalue(fx, val);
-        };
-        _ if intrinsic.as_str().starts_with("atomic_store"), (v ptr, c val) {
-            validate_atomic_type!(fx, intrinsic, span, val.layout().ty);
+        }
+        _ if intrinsic.as_str().starts_with("atomic_store") => {
+            intrinsic_args!(fx, args => (ptr, val); intrinsic);
+            let ptr = ptr.load_scalar(fx);
+
+            let ty = substs.type_at(0);
+            match ty.kind() {
+                ty::Uint(UintTy::U128) | ty::Int(IntTy::I128) => {
+                    // FIXME implement 128bit atomics
+                    if fx.tcx.is_compiler_builtins(LOCAL_CRATE) {
+                        // special case for compiler-builtins to avoid having to patch it
+                        crate::trap::trap_unimplemented(fx, "128bit atomics not yet supported");
+                        let ret_block = fx.get_block(destination.unwrap());
+                        fx.bcx.ins().jump(ret_block, &[]);
+                        return;
+                    } else {
+                        fx.tcx
+                            .sess
+                            .span_fatal(source_info.span, "128bit atomics not yet supported");
+                    }
+                }
+                ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
+                _ => {
+                    report_atomic_type_validation_error(fx, intrinsic, source_info.span, ty);
+                    return;
+                }
+            }
 
             let val = val.load_scalar(fx);
 
             fx.bcx.ins().atomic_store(MemFlags::trusted(), val, ptr);
-        };
-        _ if intrinsic.as_str().starts_with("atomic_xchg"), (v ptr, c new) {
+        }
+        _ if intrinsic.as_str().starts_with("atomic_xchg") => {
+            intrinsic_args!(fx, args => (ptr, new); intrinsic);
+            let ptr = ptr.load_scalar(fx);
+
             let layout = new.layout();
-            validate_atomic_type!(fx, intrinsic, span, layout.ty);
+            match layout.ty.kind() {
+                ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
+                _ => {
+                    report_atomic_type_validation_error(fx, intrinsic, source_info.span, layout.ty);
+                    return;
+                }
+            }
             let ty = fx.clif_type(layout.ty).unwrap();
 
             let new = new.load_scalar(fx);
@@ -893,10 +887,20 @@ fn swap(bcx: &mut FunctionBuilder<'_>, v: Value) -> Value {
 
             let old = CValue::by_val(old, layout);
             ret.write_cvalue(fx, old);
-        };
-        _ if intrinsic.as_str().starts_with("atomic_cxchg"), (v ptr, c test_old, c new) { // both atomic_cxchg_* and atomic_cxchgweak_*
+        }
+        _ if intrinsic.as_str().starts_with("atomic_cxchg") => {
+            // both atomic_cxchg_* and atomic_cxchgweak_*
+            intrinsic_args!(fx, args => (ptr, test_old, new); intrinsic);
+            let ptr = ptr.load_scalar(fx);
+
             let layout = new.layout();
-            validate_atomic_type!(fx, intrinsic, span, layout.ty);
+            match layout.ty.kind() {
+                ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
+                _ => {
+                    report_atomic_type_validation_error(fx, intrinsic, source_info.span, layout.ty);
+                    return;
+                }
+            }
 
             let test_old = test_old.load_scalar(fx);
             let new = new.load_scalar(fx);
@@ -904,37 +908,67 @@ fn swap(bcx: &mut FunctionBuilder<'_>, v: Value) -> Value {
             let old = fx.bcx.ins().atomic_cas(MemFlags::trusted(), ptr, test_old, new);
             let is_eq = fx.bcx.ins().icmp(IntCC::Equal, old, test_old);
 
-            let ret_val = CValue::by_val_pair(old, fx.bcx.ins().bint(types::I8, is_eq), ret.layout());
+            let ret_val =
+                CValue::by_val_pair(old, fx.bcx.ins().bint(types::I8, is_eq), ret.layout());
             ret.write_cvalue(fx, ret_val)
-        };
+        }
+
+        _ if intrinsic.as_str().starts_with("atomic_xadd") => {
+            intrinsic_args!(fx, args => (ptr, amount); intrinsic);
+            let ptr = ptr.load_scalar(fx);
 
-        _ if intrinsic.as_str().starts_with("atomic_xadd"), (v ptr, c amount) {
             let layout = amount.layout();
-            validate_atomic_type!(fx, intrinsic, span, layout.ty);
+            match layout.ty.kind() {
+                ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
+                _ => {
+                    report_atomic_type_validation_error(fx, intrinsic, source_info.span, layout.ty);
+                    return;
+                }
+            }
             let ty = fx.clif_type(layout.ty).unwrap();
 
             let amount = amount.load_scalar(fx);
 
-            let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Add, ptr, amount);
+            let old =
+                fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Add, ptr, amount);
 
             let old = CValue::by_val(old, layout);
             ret.write_cvalue(fx, old);
-        };
-        _ if intrinsic.as_str().starts_with("atomic_xsub"), (v ptr, c amount) {
+        }
+        _ if intrinsic.as_str().starts_with("atomic_xsub") => {
+            intrinsic_args!(fx, args => (ptr, amount); intrinsic);
+            let ptr = ptr.load_scalar(fx);
+
             let layout = amount.layout();
-            validate_atomic_type!(fx, intrinsic, span, layout.ty);
+            match layout.ty.kind() {
+                ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
+                _ => {
+                    report_atomic_type_validation_error(fx, intrinsic, source_info.span, layout.ty);
+                    return;
+                }
+            }
             let ty = fx.clif_type(layout.ty).unwrap();
 
             let amount = amount.load_scalar(fx);
 
-            let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Sub, ptr, amount);
+            let old =
+                fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Sub, ptr, amount);
 
             let old = CValue::by_val(old, layout);
             ret.write_cvalue(fx, old);
-        };
-        _ if intrinsic.as_str().starts_with("atomic_and"), (v ptr, c src) {
+        }
+        _ if intrinsic.as_str().starts_with("atomic_and") => {
+            intrinsic_args!(fx, args => (ptr, src); intrinsic);
+            let ptr = ptr.load_scalar(fx);
+
             let layout = src.layout();
-            validate_atomic_type!(fx, intrinsic, span, layout.ty);
+            match layout.ty.kind() {
+                ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
+                _ => {
+                    report_atomic_type_validation_error(fx, intrinsic, source_info.span, layout.ty);
+                    return;
+                }
+            }
             let ty = fx.clif_type(layout.ty).unwrap();
 
             let src = src.load_scalar(fx);
@@ -943,10 +977,19 @@ fn swap(bcx: &mut FunctionBuilder<'_>, v: Value) -> Value {
 
             let old = CValue::by_val(old, layout);
             ret.write_cvalue(fx, old);
-        };
-        _ if intrinsic.as_str().starts_with("atomic_or"), (v ptr, c src) {
+        }
+        _ if intrinsic.as_str().starts_with("atomic_or") => {
+            intrinsic_args!(fx, args => (ptr, src); intrinsic);
+            let ptr = ptr.load_scalar(fx);
+
             let layout = src.layout();
-            validate_atomic_type!(fx, intrinsic, span, layout.ty);
+            match layout.ty.kind() {
+                ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
+                _ => {
+                    report_atomic_type_validation_error(fx, intrinsic, source_info.span, layout.ty);
+                    return;
+                }
+            }
             let ty = fx.clif_type(layout.ty).unwrap();
 
             let src = src.load_scalar(fx);
@@ -955,10 +998,19 @@ fn swap(bcx: &mut FunctionBuilder<'_>, v: Value) -> Value {
 
             let old = CValue::by_val(old, layout);
             ret.write_cvalue(fx, old);
-        };
-        _ if intrinsic.as_str().starts_with("atomic_xor"), (v ptr, c src) {
+        }
+        _ if intrinsic.as_str().starts_with("atomic_xor") => {
+            intrinsic_args!(fx, args => (ptr, src); intrinsic);
+            let ptr = ptr.load_scalar(fx);
+
             let layout = src.layout();
-            validate_atomic_type!(fx, intrinsic, span, layout.ty);
+            match layout.ty.kind() {
+                ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
+                _ => {
+                    report_atomic_type_validation_error(fx, intrinsic, source_info.span, layout.ty);
+                    return;
+                }
+            }
             let ty = fx.clif_type(layout.ty).unwrap();
 
             let src = src.load_scalar(fx);
@@ -967,10 +1019,19 @@ fn swap(bcx: &mut FunctionBuilder<'_>, v: Value) -> Value {
 
             let old = CValue::by_val(old, layout);
             ret.write_cvalue(fx, old);
-        };
-        _ if intrinsic.as_str().starts_with("atomic_nand"), (v ptr, c src) {
+        }
+        _ if intrinsic.as_str().starts_with("atomic_nand") => {
+            intrinsic_args!(fx, args => (ptr, src); intrinsic);
+            let ptr = ptr.load_scalar(fx);
+
             let layout = src.layout();
-            validate_atomic_type!(fx, intrinsic, span, layout.ty);
+            match layout.ty.kind() {
+                ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
+                _ => {
+                    report_atomic_type_validation_error(fx, intrinsic, source_info.span, layout.ty);
+                    return;
+                }
+            }
             let ty = fx.clif_type(layout.ty).unwrap();
 
             let src = src.load_scalar(fx);
@@ -979,10 +1040,19 @@ fn swap(bcx: &mut FunctionBuilder<'_>, v: Value) -> Value {
 
             let old = CValue::by_val(old, layout);
             ret.write_cvalue(fx, old);
-        };
-        _ if intrinsic.as_str().starts_with("atomic_max"), (v ptr, c src) {
+        }
+        _ if intrinsic.as_str().starts_with("atomic_max") => {
+            intrinsic_args!(fx, args => (ptr, src); intrinsic);
+            let ptr = ptr.load_scalar(fx);
+
             let layout = src.layout();
-            validate_atomic_type!(fx, intrinsic, span, layout.ty);
+            match layout.ty.kind() {
+                ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
+                _ => {
+                    report_atomic_type_validation_error(fx, intrinsic, source_info.span, layout.ty);
+                    return;
+                }
+            }
             let ty = fx.clif_type(layout.ty).unwrap();
 
             let src = src.load_scalar(fx);
@@ -991,10 +1061,19 @@ fn swap(bcx: &mut FunctionBuilder<'_>, v: Value) -> Value {
 
             let old = CValue::by_val(old, layout);
             ret.write_cvalue(fx, old);
-        };
-        _ if intrinsic.as_str().starts_with("atomic_umax"), (v ptr, c src) {
+        }
+        _ if intrinsic.as_str().starts_with("atomic_umax") => {
+            intrinsic_args!(fx, args => (ptr, src); intrinsic);
+            let ptr = ptr.load_scalar(fx);
+
             let layout = src.layout();
-            validate_atomic_type!(fx, intrinsic, span, layout.ty);
+            match layout.ty.kind() {
+                ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
+                _ => {
+                    report_atomic_type_validation_error(fx, intrinsic, source_info.span, layout.ty);
+                    return;
+                }
+            }
             let ty = fx.clif_type(layout.ty).unwrap();
 
             let src = src.load_scalar(fx);
@@ -1003,10 +1082,19 @@ fn swap(bcx: &mut FunctionBuilder<'_>, v: Value) -> Value {
 
             let old = CValue::by_val(old, layout);
             ret.write_cvalue(fx, old);
-        };
-        _ if intrinsic.as_str().starts_with("atomic_min"), (v ptr, c src) {
+        }
+        _ if intrinsic.as_str().starts_with("atomic_min") => {
+            intrinsic_args!(fx, args => (ptr, src); intrinsic);
+            let ptr = ptr.load_scalar(fx);
+
             let layout = src.layout();
-            validate_atomic_type!(fx, intrinsic, span, layout.ty);
+            match layout.ty.kind() {
+                ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
+                _ => {
+                    report_atomic_type_validation_error(fx, intrinsic, source_info.span, layout.ty);
+                    return;
+                }
+            }
             let ty = fx.clif_type(layout.ty).unwrap();
 
             let src = src.load_scalar(fx);
@@ -1015,10 +1103,19 @@ fn swap(bcx: &mut FunctionBuilder<'_>, v: Value) -> Value {
 
             let old = CValue::by_val(old, layout);
             ret.write_cvalue(fx, old);
-        };
-        _ if intrinsic.as_str().starts_with("atomic_umin"), (v ptr, c src) {
+        }
+        _ if intrinsic.as_str().starts_with("atomic_umin") => {
+            intrinsic_args!(fx, args => (ptr, src); intrinsic);
+            let ptr = ptr.load_scalar(fx);
+
             let layout = src.layout();
-            validate_atomic_type!(fx, intrinsic, span, layout.ty);
+            match layout.ty.kind() {
+                ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
+                _ => {
+                    report_atomic_type_validation_error(fx, intrinsic, source_info.span, layout.ty);
+                    return;
+                }
+            }
             let ty = fx.clif_type(layout.ty).unwrap();
 
             let src = src.load_scalar(fx);
@@ -1027,49 +1124,54 @@ fn swap(bcx: &mut FunctionBuilder<'_>, v: Value) -> Value {
 
             let old = CValue::by_val(old, layout);
             ret.write_cvalue(fx, old);
-        };
+        }
+
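Each `atomic_*` arm above follows the same shape once the macro is gone: load the pointer operand, reject element types other than integers and raw pointers, then emit a single Cranelift `atomic_rmw` whose result is the previous memory contents. A minimal sketch of that returned-old-value contract, using std atomics rather than the raw intrinsics (the helper call here is illustrative, not backend code):

    use std::sync::atomic::{AtomicU32, Ordering};

    fn main() {
        let x = AtomicU32::new(0b1100);
        // Same contract as the `atomic_and*` arm: apply the op, get the old value back.
        let old = x.fetch_and(0b1010, Ordering::SeqCst);
        assert_eq!(old, 0b1100);                      // the intrinsic yields the previous value
        assert_eq!(x.load(Ordering::SeqCst), 0b1000); // memory now holds old & src
    }
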
+        sym::minnumf32 => {
+            intrinsic_args!(fx, args => (a, b); intrinsic);
+            let a = a.load_scalar(fx);
+            let b = b.load_scalar(fx);
 
-        // In Rust floating point min and max don't propagate NaN. In Cranelift they do however.
-        // For this reason it is necessary to use `a.is_nan() ? b : (a >= b ? b : a)` for `minnumf*`
-        // and `a.is_nan() ? b : (a <= b ? b : a)` for `maxnumf*`. NaN checks are done by comparing
-        // a float against itself. Only in case of NaN is it not equal to itself.
-        minnumf32, (v a, v b) {
-            let a_is_nan = fx.bcx.ins().fcmp(FloatCC::NotEqual, a, a);
-            let a_ge_b = fx.bcx.ins().fcmp(FloatCC::GreaterThanOrEqual, a, b);
-            let temp = fx.bcx.ins().select(a_ge_b, b, a);
-            let val = fx.bcx.ins().select(a_is_nan, b, temp);
+            let val = crate::num::codegen_float_min(fx, a, b);
             let val = CValue::by_val(val, fx.layout_of(fx.tcx.types.f32));
             ret.write_cvalue(fx, val);
-        };
-        minnumf64, (v a, v b) {
-            let a_is_nan = fx.bcx.ins().fcmp(FloatCC::NotEqual, a, a);
-            let a_ge_b = fx.bcx.ins().fcmp(FloatCC::GreaterThanOrEqual, a, b);
-            let temp = fx.bcx.ins().select(a_ge_b, b, a);
-            let val = fx.bcx.ins().select(a_is_nan, b, temp);
+        }
+        sym::minnumf64 => {
+            intrinsic_args!(fx, args => (a, b); intrinsic);
+            let a = a.load_scalar(fx);
+            let b = b.load_scalar(fx);
+
+            let val = crate::num::codegen_float_min(fx, a, b);
             let val = CValue::by_val(val, fx.layout_of(fx.tcx.types.f64));
             ret.write_cvalue(fx, val);
-        };
-        maxnumf32, (v a, v b) {
-            let a_is_nan = fx.bcx.ins().fcmp(FloatCC::NotEqual, a, a);
-            let a_le_b = fx.bcx.ins().fcmp(FloatCC::LessThanOrEqual, a, b);
-            let temp = fx.bcx.ins().select(a_le_b, b, a);
-            let val = fx.bcx.ins().select(a_is_nan, b, temp);
+        }
+        sym::maxnumf32 => {
+            intrinsic_args!(fx, args => (a, b); intrinsic);
+            let a = a.load_scalar(fx);
+            let b = b.load_scalar(fx);
+
+            let val = crate::num::codegen_float_max(fx, a, b);
             let val = CValue::by_val(val, fx.layout_of(fx.tcx.types.f32));
             ret.write_cvalue(fx, val);
-        };
-        maxnumf64, (v a, v b) {
-            let a_is_nan = fx.bcx.ins().fcmp(FloatCC::NotEqual, a, a);
-            let a_le_b = fx.bcx.ins().fcmp(FloatCC::LessThanOrEqual, a, b);
-            let temp = fx.bcx.ins().select(a_le_b, b, a);
-            let val = fx.bcx.ins().select(a_is_nan, b, temp);
+        }
+        sym::maxnumf64 => {
+            intrinsic_args!(fx, args => (a, b); intrinsic);
+            let a = a.load_scalar(fx);
+            let b = b.load_scalar(fx);
+
+            let val = crate::num::codegen_float_max(fx, a, b);
             let val = CValue::by_val(val, fx.layout_of(fx.tcx.types.f64));
             ret.write_cvalue(fx, val);
-        };
+        }
+
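The hand-written select chains are replaced by `crate::num::codegen_float_min` and `codegen_float_max`. Per the deleted comment, Rust's `minnumf*`/`maxnumf*` must not propagate NaN the way Cranelift's float min/max do, so the lowering has to stay `a.is_nan() ? b : (a >= b ? b : a)` (and the `<=` variant for max), with the NaN check done as a self-inequality compare. A minimal sketch of that behaviour, assuming the helpers keep the same formula:

    fn minnum(a: f32, b: f32) -> f32 {
        // The backend tests NaN with `fcmp NotEqual a, a`; only NaN is unequal to itself.
        let a_is_nan = a.is_nan();
        let temp = if a >= b { b } else { a };
        if a_is_nan { b } else { temp }
    }

    fn main() {
        assert_eq!(minnum(1.0, 2.0), 1.0);
        assert_eq!(minnum(f32::NAN, 2.0), 2.0); // NaN is ignored, unlike a plain fmin
        assert_eq!(minnum(2.0, f32::NAN), 2.0);
    }
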
+        kw::Try => {
+            intrinsic_args!(fx, args => (f, data, catch_fn); intrinsic);
+            let f = f.load_scalar(fx);
+            let data = data.load_scalar(fx);
+            let _catch_fn = catch_fn.load_scalar(fx);
 
-        kw.Try, (v f, v data, v _catch_fn) {
             // FIXME once unwinding is supported, change this to actually catch panics
             let f_sig = fx.bcx.func.import_signature(Signature {
-                call_conv: CallConv::triple_default(fx.triple()),
+                call_conv: fx.target_config.default_call_conv,
                 params: vec![AbiParam::new(fx.bcx.func.dfg.value_type(data))],
                 returns: vec![],
             });
@@ -1079,20 +1181,30 @@ fn swap(bcx: &mut FunctionBuilder<'_>, v: Value) -> Value {
             let layout = ret.layout();
             let ret_val = CValue::const_val(fx, layout, ty::ScalarInt::null(layout.size));
             ret.write_cvalue(fx, ret_val);
-        };
+        }
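Until unwinding is supported, the `kw::Try` arm just calls `f(data)` through an imported signature and writes a null scalar to the return place, ignoring `catch_fn`. A sketch of the runtime behaviour this amounts to (function names are illustrative; the intrinsic's shape is `fn(fn(*mut u8), *mut u8, fn(*mut u8, *mut u8)) -> i32`):

    fn lowered_try(f: fn(*mut u8), data: *mut u8, _catch_fn: fn(*mut u8, *mut u8)) -> i32 {
        f(data);
        0 // the null ScalarInt written to `ret`: "no panic was caught"
    }

    fn main() {
        fn try_fn(_data: *mut u8) { /* may panic once unwinding is implemented */ }
        fn catch_fn(_data: *mut u8, _payload: *mut u8) {}
        assert_eq!(lowered_try(try_fn, std::ptr::null_mut(), catch_fn), 0);
    }
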
 
-        fadd_fast | fsub_fast | fmul_fast | fdiv_fast | frem_fast, (c x, c y) {
-            let res = crate::num::codegen_float_binop(fx, match intrinsic {
-                sym::fadd_fast => BinOp::Add,
-                sym::fsub_fast => BinOp::Sub,
-                sym::fmul_fast => BinOp::Mul,
-                sym::fdiv_fast => BinOp::Div,
-                sym::frem_fast => BinOp::Rem,
-                _ => unreachable!(),
-            }, x, y);
+        sym::fadd_fast | sym::fsub_fast | sym::fmul_fast | sym::fdiv_fast | sym::frem_fast => {
+            intrinsic_args!(fx, args => (x, y); intrinsic);
+
+            let res = crate::num::codegen_float_binop(
+                fx,
+                match intrinsic {
+                    sym::fadd_fast => BinOp::Add,
+                    sym::fsub_fast => BinOp::Sub,
+                    sym::fmul_fast => BinOp::Mul,
+                    sym::fdiv_fast => BinOp::Div,
+                    sym::frem_fast => BinOp::Rem,
+                    _ => unreachable!(),
+                },
+                x,
+                y,
+            );
             ret.write_cvalue(fx, res);
-        };
-        float_to_int_unchecked, (v f) {
+        }
+        sym::float_to_int_unchecked => {
+            intrinsic_args!(fx, args => (f); intrinsic);
+            let f = f.load_scalar(fx);
+
             let res = crate::cast::clif_int_or_float_cast(
                 fx,
                 f,
@@ -1101,47 +1213,76 @@ fn swap(bcx: &mut FunctionBuilder<'_>, v: Value) -> Value {
                 type_sign(ret.layout().ty),
             );
             ret.write_cvalue(fx, CValue::by_val(res, ret.layout()));
-        };
+        }
 
-        raw_eq, <T>(v lhs_ref, v rhs_ref) {
-            fn type_by_size(size: Size) -> Option<Type> {
-                Type::int(size.bits().try_into().ok()?)
-            }
+        sym::raw_eq => {
+            intrinsic_args!(fx, args => (lhs_ref, rhs_ref); intrinsic);
+            let lhs_ref = lhs_ref.load_scalar(fx);
+            let rhs_ref = rhs_ref.load_scalar(fx);
 
-            let size = fx.layout_of(T).layout.size;
+            let size = fx.layout_of(substs.type_at(0)).layout.size();
             // FIXME add and use emit_small_memcmp
-            let is_eq_value =
-                if size == Size::ZERO {
-                    // No bytes means they're trivially equal
-                    fx.bcx.ins().iconst(types::I8, 1)
-                } else if let Some(clty) = type_by_size(size) {
-                    // Can't use `trusted` for these loads; they could be unaligned.
-                    let mut flags = MemFlags::new();
-                    flags.set_notrap();
-                    let lhs_val = fx.bcx.ins().load(clty, flags, lhs_ref, 0);
-                    let rhs_val = fx.bcx.ins().load(clty, flags, rhs_ref, 0);
-                    let eq = fx.bcx.ins().icmp(IntCC::Equal, lhs_val, rhs_val);
-                    fx.bcx.ins().bint(types::I8, eq)
-                } else {
-                    // Just call `memcmp` (like slices do in core) when the
-                    // size is too large or it's not a power-of-two.
-                    let signed_bytes = i64::try_from(size.bytes()).unwrap();
-                    let bytes_val = fx.bcx.ins().iconst(fx.pointer_type, signed_bytes);
-                    let params = vec![AbiParam::new(fx.pointer_type); 3];
-                    let returns = vec![AbiParam::new(types::I32)];
-                    let args = &[lhs_ref, rhs_ref, bytes_val];
-                    let cmp = fx.lib_call("memcmp", params, returns, args)[0];
-                    let eq = fx.bcx.ins().icmp_imm(IntCC::Equal, cmp, 0);
-                    fx.bcx.ins().bint(types::I8, eq)
-                };
+            let is_eq_value = if size == Size::ZERO {
+                // No bytes means they're trivially equal
+                fx.bcx.ins().iconst(types::I8, 1)
+            } else if let Some(clty) = size.bits().try_into().ok().and_then(Type::int) {
+                // Can't use `trusted` for these loads; they could be unaligned.
+                let mut flags = MemFlags::new();
+                flags.set_notrap();
+                let lhs_val = fx.bcx.ins().load(clty, flags, lhs_ref, 0);
+                let rhs_val = fx.bcx.ins().load(clty, flags, rhs_ref, 0);
+                let eq = fx.bcx.ins().icmp(IntCC::Equal, lhs_val, rhs_val);
+                fx.bcx.ins().bint(types::I8, eq)
+            } else {
+                // Just call `memcmp` (like slices do in core) when the
+                // size is too large or it's not a power-of-two.
+                let signed_bytes = i64::try_from(size.bytes()).unwrap();
+                let bytes_val = fx.bcx.ins().iconst(fx.pointer_type, signed_bytes);
+                let params = vec![AbiParam::new(fx.pointer_type); 3];
+                let returns = vec![AbiParam::new(types::I32)];
+                let args = &[lhs_ref, rhs_ref, bytes_val];
+                let cmp = fx.lib_call("memcmp", params, returns, args)[0];
+                let eq = fx.bcx.ins().icmp_imm(IntCC::Equal, cmp, 0);
+                fx.bcx.ins().bint(types::I8, eq)
+            };
             ret.write_cvalue(fx, CValue::by_val(is_eq_value, ret.layout()));
-        };
-    }
+        }
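The `raw_eq` arm compares `size_of::<T>()` bytes: zero-sized types are trivially equal, sizes that fit a single Cranelift integer type use one unaligned load and compare per side, and everything else falls back to `memcmp`. A sketch of the equivalent semantics (not the backend code; the real intrinsic additionally requires the compared bytes to be fully initialized, which the examples below satisfy):

    fn raw_eq<T>(lhs: &T, rhs: &T) -> bool {
        let size = std::mem::size_of::<T>();
        if size == 0 {
            return true; // no bytes means trivially equal, as in the Size::ZERO case above
        }
        let lhs_bytes = unsafe { std::slice::from_raw_parts(lhs as *const T as *const u8, size) };
        let rhs_bytes = unsafe { std::slice::from_raw_parts(rhs as *const T as *const u8, size) };
        // The backend picks either a single load + icmp or a memcmp call; the result is the same.
        lhs_bytes == rhs_bytes
    }

    fn main() {
        assert!(raw_eq(&42u64, &42u64));
        assert!(!raw_eq(&1u32, &2u32));
        assert!(raw_eq(&(), &())); // zero-sized
    }
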
 
-    if let Some((_, dest)) = destination {
-        let ret_block = fx.get_block(dest);
-        fx.bcx.ins().jump(ret_block, &[]);
-    } else {
-        trap_unreachable(fx, "[corruption] Diverging intrinsic returned.");
+        sym::const_allocate => {
+            intrinsic_args!(fx, args => (_size, _align); intrinsic);
+
+            // returns a null pointer at runtime.
+            let null = fx.bcx.ins().iconst(fx.pointer_type, 0);
+            ret.write_cvalue(fx, CValue::by_val(null, ret.layout()));
+        }
+
+        sym::const_deallocate => {
+            intrinsic_args!(fx, args => (_ptr, _size, _align); intrinsic);
+            // nop at runtime.
+        }
+
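`const_allocate` and `const_deallocate` only do real work during compile-time evaluation; the runtime lowering shown above is a null pointer and a no-op respectively. A sketch with illustrative helper names:

    fn runtime_const_allocate(_size: usize, _align: usize) -> *mut u8 {
        std::ptr::null_mut() // runtime callers never get a real allocation
    }

    fn runtime_const_deallocate(_ptr: *mut u8, _size: usize, _align: usize) {
        // nop at runtime, matching the arm above
    }

    fn main() {
        let p = runtime_const_allocate(16, 8);
        assert!(p.is_null());
        runtime_const_deallocate(p, 16, 8);
    }
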
+        sym::black_box => {
+            intrinsic_args!(fx, args => (a); intrinsic);
+
+            // FIXME implement black_box semantics
+            ret.write_cvalue(fx, a);
+        }
+
+        // FIXME implement variadics in cranelift
+        sym::va_copy | sym::va_arg | sym::va_end => {
+            fx.tcx.sess.span_fatal(
+                source_info.span,
+                "Defining variadic functions is not yet supported by Cranelift",
+            );
+        }
+
+        _ => {
+            fx.tcx
+                .sess
+                .span_fatal(source_info.span, &format!("unsupported intrinsic {}", intrinsic));
+        }
     }
+
+    let ret_block = fx.get_block(destination.unwrap());
+    fx.bcx.ins().jump(ret_block, &[]);
 }