Add const_eval_select intrinsic

diff --git a/src/intrinsics/mod.rs b/src/intrinsics/mod.rs
index 012829822060757ee67437b4851256641dfb76d5..313b62c5770b6fe823fa6e6723f73d20bab6d4e8 100644
--- a/src/intrinsics/mod.rs
+++ b/src/intrinsics/mod.rs
@@ -1,21 +1,32 @@
-pub mod llvm;
+//! Codegen of intrinsics. This includes `extern "rust-intrinsic"`, `extern "platform-intrinsic"`
+//! and LLVM intrinsics that have symbol names starting with `llvm.`.
+
+mod cpuid;
+mod llvm;
 mod simd;
 
+pub(crate) use cpuid::codegen_cpuid_call;
+pub(crate) use llvm::codegen_llvm_intrinsic_call;
+
+use rustc_middle::ty::print::with_no_trimmed_paths;
+use rustc_span::symbol::{kw, sym};
+
 use crate::prelude::*;
+use cranelift_codegen::ir::AtomicRmwOp;
 
 macro intrinsic_pat {
     (_) => {
         _
     },
     ($name:ident) => {
-        stringify!($name)
+        sym::$name
+    },
+    (kw.$name:ident) => {
+        kw::$name
     },
     ($name:literal) => {
-        stringify!($name)
+        $name
     },
-    ($x:ident . $($xs:tt).*) => {
-        concat!(stringify!($x), ".", intrinsic_pat!($($xs).*))
-    }
 }
 
 macro intrinsic_arg {
         $arg
     },
     (c $fx:expr, $arg:ident) => {
-        trans_operand($fx, $arg)
+        codegen_operand($fx, $arg)
     },
     (v $fx:expr, $arg:ident) => {
-        trans_operand($fx, $arg).load_scalar($fx)
+        codegen_operand($fx, $arg).load_scalar($fx)
     }
 }
 
@@ -44,6 +55,7 @@
     $(
         $($($name:tt).*)|+ $(if $cond:expr)?, $(<$($subst:ident),*>)? ($($a:ident $arg:ident),*) $content:block;
     )*) => {
+        let _ = $substs; // Silence warning when substs is unused.
         match $intrinsic {
             $(
                 $(intrinsic_pat!($($name).*))|* $(if $cond)? => {
     )*) => {
         match $intrinsic {
             $(
-                stringify!($name) => {
+                sym::$name => {
                     assert!($substs.is_noop());
                     if let [$(ref $arg),*] = *$args {
                         let ($($arg,)*) = (
-                            $(trans_operand($fx, $arg),)*
+                            $(codegen_operand($fx, $arg),)*
                         );
                         let res = $fx.easy_call(stringify!($func), &[$($arg),*], $fx.tcx.types.$ty);
                         $ret.write_cvalue($fx, res);
 
                         if let Some((_, dest)) = $destination {
-                            let ret_ebb = $fx.get_ebb(dest);
-                            $fx.bcx.ins().jump(ret_ebb, &[]);
+                            let ret_block = $fx.get_block(dest);
+                            $fx.bcx.ins().jump(ret_block, &[]);
                             return;
                         } else {
                             unreachable!();
     }
 }
 
-macro atomic_binop_return_old($fx:expr, $op:ident<$T:ident>($ptr:ident, $src:ident) -> $ret:ident)  {
-    let clif_ty = $fx.clif_type($T).unwrap();
-    let old = $fx.bcx.ins().load(clif_ty, MemFlags::new(), $ptr, 0);
-    let new = $fx.bcx.ins().$op(old, $src);
-    $fx.bcx.ins().store(MemFlags::new(), new, $ptr, 0);
-    $ret.write_cvalue($fx, CValue::by_val(old, $fx.layout_of($T)));
+macro validate_atomic_type($fx:ident, $intrinsic:ident, $span:ident, $ty:expr) {
+    match $ty.kind() {
+        ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
+        _ => {
+            $fx.tcx.sess.span_err(
+                $span,
+                &format!(
+                    "`{}` intrinsic: expected basic integer or raw pointer type, found `{:?}`",
+                    $intrinsic, $ty
+                ),
+            );
+            // Prevent verifier error
+            crate::trap::trap_unreachable($fx, "compilation should not have succeeded");
+            return;
+        }
+    }
 }
 
-macro atomic_minmax($fx:expr, $cc:expr, <$T:ident> ($ptr:ident, $src:ident) -> $ret:ident) {
-    // Read old
-    let clif_ty = $fx.clif_type($T).unwrap();
-    let old = $fx.bcx.ins().load(clif_ty, MemFlags::new(), $ptr, 0);
-
-    // Compare
-    let is_eq = codegen_icmp($fx, IntCC::SignedGreaterThan, old, $src);
-    let new = $fx.bcx.ins().select(is_eq, old, $src);
-
-    // Write new
-    $fx.bcx.ins().store(MemFlags::new(), new, $ptr, 0);
-
-    let ret_val = CValue::by_val(old, $ret.layout());
-    $ret.write_cvalue($fx, ret_val);
+macro validate_simd_type($fx:ident, $intrinsic:ident, $span:ident, $ty:expr) {
+    if !$ty.is_simd() {
+        $fx.tcx.sess.span_err($span, &format!("invalid monomorphization of `{}` intrinsic: expected SIMD input type, found non-SIMD `{}`", $intrinsic, $ty));
+        // Prevent verifier error
+        crate::trap::trap_unreachable($fx, "compilation should not have succeeded");
+        return;
+    }
 }
 
-pub fn lane_type_and_count<'tcx>(
-    tcx: TyCtxt<'tcx>,
-    layout: TyLayout<'tcx>,
-) -> (TyLayout<'tcx>, u32) {
-    assert!(layout.ty.is_simd());
-    let lane_count = match layout.fields {
-        layout::FieldPlacement::Array { stride: _, count } => u32::try_from(count).unwrap(),
-        _ => unreachable!("lane_type_and_count({:?})", layout),
+pub(crate) fn clif_vector_type<'tcx>(tcx: TyCtxt<'tcx>, layout: TyAndLayout<'tcx>) -> Option<Type> {
+    let (element, count) = match layout.abi {
+        Abi::Vector { element, count } => (element, count),
+        _ => unreachable!(),
     };
-    let lane_layout = layout.field(&ty::layout::LayoutCx {
-        tcx,
-        param_env: ParamEnv::reveal_all(),
-    }, 0).unwrap();
-    (lane_layout, lane_count)
+
+    match scalar_to_clif_type(tcx, element).by(u16::try_from(count).unwrap()) {
+        // Cranelift currently only implements icmp for 128bit vectors.
+        Some(vector_ty) if vector_ty.bits() == 128 => Some(vector_ty),
+        _ => None,
+    }
 }
 
-fn simd_for_each_lane<'tcx, B: Backend>(
-    fx: &mut FunctionCx<'_, 'tcx, B>,
-    intrinsic: &str,
+fn simd_for_each_lane<'tcx>(
+    fx: &mut FunctionCx<'_, '_, 'tcx>,
     val: CValue<'tcx>,
     ret: CPlace<'tcx>,
     f: impl Fn(
-        &mut FunctionCx<'_, 'tcx, B>,
-        TyLayout<'tcx>,
-        TyLayout<'tcx>,
+        &mut FunctionCx<'_, '_, 'tcx>,
+        TyAndLayout<'tcx>,
+        TyAndLayout<'tcx>,
         Value,
     ) -> CValue<'tcx>,
 ) {
     let layout = val.layout();
 
-    let (lane_layout, lane_count) = lane_type_and_count(fx.tcx, layout);
-    let (ret_lane_layout, ret_lane_count) = lane_type_and_count(fx.tcx, ret.layout());
+    let (lane_count, lane_ty) = layout.ty.simd_size_and_type(fx.tcx);
+    let lane_layout = fx.layout_of(lane_ty);
+    let (ret_lane_count, ret_lane_ty) = ret.layout().ty.simd_size_and_type(fx.tcx);
+    let ret_lane_layout = fx.layout_of(ret_lane_ty);
     assert_eq!(lane_count, ret_lane_count);
 
     for lane_idx in 0..lane_count {
-        let lane_idx = mir::Field::new(lane_idx.try_into().unwrap());
-        let lane = val.value_field(fx, lane_idx).load_scalar(fx);
+        let lane = val.value_lane(fx, lane_idx).load_scalar(fx);
 
         let res_lane = f(fx, lane_layout, ret_lane_layout, lane);
 
-        ret.place_field(fx, lane_idx).write_cvalue(fx, res_lane);
+        ret.place_lane(fx, lane_idx).write_cvalue(fx, res_lane);
     }
 }
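
For illustration, a scalar sketch of what `simd_for_each_lane` computes, written as plain Rust over arrays rather than Cranelift values (illustrative only, not part of this patch):

    // Apply `f` to every input lane and write the result to the matching output lane.
    fn for_each_lane<const N: usize>(val: [i32; N], f: impl Fn(i32) -> i32) -> [i32; N] {
        let mut ret = [0i32; N];
        for (out, lane) in ret.iter_mut().zip(val) {
            *out = f(lane);
        }
        ret
    }

    fn main() {
        assert_eq!(for_each_lane([1, 2, 3, 4], |x| x * 2), [2, 4, 6, 8]);
    }
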
 
-fn simd_pair_for_each_lane<'tcx, B: Backend>(
-    fx: &mut FunctionCx<'_, 'tcx, B>,
-    intrinsic: &str,
+fn simd_pair_for_each_lane<'tcx>(
+    fx: &mut FunctionCx<'_, '_, 'tcx>,
     x: CValue<'tcx>,
     y: CValue<'tcx>,
     ret: CPlace<'tcx>,
     f: impl Fn(
-        &mut FunctionCx<'_, 'tcx, B>,
-        TyLayout<'tcx>,
-        TyLayout<'tcx>,
+        &mut FunctionCx<'_, '_, 'tcx>,
+        TyAndLayout<'tcx>,
+        TyAndLayout<'tcx>,
         Value,
         Value,
     ) -> CValue<'tcx>,
@@ -188,24 +199,72 @@ fn simd_pair_for_each_lane<'tcx, B: Backend>(
     assert_eq!(x.layout(), y.layout());
     let layout = x.layout();
 
-    let (lane_layout, lane_count) = lane_type_and_count(fx.tcx, layout);
-    let (ret_lane_layout, ret_lane_count) = lane_type_and_count(fx.tcx, ret.layout());
+    let (lane_count, lane_ty) = layout.ty.simd_size_and_type(fx.tcx);
+    let lane_layout = fx.layout_of(lane_ty);
+    let (ret_lane_count, ret_lane_ty) = ret.layout().ty.simd_size_and_type(fx.tcx);
+    let ret_lane_layout = fx.layout_of(ret_lane_ty);
     assert_eq!(lane_count, ret_lane_count);
 
-    for lane in 0..lane_count {
-        let lane = mir::Field::new(lane.try_into().unwrap());
-        let x_lane = x.value_field(fx, lane).load_scalar(fx);
-        let y_lane = y.value_field(fx, lane).load_scalar(fx);
+    for lane_idx in 0..lane_count {
+        let x_lane = x.value_lane(fx, lane_idx).load_scalar(fx);
+        let y_lane = y.value_lane(fx, lane_idx).load_scalar(fx);
 
         let res_lane = f(fx, lane_layout, ret_lane_layout, x_lane, y_lane);
 
-        ret.place_field(fx, lane).write_cvalue(fx, res_lane);
+        ret.place_lane(fx, lane_idx).write_cvalue(fx, res_lane);
     }
 }
 
+fn simd_reduce<'tcx>(
+    fx: &mut FunctionCx<'_, '_, 'tcx>,
+    val: CValue<'tcx>,
+    acc: Option<Value>,
+    ret: CPlace<'tcx>,
+    f: impl Fn(&mut FunctionCx<'_, '_, 'tcx>, TyAndLayout<'tcx>, Value, Value) -> Value,
+) {
+    let (lane_count, lane_ty) = val.layout().ty.simd_size_and_type(fx.tcx);
+    let lane_layout = fx.layout_of(lane_ty);
+    assert_eq!(lane_layout, ret.layout());
+
+    let (mut res_val, start_lane) =
+        if let Some(acc) = acc { (acc, 0) } else { (val.value_lane(fx, 0).load_scalar(fx), 1) };
+    for lane_idx in start_lane..lane_count {
+        let lane = val.value_lane(fx, lane_idx).load_scalar(fx);
+        res_val = f(fx, lane_layout, res_val, lane);
+    }
+    let res = CValue::by_val(res_val, lane_layout);
+    ret.write_cvalue(fx, res);
+}
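
A scalar model of the reduction above, assuming integer lanes (sketch, not part of the patch):

    // Fold all lanes with `f`; an explicit accumulator seeds the fold, otherwise lane 0 does.
    fn reduce<const N: usize>(val: [u32; N], acc: Option<u32>, f: impl Fn(u32, u32) -> u32) -> u32 {
        let (mut res, start) = match acc {
            Some(acc) => (acc, 0),
            None => (val[0], 1),
        };
        for &lane in &val[start..] {
            res = f(res, lane);
        }
        res
    }

    fn main() {
        assert_eq!(reduce([1, 2, 3, 4], None, |a, b| a + b), 10);
        assert_eq!(reduce([1, 2, 3, 4], Some(100), |a, b| a + b), 110);
    }
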
+
+// FIXME move all uses to `simd_reduce`
+fn simd_reduce_bool<'tcx>(
+    fx: &mut FunctionCx<'_, '_, 'tcx>,
+    val: CValue<'tcx>,
+    ret: CPlace<'tcx>,
+    f: impl Fn(&mut FunctionCx<'_, '_, 'tcx>, Value, Value) -> Value,
+) {
+    let (lane_count, _lane_ty) = val.layout().ty.simd_size_and_type(fx.tcx);
+    assert!(ret.layout().ty.is_bool());
+
+    let res_val = val.value_lane(fx, 0).load_scalar(fx);
+    let mut res_val = fx.bcx.ins().band_imm(res_val, 1); // mask to boolean
+    for lane_idx in 1..lane_count {
+        let lane = val.value_lane(fx, lane_idx).load_scalar(fx);
+        let lane = fx.bcx.ins().band_imm(lane, 1); // mask to boolean
+        res_val = f(fx, res_val, lane);
+    }
+    let res_val = if fx.bcx.func.dfg.value_type(res_val) != types::I8 {
+        fx.bcx.ins().ireduce(types::I8, res_val)
+    } else {
+        res_val
+    };
+    let res = CValue::by_val(res_val, ret.layout());
+    ret.write_cvalue(fx, res);
+}
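
The boolean variant masks each lane to its lowest bit before folding; a minimal scalar sketch of the `band` ("all lanes true") case:

    fn all_lanes<const N: usize>(val: [u8; N]) -> bool {
        // Mask each lane to a boolean, then AND them together.
        val.iter().fold(true, |acc, &lane| acc && (lane & 1) != 0)
    }

    fn main() {
        assert!(all_lanes([1, 1, 1]));
        assert!(!all_lanes([1, 0, 1]));
    }
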
+
 fn bool_to_zero_or_max_uint<'tcx>(
-    fx: &mut FunctionCx<'_, 'tcx, impl Backend>,
-    layout: TyLayout<'tcx>,
+    fx: &mut FunctionCx<'_, '_, 'tcx>,
+    layout: TyAndLayout<'tcx>,
     val: Value,
 ) -> CValue<'tcx> {
     let ty = fx.clif_type(layout.ty).unwrap();
@@ -216,12 +275,8 @@ fn bool_to_zero_or_max_uint<'tcx>(
         ty => ty,
     };
 
-    let zero = fx.bcx.ins().iconst(int_ty, 0);
-    let max = fx
-        .bcx
-        .ins()
-        .iconst(int_ty, (u64::max_value() >> (64 - int_ty.bits())) as i64);
-    let mut res = fx.bcx.ins().select(val, max, zero);
+    let val = fx.bcx.ins().bint(int_ty, val);
+    let mut res = fx.bcx.ins().ineg(val);
 
     if ty.is_float() {
         res = fx.bcx.ins().bitcast(ty, res);
@@ -231,119 +286,140 @@ fn bool_to_zero_or_max_uint<'tcx>(
 }
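
The `bint` + `ineg` pair replaces the old `select` because negating a 0/1 boolean already yields the 0 or all-ones mask; the same identity in plain Rust:

    fn bool_to_mask(b: bool) -> u32 {
        // -(b as u32) in two's complement: 0 stays 0, 1 becomes all ones.
        (b as u32).wrapping_neg()
    }

    fn main() {
        assert_eq!(bool_to_mask(false), 0);
        assert_eq!(bool_to_mask(true), u32::MAX);
    }
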
 
 macro simd_cmp {
-    ($fx:expr, $intrinsic:expr, $cc:ident($x:ident, $y:ident) -> $ret:ident) => {
+    ($fx:expr, $cc:ident|$cc_f:ident($x:ident, $y:ident) -> $ret:ident) => {
+        let vector_ty = clif_vector_type($fx.tcx, $x.layout());
+
+        if let Some(vector_ty) = vector_ty {
+            let x = $x.load_scalar($fx);
+            let y = $y.load_scalar($fx);
+            let val = if vector_ty.lane_type().is_float() {
+                $fx.bcx.ins().fcmp(FloatCC::$cc_f, x, y)
+            } else {
+                $fx.bcx.ins().icmp(IntCC::$cc, x, y)
+            };
+
+            // HACK This depends on the fact that icmp for vectors represents bools as 0 and !0, not 0 and 1.
+            let val = $fx.bcx.ins().raw_bitcast(vector_ty, val);
+
+            $ret.write_cvalue($fx, CValue::by_val(val, $ret.layout()));
+        } else {
+            simd_pair_for_each_lane(
+                $fx,
+                $x,
+                $y,
+                $ret,
+                |fx, lane_layout, res_lane_layout, x_lane, y_lane| {
+                    let res_lane = match lane_layout.ty.kind() {
+                        ty::Uint(_) | ty::Int(_) => fx.bcx.ins().icmp(IntCC::$cc, x_lane, y_lane),
+                        ty::Float(_) => fx.bcx.ins().fcmp(FloatCC::$cc_f, x_lane, y_lane),
+                        _ => unreachable!("{:?}", lane_layout.ty),
+                    };
+                    bool_to_zero_or_max_uint(fx, res_lane_layout, res_lane)
+                },
+            );
+        }
+    },
+    ($fx:expr, $cc_u:ident|$cc_s:ident|$cc_f:ident($x:ident, $y:ident) -> $ret:ident) => {
+        // FIXME use vector icmp when possible
         simd_pair_for_each_lane(
             $fx,
-            $intrinsic,
             $x,
             $y,
             $ret,
             |fx, lane_layout, res_lane_layout, x_lane, y_lane| {
-                let res_lane = match lane_layout.ty.kind {
-                    ty::Uint(_) | ty::Int(_) => codegen_icmp(fx, IntCC::$cc, x_lane, y_lane),
+                let res_lane = match lane_layout.ty.kind() {
+                    ty::Uint(_) => fx.bcx.ins().icmp(IntCC::$cc_u, x_lane, y_lane),
+                    ty::Int(_) => fx.bcx.ins().icmp(IntCC::$cc_s, x_lane, y_lane),
+                    ty::Float(_) => fx.bcx.ins().fcmp(FloatCC::$cc_f, x_lane, y_lane),
                     _ => unreachable!("{:?}", lane_layout.ty),
                 };
                 bool_to_zero_or_max_uint(fx, res_lane_layout, res_lane)
             },
         );
     },
-    ($fx:expr, $intrinsic:expr, $cc_u:ident|$cc_s:ident($x:ident, $y:ident) -> $ret:ident) => {
+}
+
+macro simd_int_binop {
+    ($fx:expr, $op:ident($x:ident, $y:ident) -> $ret:ident) => {
+        simd_int_binop!($fx, $op|$op($x, $y) -> $ret);
+    },
+    ($fx:expr, $op_u:ident|$op_s:ident($x:ident, $y:ident) -> $ret:ident) => {
         simd_pair_for_each_lane(
             $fx,
-            $intrinsic,
             $x,
             $y,
             $ret,
-            |fx, lane_layout, res_lane_layout, x_lane, y_lane| {
-                let res_lane = match lane_layout.ty.kind {
-                    ty::Uint(_) => codegen_icmp(fx, IntCC::$cc_u, x_lane, y_lane),
-                    ty::Int(_) => codegen_icmp(fx, IntCC::$cc_s, x_lane, y_lane),
+            |fx, lane_layout, ret_lane_layout, x_lane, y_lane| {
+                let res_lane = match lane_layout.ty.kind() {
+                    ty::Uint(_) => fx.bcx.ins().$op_u(x_lane, y_lane),
+                    ty::Int(_) => fx.bcx.ins().$op_s(x_lane, y_lane),
                     _ => unreachable!("{:?}", lane_layout.ty),
                 };
-                bool_to_zero_or_max_uint(fx, res_lane_layout, res_lane)
+                CValue::by_val(res_lane, ret_lane_layout)
             },
         );
     },
 }
 
-macro simd_int_binop {
-    ($fx:expr, $op:ident($x:ident, $y:ident) -> $ret:ident) => {
-        simd_int_binop!($fx, $op|$op($x, $y) -> $ret);
-    },
-    ($fx:expr, $op_u:ident|$op_s:ident($x:ident, $y:ident) -> $ret:ident) => {
-        let (lane_layout, lane_count) = lane_type_and_count($fx.tcx, $x.layout());
-        let x_val = $x.load_vector($fx);
-        let y_val = $y.load_vector($fx);
-
-        let res = match lane_layout.ty.kind {
-            ty::Uint(_) => $fx.bcx.ins().$op_u(x_val, y_val),
-            ty::Int(_) => $fx.bcx.ins().$op_s(x_val, y_val),
-            _ => unreachable!("{:?}", lane_layout.ty),
-        };
-        $ret.write_cvalue($fx, CValue::by_val(res, $ret.layout()));
-    },
-}
-
 macro simd_int_flt_binop {
     ($fx:expr, $op:ident|$op_f:ident($x:ident, $y:ident) -> $ret:ident) => {
         simd_int_flt_binop!($fx, $op|$op|$op_f($x, $y) -> $ret);
     },
     ($fx:expr, $op_u:ident|$op_s:ident|$op_f:ident($x:ident, $y:ident) -> $ret:ident) => {
-        let (lane_layout, lane_count) = lane_type_and_count($fx.tcx, $x.layout());
-        let x_val = $x.load_vector($fx);
-        let y_val = $y.load_vector($fx);
-
-        let res = match lane_layout.ty.kind {
-            ty::Uint(_) => $fx.bcx.ins().$op_u(x_val, y_val),
-            ty::Int(_) => $fx.bcx.ins().$op_s(x_val, y_val),
-            ty::Float(_) => $fx.bcx.ins().$op_f(x_val, y_val),
-            _ => unreachable!("{:?}", lane_layout.ty),
-        };
-        $ret.write_cvalue($fx, CValue::by_val(res, $ret.layout()));
+        simd_pair_for_each_lane(
+            $fx,
+            $x,
+            $y,
+            $ret,
+            |fx, lane_layout, ret_lane_layout, x_lane, y_lane| {
+                let res_lane = match lane_layout.ty.kind() {
+                    ty::Uint(_) => fx.bcx.ins().$op_u(x_lane, y_lane),
+                    ty::Int(_) => fx.bcx.ins().$op_s(x_lane, y_lane),
+                    ty::Float(_) => fx.bcx.ins().$op_f(x_lane, y_lane),
+                    _ => unreachable!("{:?}", lane_layout.ty),
+                };
+                CValue::by_val(res_lane, ret_lane_layout)
+            },
+        );
     },
 }
 
 macro simd_flt_binop($fx:expr, $op:ident($x:ident, $y:ident) -> $ret:ident) {
-    let (lane_layout, lane_count) = lane_type_and_count($fx.tcx, $x.layout());
-    let x_val = $x.load_vector($fx);
-    let y_val = $y.load_vector($fx);
-
-    let res = match lane_layout.ty.kind {
-        ty::Float(_) => $fx.bcx.ins().$op(x_val, y_val),
-        _ => unreachable!("{:?}", lane_layout.ty),
-    };
-    $ret.write_cvalue($fx, CValue::by_val(res, $ret.layout()));
+    simd_pair_for_each_lane(
+        $fx,
+        $x,
+        $y,
+        $ret,
+        |fx, lane_layout, ret_lane_layout, x_lane, y_lane| {
+            let res_lane = match lane_layout.ty.kind() {
+                ty::Float(_) => fx.bcx.ins().$op(x_lane, y_lane),
+                _ => unreachable!("{:?}", lane_layout.ty),
+            };
+            CValue::by_val(res_lane, ret_lane_layout)
+        },
+    );
 }
 
-pub fn codegen_intrinsic_call<'tcx>(
-    fx: &mut FunctionCx<'_, 'tcx, impl Backend>,
+pub(crate) fn codegen_intrinsic_call<'tcx>(
+    fx: &mut FunctionCx<'_, '_, 'tcx>,
     instance: Instance<'tcx>,
     args: &[mir::Operand<'tcx>],
     destination: Option<(CPlace<'tcx>, BasicBlock)>,
     span: Span,
 ) {
-    let def_id = instance.def_id();
+    let intrinsic = fx.tcx.item_name(instance.def_id());
     let substs = instance.substs;
 
-    let intrinsic = fx.tcx.item_name(def_id).as_str();
-    let intrinsic = &intrinsic[..];
-
     let ret = match destination {
         Some((place, _)) => place,
         None => {
             // Insert non returning intrinsics here
             match intrinsic {
-                "abort" => {
-                    trap_panic(fx, "Called intrinsic::abort.");
+                sym::abort => {
+                    trap_abort(fx, "Called intrinsic::abort.");
                 }
-                "unreachable" => {
-                    trap_unreachable(fx, "[corruption] Called intrinsic::unreachable.");
-                }
-                "transmute" => {
-                    trap_unreachable(
-                        fx,
-                        "[corruption] Called intrinsic::transmute with uninhabited argument.",
-                    );
+                sym::transmute => {
+                    crate::base::codegen_panic(fx, "Transmuting to uninhabited type.", span);
                 }
                _ => unimplemented!("unsupported intrinsic {}", intrinsic),
             }
@@ -351,10 +427,10 @@ pub fn codegen_intrinsic_call<'tcx>(
         }
     };
 
-    if intrinsic.starts_with("simd_") {
+    if intrinsic.as_str().starts_with("simd_") {
         self::simd::codegen_simd_intrinsic_call(fx, instance, args, ret, span);
-        let ret_ebb = fx.get_ebb(destination.expect("SIMD intrinsics don't diverge").1);
-        fx.bcx.ins().jump(ret_ebb, &[]);
+        let ret_block = fx.get_block(destination.expect("SIMD intrinsics don't diverge").1);
+        fx.bcx.ins().jump(ret_block, &[]);
         return;
     }
 
@@ -401,14 +477,12 @@ pub fn codegen_intrinsic_call<'tcx>(
         sinf64(flt) -> f64 => sin,
         cosf32(flt) -> f32 => cosf,
         cosf64(flt) -> f64 => cos,
-        tanf32(flt) -> f32 => tanf,
-        tanf64(flt) -> f64 => tan,
     }
 
     intrinsic_match! {
         fx, intrinsic, substs, args,
         _ => {
-            unimpl!("unsupported intrinsic {}", intrinsic)
+            fx.tcx.sess.span_fatal(span, &format!("unsupported intrinsic {}", intrinsic));
         };
 
         assume, (c _a) {};
@@ -420,30 +494,45 @@ pub fn codegen_intrinsic_call<'tcx>(
         };
         copy | copy_nonoverlapping, <elem_ty> (v src, v dst, v count) {
             let elem_size: u64 = fx.layout_of(elem_ty).size.bytes();
-            let elem_size = fx
-                .bcx
-                .ins()
-                .iconst(fx.pointer_type, elem_size as i64);
             assert_eq!(args.len(), 3);
-            let byte_amount = fx.bcx.ins().imul(count, elem_size);
+            let byte_amount = if elem_size != 1 {
+                fx.bcx.ins().imul_imm(count, elem_size as i64)
+            } else {
+                count
+            };
 
-            if intrinsic.ends_with("_nonoverlapping") {
+            if intrinsic == sym::copy_nonoverlapping {
+                // FIXME emit_small_memcpy
                 fx.bcx.call_memcpy(fx.module.target_config(), dst, src, byte_amount);
             } else {
+                // FIXME emit_small_memmove
                 fx.bcx.call_memmove(fx.module.target_config(), dst, src, byte_amount);
             }
         };
-        discriminant_value, (c ptr) {
-            let pointee_layout = fx.layout_of(ptr.layout().ty.builtin_deref(true).unwrap().ty);
-            let val = CValue::by_ref(Pointer::new(ptr.load_scalar(fx)), pointee_layout);
-            let discr = crate::discriminant::codegen_get_discriminant(fx, val, ret.layout());
-            ret.write_cvalue(fx, discr);
+        // NOTE: the volatile variants have src and dst swapped
+        volatile_copy_memory | volatile_copy_nonoverlapping_memory, <elem_ty> (v dst, v src, v count) {
+            let elem_size: u64 = fx.layout_of(elem_ty).size.bytes();
+            assert_eq!(args.len(), 3);
+            let byte_amount = if elem_size != 1 {
+                fx.bcx.ins().imul_imm(count, elem_size as i64)
+            } else {
+                count
+            };
+
+            // FIXME make the copy actually volatile when using emit_small_mem{cpy,move}
+            if intrinsic == sym::volatile_copy_nonoverlapping_memory {
+                // FIXME emit_small_memcpy
+                fx.bcx.call_memcpy(fx.module.target_config(), dst, src, byte_amount);
+            } else {
+                // FIXME emit_small_memmove
+                fx.bcx.call_memmove(fx.module.target_config(), dst, src, byte_amount);
+            }
         };
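
Both copy arms scale `count` from elements to bytes because `call_memcpy`/`call_memmove` take a byte count while the intrinsic counts elements; the safe-Rust counterpart of the non-volatile case looks like this (illustrative, not part of the patch):

    fn main() {
        let src = [1u32, 2, 3, 4];
        let mut dst = [0u32; 4];
        unsafe {
            // `count` is 4 elements, i.e. 16 bytes get copied.
            std::ptr::copy_nonoverlapping(src.as_ptr(), dst.as_mut_ptr(), src.len());
        }
        assert_eq!(dst, src);
    }
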
         size_of_val, <T> (c ptr) {
             let layout = fx.layout_of(T);
             let size = if layout.is_unsized() {
                 let (_ptr, info) = ptr.load_scalar_pair(fx);
-                let (size, _align) = crate::unsize::size_and_align_of_dst(fx, layout.ty, info);
+                let (size, _align) = crate::unsize::size_and_align_of_dst(fx, layout, info);
                 size
             } else {
                 fx
@@ -457,7 +546,7 @@ pub fn codegen_intrinsic_call<'tcx>(
             let layout = fx.layout_of(T);
             let align = if layout.is_unsized() {
                 let (_ptr, info) = ptr.load_scalar_pair(fx);
-                let (_size, align) = crate::unsize::size_and_align_of_dst(fx, layout.ty, info);
+                let (_size, align) = crate::unsize::size_and_align_of_dst(fx, layout, info);
                 align
             } else {
                 fx
@@ -468,45 +557,31 @@ pub fn codegen_intrinsic_call<'tcx>(
             ret.write_cvalue(fx, CValue::by_val(align, usize_layout));
         };
 
-        _ if intrinsic.starts_with("unchecked_") || intrinsic == "exact_div", (c x, c y) {
+        unchecked_add | unchecked_sub | unchecked_div | exact_div | unchecked_rem
+        | unchecked_shl | unchecked_shr, (c x, c y) {
             // FIXME trap on overflow
             let bin_op = match intrinsic {
-                "unchecked_sub" => BinOp::Sub,
-                "unchecked_div" | "exact_div" => BinOp::Div,
-                "unchecked_rem" => BinOp::Rem,
-                "unchecked_shl" => BinOp::Shl,
-                "unchecked_shr" => BinOp::Shr,
-                _ => unimplemented!("intrinsic {}", intrinsic),
+                sym::unchecked_add => BinOp::Add,
+                sym::unchecked_sub => BinOp::Sub,
+                sym::unchecked_div | sym::exact_div => BinOp::Div,
+                sym::unchecked_rem => BinOp::Rem,
+                sym::unchecked_shl => BinOp::Shl,
+                sym::unchecked_shr => BinOp::Shr,
+                _ => unreachable!(),
             };
-            let res = crate::num::trans_int_binop(fx, bin_op, x, y);
+            let res = crate::num::codegen_int_binop(fx, bin_op, x, y);
             ret.write_cvalue(fx, res);
         };
-        _ if intrinsic.ends_with("_with_overflow"), (c x, c y) {
+        add_with_overflow | sub_with_overflow | mul_with_overflow, (c x, c y) {
             assert_eq!(x.layout().ty, y.layout().ty);
             let bin_op = match intrinsic {
-                "add_with_overflow" => BinOp::Add,
-                "sub_with_overflow" => BinOp::Sub,
-                "mul_with_overflow" => BinOp::Mul,
-                _ => unimplemented!("intrinsic {}", intrinsic),
+                sym::add_with_overflow => BinOp::Add,
+                sym::sub_with_overflow => BinOp::Sub,
+                sym::mul_with_overflow => BinOp::Mul,
+                _ => unreachable!(),
             };
 
-            let res = crate::num::trans_checked_int_binop(
-                fx,
-                bin_op,
-                x,
-                y,
-            );
-            ret.write_cvalue(fx, res);
-        };
-        _ if intrinsic.starts_with("wrapping_"), (c x, c y) {
-            assert_eq!(x.layout().ty, y.layout().ty);
-            let bin_op = match intrinsic {
-                "wrapping_add" => BinOp::Add,
-                "wrapping_sub" => BinOp::Sub,
-                "wrapping_mul" => BinOp::Mul,
-                _ => unimplemented!("intrinsic {}", intrinsic),
-            };
-            let res = crate::num::trans_int_binop(
+            let res = crate::num::codegen_checked_int_binop(
                 fx,
                 bin_op,
                 x,
@@ -514,17 +589,17 @@ pub fn codegen_intrinsic_call<'tcx>(
             );
             ret.write_cvalue(fx, res);
         };
-        _ if intrinsic.starts_with("saturating_"), <T> (c lhs, c rhs) {
+        saturating_add | saturating_sub, <T> (c lhs, c rhs) {
             assert_eq!(lhs.layout().ty, rhs.layout().ty);
             let bin_op = match intrinsic {
-                "saturating_add" => BinOp::Add,
-                "saturating_sub" => BinOp::Sub,
-                _ => unimplemented!("intrinsic {}", intrinsic),
+                sym::saturating_add => BinOp::Add,
+                sym::saturating_sub => BinOp::Sub,
+                _ => unreachable!(),
             };
 
             let signed = type_sign(T);
 
-            let checked_res = crate::num::trans_checked_int_binop(
+            let checked_res = crate::num::codegen_checked_int_binop(
                 fx,
                 bin_op,
                 lhs,
@@ -534,23 +609,18 @@ pub fn codegen_intrinsic_call<'tcx>(
             let (val, has_overflow) = checked_res.load_scalar_pair(fx);
             let clif_ty = fx.clif_type(T).unwrap();
 
-            // `select.i8` is not implemented by Cranelift.
-            let has_overflow = fx.bcx.ins().uextend(types::I32, has_overflow);
-
-            let (min, max) = type_min_max_value(clif_ty, signed);
-            let min = fx.bcx.ins().iconst(clif_ty, min);
-            let max = fx.bcx.ins().iconst(clif_ty, max);
+            let (min, max) = type_min_max_value(&mut fx.bcx, clif_ty, signed);
 
             let val = match (intrinsic, signed) {
-                ("saturating_add", false) => fx.bcx.ins().select(has_overflow, max, val),
-                ("saturating_sub", false) => fx.bcx.ins().select(has_overflow, min, val),
-                ("saturating_add", true) => {
+                (sym::saturating_add, false) => fx.bcx.ins().select(has_overflow, max, val),
+                (sym::saturating_sub, false) => fx.bcx.ins().select(has_overflow, min, val),
+                (sym::saturating_add, true) => {
                     let rhs = rhs.load_scalar(fx);
                     let rhs_ge_zero = fx.bcx.ins().icmp_imm(IntCC::SignedGreaterThanOrEqual, rhs, 0);
                     let sat_val = fx.bcx.ins().select(rhs_ge_zero, max, min);
                     fx.bcx.ins().select(has_overflow, sat_val, val)
                 }
-                ("saturating_sub", true) => {
+                (sym::saturating_sub, true) => {
                     let rhs = rhs.load_scalar(fx);
                     let rhs_ge_zero = fx.bcx.ins().icmp_imm(IntCC::SignedGreaterThanOrEqual, rhs, 0);
                     let sat_val = fx.bcx.ins().select(rhs_ge_zero, min, max);
@@ -579,128 +649,41 @@ pub fn codegen_intrinsic_call<'tcx>(
         offset | arith_offset, (c base, v offset) {
             let pointee_ty = base.layout().ty.builtin_deref(true).unwrap().ty;
             let pointee_size = fx.layout_of(pointee_ty).size.bytes();
-            let ptr_diff = fx.bcx.ins().imul_imm(offset, pointee_size as i64);
+            let ptr_diff = if pointee_size != 1 {
+                fx.bcx.ins().imul_imm(offset, pointee_size as i64)
+            } else {
+                offset
+            };
             let base_val = base.load_scalar(fx);
             let res = fx.bcx.ins().iadd(base_val, ptr_diff);
             ret.write_cvalue(fx, CValue::by_val(res, base.layout()));
         };
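
The `imul_imm` by `pointee_size` mirrors pointer `offset`/`add` semantics, which advance by whole elements rather than bytes:

    fn main() {
        let data = [10u64, 20, 30];
        let base = data.as_ptr();
        let third = unsafe { base.add(2) };
        assert_eq!(unsafe { *third }, 30);
        // Two elements correspond to 2 * size_of::<u64>() = 16 bytes.
        assert_eq!(third as usize - base as usize, 2 * std::mem::size_of::<u64>());
    }
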
 
-        transmute, <src_ty, dst_ty> (c from) {
-            assert_eq!(from.layout().ty, src_ty);
-            let addr = from.force_stack(fx);
-            let dst_layout = fx.layout_of(dst_ty);
-            ret.write_cvalue(fx, CValue::by_ref(addr, dst_layout))
+        transmute, (c from) {
+            ret.write_cvalue_transmute(fx, from);
         };
-        init, () {
-            let layout = ret.layout();
-            if layout.abi == Abi::Uninhabited {
-                crate::trap::trap_panic(fx, "[panic] Called intrinsic::init for uninhabited type.");
-                return;
-            }
-
-            match *ret.inner() {
-                CPlaceInner::NoPlace => {}
-                CPlaceInner::Var(var) => {
-                    let clif_ty = fx.clif_type(layout.ty).unwrap();
-                    let val = match clif_ty {
-                        types::I8 | types::I16 | types::I32 | types::I64 => fx.bcx.ins().iconst(clif_ty, 0),
-                        types::I128 => {
-                            let zero = fx.bcx.ins().iconst(types::I64, 0);
-                            fx.bcx.ins().iconcat(zero, zero)
-                        }
-                        types::F32 => {
-                            let zero = fx.bcx.ins().iconst(types::I32, 0);
-                            fx.bcx.ins().bitcast(types::F32, zero)
-                        }
-                        types::F64 => {
-                            let zero = fx.bcx.ins().iconst(types::I64, 0);
-                            fx.bcx.ins().bitcast(types::F64, zero)
-                        }
-                        _ => panic!("clif_type returned {}", clif_ty),
-                    };
-                    fx.bcx.set_val_label(val, cranelift_codegen::ir::ValueLabel::from_u32(var.as_u32()));
-                    fx.bcx.def_var(mir_var(var), val);
-                }
-                _ => {
-                    let addr = ret.to_ptr(fx).get_addr(fx);
-                    let layout = ret.layout();
-                    fx.bcx.emit_small_memset(fx.module.target_config(), addr, 0, layout.size.bytes(), 1);
-                }
-            }
-        };
-        uninit, () {
-            let layout = ret.layout();
-            if layout.abi == Abi::Uninhabited {
-                crate::trap::trap_panic(fx, "[panic] Called intrinsic::uninit for uninhabited type.");
-                return;
-            }
-            match *ret.inner() {
-                CPlaceInner::NoPlace => {},
-                CPlaceInner::Var(var) => {
-                    let clif_ty = fx.clif_type(layout.ty).unwrap();
-                    let val = match clif_ty {
-                        types::I8 | types::I16 | types::I32 | types::I64 => fx.bcx.ins().iconst(clif_ty, 42),
-                        types::I128 => {
-                            let zero = fx.bcx.ins().iconst(types::I64, 0);
-                            let fourty_two = fx.bcx.ins().iconst(types::I64, 42);
-                            fx.bcx.ins().iconcat(fourty_two, zero)
-                        }
-                        types::F32 => {
-                            let zero = fx.bcx.ins().iconst(types::I32, 0xdeadbeef);
-                            fx.bcx.ins().bitcast(types::F32, zero)
-                        }
-                        types::F64 => {
-                            let zero = fx.bcx.ins().iconst(types::I64, 0xcafebabedeadbeefu64 as i64);
-                            fx.bcx.ins().bitcast(types::F64, zero)
-                        }
-                        _ => panic!("clif_type returned {}", clif_ty),
-                    };
-                    fx.bcx.set_val_label(val, cranelift_codegen::ir::ValueLabel::from_u32(var.as_u32()));
-                    fx.bcx.def_var(mir_var(var), val);
-                }
-                CPlaceInner::Addr(_, _) => {
-                    // Don't write to `ret`, as the destination memory is already uninitialized.
-                }
-            }
-        };
-        write_bytes, (c dst, v val, v count) {
+        write_bytes | volatile_set_memory, (c dst, v val, v count) {
             let pointee_ty = dst.layout().ty.builtin_deref(true).unwrap().ty;
             let pointee_size = fx.layout_of(pointee_ty).size.bytes();
-            let count = fx.bcx.ins().imul_imm(count, pointee_size as i64);
+            let count = if pointee_size != 1 {
+                fx.bcx.ins().imul_imm(count, pointee_size as i64)
+            } else {
+                count
+            };
             let dst_ptr = dst.load_scalar(fx);
+            // FIXME make the memset actually volatile when switching to emit_small_memset
+            // FIXME use emit_small_memset
             fx.bcx.call_memset(fx.module.target_config(), dst_ptr, val, count);
         };
         ctlz | ctlz_nonzero, <T> (v arg) {
             // FIXME trap on `ctlz_nonzero` with zero arg.
-            let res = if T == fx.tcx.types.u128 || T == fx.tcx.types.i128 {
-                // FIXME verify this algorithm is correct
-                let (lsb, msb) = fx.bcx.ins().isplit(arg);
-                let lsb_lz = fx.bcx.ins().clz(lsb);
-                let msb_lz = fx.bcx.ins().clz(msb);
-                let msb_is_zero = fx.bcx.ins().icmp_imm(IntCC::Equal, msb, 0);
-                let lsb_lz_plus_64 = fx.bcx.ins().iadd_imm(lsb_lz, 64);
-                let res = fx.bcx.ins().select(msb_is_zero, lsb_lz_plus_64, msb_lz);
-                fx.bcx.ins().uextend(types::I128, res)
-            } else {
-                fx.bcx.ins().clz(arg)
-            };
+            let res = fx.bcx.ins().clz(arg);
             let res = CValue::by_val(res, fx.layout_of(T));
             ret.write_cvalue(fx, res);
         };
         cttz | cttz_nonzero, <T> (v arg) {
             // FIXME trap on `cttz_nonzero` with zero arg.
-            let res = if T == fx.tcx.types.u128 || T == fx.tcx.types.i128 {
-                // FIXME verify this algorithm is correct
-                let (lsb, msb) = fx.bcx.ins().isplit(arg);
-                let lsb_tz = fx.bcx.ins().ctz(lsb);
-                let msb_tz = fx.bcx.ins().ctz(msb);
-                let lsb_is_zero = fx.bcx.ins().icmp_imm(IntCC::Equal, lsb, 0);
-                let msb_tz_plus_64 = fx.bcx.ins().iadd_imm(msb_tz, 64);
-                let res = fx.bcx.ins().select(lsb_is_zero, msb_tz_plus_64, lsb_tz);
-                fx.bcx.ins().uextend(types::I128, res)
-            } else {
-                fx.bcx.ins().ctz(arg)
-            };
+            let res = fx.bcx.ins().ctz(arg);
             let res = CValue::by_val(res, fx.layout_of(T));
             ret.write_cvalue(fx, res);
         };
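
For reference, `clz`/`ctz` correspond to `leading_zeros`/`trailing_zeros` on Rust integers:

    fn main() {
        // 0x100 has bit 8 set: 23 leading zeros and 8 trailing zeros in a u32.
        assert_eq!(0x0000_0100u32.leading_zeros(), 23);
        assert_eq!(0x0000_0100u32.trailing_zeros(), 8);
    }
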
@@ -716,7 +699,7 @@ pub fn codegen_intrinsic_call<'tcx>(
         };
         bswap, <T> (v arg) {
             // FIXME(CraneStation/cranelift#794) add bswap instruction to cranelift
-            fn swap(bcx: &mut FunctionBuilder, v: Value) -> Value {
+            fn swap(bcx: &mut FunctionBuilder<'_>, v: Value) -> Value {
                 match bcx.func.dfg.value_type(v) {
                     types::I8 => v,
 
@@ -787,36 +770,65 @@ fn swap(bcx: &mut FunctionBuilder, v: Value) -> Value {
                         let hi = swap(bcx, hi);
                         bcx.ins().iconcat(hi, lo)
                     }
-                    ty => unimplemented!("bswap {}", ty),
+                    ty => unreachable!("bswap {}", ty),
                 }
-            };
+            }
             let res = CValue::by_val(swap(&mut fx.bcx, arg), fx.layout_of(T));
             ret.write_cvalue(fx, res);
         };
-        panic_if_uninhabited, <T> () {
-            if fx.layout_of(T).abi.is_uninhabited() {
-                crate::trap::trap_panic(fx, "[panic] Called intrinsic::panic_if_uninhabited for uninhabited type.");
+        assert_inhabited | assert_zero_valid | assert_uninit_valid, <T> () {
+            let layout = fx.layout_of(T);
+            if layout.abi.is_uninhabited() {
+                with_no_trimmed_paths(|| crate::base::codegen_panic(
+                    fx,
+                    &format!("attempted to instantiate uninhabited type `{}`", T),
+                    span,
+                ));
+                return;
+            }
+
+            if intrinsic == sym::assert_zero_valid && !layout.might_permit_raw_init(fx, /*zero:*/ true) {
+                with_no_trimmed_paths(|| crate::base::codegen_panic(
+                    fx,
+                    &format!("attempted to zero-initialize type `{}`, which is invalid", T),
+                    span,
+                ));
+                return;
+            }
+
+            if intrinsic == sym::assert_uninit_valid && !layout.might_permit_raw_init(fx, /*zero:*/ false) {
+                with_no_trimmed_paths(|| crate::base::codegen_panic(
+                    fx,
+                    &format!("attempted to leave type `{}` uninitialized, which is invalid", T),
+                    span,
+                ));
                 return;
             }
         };
 
-        volatile_load, (c ptr) {
+        volatile_load | unaligned_volatile_load, (c ptr) {
             // Cranelift treats loads as volatile by default
+            // FIXME correctly handle unaligned_volatile_load
             let inner_layout =
                 fx.layout_of(ptr.layout().ty.builtin_deref(true).unwrap().ty);
             let val = CValue::by_ref(Pointer::new(ptr.load_scalar(fx)), inner_layout);
             ret.write_cvalue(fx, val);
         };
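
On the Rust side these intrinsics back `std::ptr::read_volatile`/`write_volatile`; a minimal usage sketch:

    fn main() {
        let x: u32 = 7;
        // read_volatile lowers to the volatile_load intrinsic handled above.
        let v = unsafe { std::ptr::read_volatile(&x) };
        assert_eq!(v, 7);
    }
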
-        volatile_store, (v ptr, c val) {
+        volatile_store | unaligned_volatile_store, (v ptr, c val) {
             // Cranelift treats stores as volatile by default
+            // FIXME correctly handle unaligned_volatile_store
             let dest = CPlace::for_ptr(Pointer::new(ptr), val.layout());
             dest.write_cvalue(fx, val);
         };
 
-        size_of | pref_align_of | min_align_of | needs_drop | type_id | type_name, () {
+        pref_align_of | needs_drop | type_id | type_name | variant_count, () {
             let const_val =
                 fx.tcx.const_eval_instance(ParamEnv::reveal_all(), instance, None).unwrap();
-            let val = crate::constant::trans_const_value(fx, const_val);
+            let val = crate::constant::codegen_const_value(
+                fx,
+                const_val,
+                ret.layout().ty,
+            );
             ret.write_cvalue(fx, val);
         };
 
@@ -826,7 +838,17 @@ fn swap(bcx: &mut FunctionBuilder, v: Value) -> Value {
             let pointee_size: u64 = fx.layout_of(T).size.bytes();
             let diff = fx.bcx.ins().isub(ptr, base);
             // FIXME this can be an exact division.
-            let val = CValue::by_val(fx.bcx.ins().udiv_imm(diff, pointee_size as i64), isize_layout);
+            let val = CValue::by_val(fx.bcx.ins().sdiv_imm(diff, pointee_size as i64), isize_layout);
+            ret.write_cvalue(fx, val);
+        };
+
+        ptr_guaranteed_eq, (c a, c b) {
+            let val = crate::num::codegen_ptr_binop(fx, BinOp::Eq, a, b);
+            ret.write_cvalue(fx, val);
+        };
+
+        ptr_guaranteed_ne, (c a, c b) {
+            let val = crate::num::codegen_ptr_binop(fx, BinOp::Ne, a, b);
             ret.write_cvalue(fx, val);
         };
 
@@ -835,103 +857,214 @@ fn swap(bcx: &mut FunctionBuilder, v: Value) -> Value {
             ret.write_cvalue(fx, caller_location);
         };
 
-        _ if intrinsic.starts_with("atomic_fence"), () {};
-        _ if intrinsic.starts_with("atomic_singlethreadfence"), () {};
-        _ if intrinsic.starts_with("atomic_load"), (c ptr) {
-            let inner_layout =
-                fx.layout_of(ptr.layout().ty.builtin_deref(true).unwrap().ty);
-            let val = CValue::by_ref(Pointer::new(ptr.load_scalar(fx)), inner_layout);
+        _ if intrinsic.as_str().starts_with("atomic_fence"), () {
+            fx.bcx.ins().fence();
+        };
+        _ if intrinsic.as_str().starts_with("atomic_singlethreadfence"), () {
+            // FIXME use a compiler fence once Cranelift supports it
+            fx.bcx.ins().fence();
+        };
+        _ if intrinsic.as_str().starts_with("atomic_load"), <T> (v ptr) {
+            validate_atomic_type!(fx, intrinsic, span, T);
+            let ty = fx.clif_type(T).unwrap();
+
+            let val = fx.bcx.ins().atomic_load(ty, MemFlags::trusted(), ptr);
+
+            let val = CValue::by_val(val, fx.layout_of(T));
             ret.write_cvalue(fx, val);
         };
-        _ if intrinsic.starts_with("atomic_store"), (v ptr, c val) {
-            let dest = CPlace::for_ptr(Pointer::new(ptr), val.layout());
-            dest.write_cvalue(fx, val);
+        _ if intrinsic.as_str().starts_with("atomic_store"), (v ptr, c val) {
+            validate_atomic_type!(fx, intrinsic, span, val.layout().ty);
+
+            let val = val.load_scalar(fx);
+
+            fx.bcx.ins().atomic_store(MemFlags::trusted(), val, ptr);
         };
-        _ if intrinsic.starts_with("atomic_xchg"), <T> (v ptr, c src) {
-            // Read old
-            let clif_ty = fx.clif_type(T).unwrap();
-            let old = fx.bcx.ins().load(clif_ty, MemFlags::new(), ptr, 0);
-            ret.write_cvalue(fx, CValue::by_val(old, fx.layout_of(T)));
+        _ if intrinsic.as_str().starts_with("atomic_xchg"), (v ptr, c new) {
+            let layout = new.layout();
+            validate_atomic_type!(fx, intrinsic, span, layout.ty);
+            let ty = fx.clif_type(layout.ty).unwrap();
+
+            let new = new.load_scalar(fx);
+
+            let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Xchg, ptr, new);
 
-            // Write new
-            let dest = CPlace::for_ptr(Pointer::new(ptr), src.layout());
-            dest.write_cvalue(fx, src);
+            let old = CValue::by_val(old, layout);
+            ret.write_cvalue(fx, old);
         };
-        _ if intrinsic.starts_with("atomic_cxchg"), <T> (v ptr, v test_old, v new) { // both atomic_cxchg_* and atomic_cxchgweak_*
-            // Read old
-            let clif_ty = fx.clif_type(T).unwrap();
-            let old = fx.bcx.ins().load(clif_ty, MemFlags::new(), ptr, 0);
+        _ if intrinsic.as_str().starts_with("atomic_cxchg"), (v ptr, c test_old, c new) { // both atomic_cxchg_* and atomic_cxchgweak_*
+            let layout = new.layout();
+            validate_atomic_type!(fx, intrinsic, span, layout.ty);
 
-            // Compare
-            let is_eq = codegen_icmp(fx, IntCC::Equal, old, test_old);
-            let new = fx.bcx.ins().select(is_eq, new, old); // Keep old if not equal to test_old
+            let test_old = test_old.load_scalar(fx);
+            let new = new.load_scalar(fx);
 
-            // Write new
-            fx.bcx.ins().store(MemFlags::new(), new, ptr, 0);
+            let old = fx.bcx.ins().atomic_cas(MemFlags::trusted(), ptr, test_old, new);
+            let is_eq = fx.bcx.ins().icmp(IntCC::Equal, old, test_old);
 
             let ret_val = CValue::by_val_pair(old, fx.bcx.ins().bint(types::I8, is_eq), ret.layout());
-            ret.write_cvalue(fx, ret_val);
+            ret.write_cvalue(fx, ret_val)
         };
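
The intrinsic returns the old value plus a success flag; the stable `compare_exchange` API exposes the same information as a `Result` (example for reference, not part of the patch):

    use std::sync::atomic::{AtomicU32, Ordering};

    fn main() {
        let x = AtomicU32::new(5);
        // Success: the previous value 5 is returned in Ok and 10 is stored.
        assert_eq!(x.compare_exchange(5, 10, Ordering::SeqCst, Ordering::SeqCst), Ok(5));
        // Failure: the current value 10 is returned in Err and nothing is stored.
        assert_eq!(x.compare_exchange(5, 20, Ordering::SeqCst, Ordering::SeqCst), Err(10));
        assert_eq!(x.load(Ordering::SeqCst), 10);
    }
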
 
-        _ if intrinsic.starts_with("atomic_xadd"), <T> (v ptr, v amount) {
-            atomic_binop_return_old! (fx, iadd<T>(ptr, amount) -> ret);
+        _ if intrinsic.as_str().starts_with("atomic_xadd"), (v ptr, c amount) {
+            let layout = amount.layout();
+            validate_atomic_type!(fx, intrinsic, span, layout.ty);
+            let ty = fx.clif_type(layout.ty).unwrap();
+
+            let amount = amount.load_scalar(fx);
+
+            let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Add, ptr, amount);
+
+            let old = CValue::by_val(old, layout);
+            ret.write_cvalue(fx, old);
         };
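
`atomic_rmw` returns the value stored before the operation, matching the `fetch_*` family on the stable atomics:

    use std::sync::atomic::{AtomicU32, Ordering};

    fn main() {
        let x = AtomicU32::new(40);
        let old = x.fetch_add(2, Ordering::SeqCst);
        assert_eq!(old, 40); // value before the add
        assert_eq!(x.load(Ordering::SeqCst), 42);
    }
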
-        _ if intrinsic.starts_with("atomic_xsub"), <T> (v ptr, v amount) {
-            atomic_binop_return_old! (fx, isub<T>(ptr, amount) -> ret);
+        _ if intrinsic.as_str().starts_with("atomic_xsub"), (v ptr, c amount) {
+            let layout = amount.layout();
+            validate_atomic_type!(fx, intrinsic, span, layout.ty);
+            let ty = fx.clif_type(layout.ty).unwrap();
+
+            let amount = amount.load_scalar(fx);
+
+            let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Sub, ptr, amount);
+
+            let old = CValue::by_val(old, layout);
+            ret.write_cvalue(fx, old);
         };
-        _ if intrinsic.starts_with("atomic_and"), <T> (v ptr, v src) {
-            atomic_binop_return_old! (fx, band<T>(ptr, src) -> ret);
+        _ if intrinsic.as_str().starts_with("atomic_and"), (v ptr, c src) {
+            let layout = src.layout();
+            validate_atomic_type!(fx, intrinsic, span, layout.ty);
+            let ty = fx.clif_type(layout.ty).unwrap();
+
+            let src = src.load_scalar(fx);
+
+            let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::And, ptr, src);
+
+            let old = CValue::by_val(old, layout);
+            ret.write_cvalue(fx, old);
         };
-        _ if intrinsic.starts_with("atomic_nand"), <T> (v ptr, v src) {
-            let clif_ty = fx.clif_type(T).unwrap();
-            let old = fx.bcx.ins().load(clif_ty, MemFlags::new(), ptr, 0);
-            let and = fx.bcx.ins().band(old, src);
-            let new = fx.bcx.ins().bnot(and);
-            fx.bcx.ins().store(MemFlags::new(), new, ptr, 0);
-            ret.write_cvalue(fx, CValue::by_val(old, fx.layout_of(T)));
+        _ if intrinsic.as_str().starts_with("atomic_or"), (v ptr, c src) {
+            let layout = src.layout();
+            validate_atomic_type!(fx, intrinsic, span, layout.ty);
+            let ty = fx.clif_type(layout.ty).unwrap();
+
+            let src = src.load_scalar(fx);
+
+            let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Or, ptr, src);
+
+            let old = CValue::by_val(old, layout);
+            ret.write_cvalue(fx, old);
         };
-        _ if intrinsic.starts_with("atomic_or"), <T> (v ptr, v src) {
-            atomic_binop_return_old! (fx, bor<T>(ptr, src) -> ret);
+        _ if intrinsic.as_str().starts_with("atomic_xor"), (v ptr, c src) {
+            let layout = src.layout();
+            validate_atomic_type!(fx, intrinsic, span, layout.ty);
+            let ty = fx.clif_type(layout.ty).unwrap();
+
+            let src = src.load_scalar(fx);
+
+            let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Xor, ptr, src);
+
+            let old = CValue::by_val(old, layout);
+            ret.write_cvalue(fx, old);
         };
-        _ if intrinsic.starts_with("atomic_xor"), <T> (v ptr, v src) {
-            atomic_binop_return_old! (fx, bxor<T>(ptr, src) -> ret);
+        _ if intrinsic.as_str().starts_with("atomic_nand"), (v ptr, c src) {
+            let layout = src.layout();
+            validate_atomic_type!(fx, intrinsic, span, layout.ty);
+            let ty = fx.clif_type(layout.ty).unwrap();
+
+            let src = src.load_scalar(fx);
+
+            let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Nand, ptr, src);
+
+            let old = CValue::by_val(old, layout);
+            ret.write_cvalue(fx, old);
         };
+        _ if intrinsic.as_str().starts_with("atomic_max"), (v ptr, c src) {
+            let layout = src.layout();
+            validate_atomic_type!(fx, intrinsic, span, layout.ty);
+            let ty = fx.clif_type(layout.ty).unwrap();
+
+            let src = src.load_scalar(fx);
 
-        _ if intrinsic.starts_with("atomic_max"), <T> (v ptr, v src) {
-            atomic_minmax!(fx, IntCC::SignedGreaterThan, <T> (ptr, src) -> ret);
+            let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Smax, ptr, src);
+
+            let old = CValue::by_val(old, layout);
+            ret.write_cvalue(fx, old);
         };
-        _ if intrinsic.starts_with("atomic_umax"), <T> (v ptr, v src) {
-            atomic_minmax!(fx, IntCC::UnsignedGreaterThan, <T> (ptr, src) -> ret);
+        _ if intrinsic.as_str().starts_with("atomic_umax"), (v ptr, c src) {
+            let layout = src.layout();
+            validate_atomic_type!(fx, intrinsic, span, layout.ty);
+            let ty = fx.clif_type(layout.ty).unwrap();
+
+            let src = src.load_scalar(fx);
+
+            let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Umax, ptr, src);
+
+            let old = CValue::by_val(old, layout);
+            ret.write_cvalue(fx, old);
         };
-        _ if intrinsic.starts_with("atomic_min"), <T> (v ptr, v src) {
-            atomic_minmax!(fx, IntCC::SignedLessThan, <T> (ptr, src) -> ret);
+        _ if intrinsic.as_str().starts_with("atomic_min"), (v ptr, c src) {
+            let layout = src.layout();
+            validate_atomic_type!(fx, intrinsic, span, layout.ty);
+            let ty = fx.clif_type(layout.ty).unwrap();
+
+            let src = src.load_scalar(fx);
+
+            let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Smin, ptr, src);
+
+            let old = CValue::by_val(old, layout);
+            ret.write_cvalue(fx, old);
         };
-        _ if intrinsic.starts_with("atomic_umin"), <T> (v ptr, v src) {
-            atomic_minmax!(fx, IntCC::UnsignedLessThan, <T> (ptr, src) -> ret);
+        _ if intrinsic.as_str().starts_with("atomic_umin"), (v ptr, c src) {
+            let layout = src.layout();
+            validate_atomic_type!(fx, intrinsic, span, layout.ty);
+            let ty = fx.clif_type(layout.ty).unwrap();
+
+            let src = src.load_scalar(fx);
+
+            let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Umin, ptr, src);
+
+            let old = CValue::by_val(old, layout);
+            ret.write_cvalue(fx, old);
         };
 
+        // In Rust floating point min and max don't propagate NaN. In Cranelift they do however.
+        // For this reason it is necessary to use `a.is_nan() ? b : (a >= b ? b : a)` for `minnumf*`
+        // and `a.is_nan() ? b : (a <= b ? b : a)` for `maxnumf*`. NaN checks are done by comparing
+        // a float against itself. Only in case of NaN is it not equal to itself.
         minnumf32, (v a, v b) {
-            let val = fx.bcx.ins().fmin(a, b);
+            let a_is_nan = fx.bcx.ins().fcmp(FloatCC::NotEqual, a, a);
+            let a_ge_b = fx.bcx.ins().fcmp(FloatCC::GreaterThanOrEqual, a, b);
+            let temp = fx.bcx.ins().select(a_ge_b, b, a);
+            let val = fx.bcx.ins().select(a_is_nan, b, temp);
             let val = CValue::by_val(val, fx.layout_of(fx.tcx.types.f32));
             ret.write_cvalue(fx, val);
         };
         minnumf64, (v a, v b) {
-            let val = fx.bcx.ins().fmin(a, b);
+            let a_is_nan = fx.bcx.ins().fcmp(FloatCC::NotEqual, a, a);
+            let a_ge_b = fx.bcx.ins().fcmp(FloatCC::GreaterThanOrEqual, a, b);
+            let temp = fx.bcx.ins().select(a_ge_b, b, a);
+            let val = fx.bcx.ins().select(a_is_nan, b, temp);
             let val = CValue::by_val(val, fx.layout_of(fx.tcx.types.f64));
             ret.write_cvalue(fx, val);
         };
         maxnumf32, (v a, v b) {
-            let val = fx.bcx.ins().fmax(a, b);
+            let a_is_nan = fx.bcx.ins().fcmp(FloatCC::NotEqual, a, a);
+            let a_le_b = fx.bcx.ins().fcmp(FloatCC::LessThanOrEqual, a, b);
+            let temp = fx.bcx.ins().select(a_le_b, b, a);
+            let val = fx.bcx.ins().select(a_is_nan, b, temp);
             let val = CValue::by_val(val, fx.layout_of(fx.tcx.types.f32));
             ret.write_cvalue(fx, val);
         };
         maxnumf64, (v a, v b) {
-            let val = fx.bcx.ins().fmax(a, b);
+            let a_is_nan = fx.bcx.ins().fcmp(FloatCC::NotEqual, a, a);
+            let a_le_b = fx.bcx.ins().fcmp(FloatCC::LessThanOrEqual, a, b);
+            let temp = fx.bcx.ins().select(a_le_b, b, a);
+            let val = fx.bcx.ins().select(a_is_nan, b, temp);
             let val = CValue::by_val(val, fx.layout_of(fx.tcx.types.f64));
             ret.write_cvalue(fx, val);
         };
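
The `is_nan`/`select` cascade implements the minNum/maxNum behaviour described in the comment above: the non-NaN operand is returned instead of propagating NaN, matching `f32::min`/`f32::max`:

    fn main() {
        assert_eq!(f32::min(1.0, f32::NAN), 1.0);
        assert_eq!(f32::max(f32::NAN, 2.0), 2.0);
        // Only when both operands are NaN is the result NaN.
        assert!(f32::min(f32::NAN, f32::NAN).is_nan());
    }
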
 
-        try, (v f, v data, v _local_ptr) {
+        kw.Try, (v f, v data, v _catch_fn) {
             // FIXME once unwinding is supported, change this to actually catch panics
             let f_sig = fx.bcx.func.import_signature(Signature {
                 call_conv: CallConv::triple_default(fx.triple()),
@@ -941,14 +1074,76 @@ fn swap(bcx: &mut FunctionBuilder, v: Value) -> Value {
 
             fx.bcx.ins().call_indirect(f_sig, f, &[data]);
 
-            let ret_val = CValue::const_val(fx, ret.layout().ty, 0);
+            let layout = ret.layout();
+            let ret_val = CValue::const_val(fx, layout, ty::ScalarInt::null(layout.size));
             ret.write_cvalue(fx, ret_val);
         };
+
+        fadd_fast | fsub_fast | fmul_fast | fdiv_fast | frem_fast, (c x, c y) {
+            let res = crate::num::codegen_float_binop(fx, match intrinsic {
+                sym::fadd_fast => BinOp::Add,
+                sym::fsub_fast => BinOp::Sub,
+                sym::fmul_fast => BinOp::Mul,
+                sym::fdiv_fast => BinOp::Div,
+                sym::frem_fast => BinOp::Rem,
+                _ => unreachable!(),
+            }, x, y);
+            ret.write_cvalue(fx, res);
+        };
+        float_to_int_unchecked, (v f) {
+            let res = crate::cast::clif_int_or_float_cast(
+                fx,
+                f,
+                false,
+                fx.clif_type(ret.layout().ty).unwrap(),
+                type_sign(ret.layout().ty),
+            );
+            ret.write_cvalue(fx, CValue::by_val(res, ret.layout()));
+        };
+
+        raw_eq, <T>(v lhs_ref, v rhs_ref) {
+            fn type_by_size(size: Size) -> Option<Type> {
+                Type::int(size.bits().try_into().ok()?)
+            }
+
+            let size = fx.layout_of(T).layout.size;
+            // FIXME add and use emit_small_memcmp
+            let is_eq_value =
+                if size == Size::ZERO {
+                    // No bytes means they're trivially equal
+                    fx.bcx.ins().iconst(types::I8, 1)
+                } else if let Some(clty) = type_by_size(size) {
+                    // Can't use `trusted` for these loads; they could be unaligned.
+                    let mut flags = MemFlags::new();
+                    flags.set_notrap();
+                    let lhs_val = fx.bcx.ins().load(clty, flags, lhs_ref, 0);
+                    let rhs_val = fx.bcx.ins().load(clty, flags, rhs_ref, 0);
+                    let eq = fx.bcx.ins().icmp(IntCC::Equal, lhs_val, rhs_val);
+                    fx.bcx.ins().bint(types::I8, eq)
+                } else {
+                    // Just call `memcmp` (like slices do in core) when the
+                    // size is too large or it's not a power-of-two.
+                    let signed_bytes = i64::try_from(size.bytes()).unwrap();
+                    let bytes_val = fx.bcx.ins().iconst(fx.pointer_type, signed_bytes);
+                    let params = vec![AbiParam::new(fx.pointer_type); 3];
+                    let returns = vec![AbiParam::new(types::I32)];
+                    let args = &[lhs_ref, rhs_ref, bytes_val];
+                    let cmp = fx.lib_call("memcmp", params, returns, args)[0];
+                    let eq = fx.bcx.ins().icmp_imm(IntCC::Equal, cmp, 0);
+                    fx.bcx.ins().bint(types::I8, eq)
+                };
+            ret.write_cvalue(fx, CValue::by_val(is_eq_value, ret.layout()));
+        };
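+
`raw_eq` compares the underlying bytes of two values; for a sized type this is equivalent to comparing their native-endian byte representations (sketch, not part of the patch):

    fn main() {
        let a: u32 = 0x0102_0304;
        let b: u32 = 0x0102_0304;
        // Byte-wise equality, which is what the single-load compare or memcmp above checks.
        assert_eq!(a.to_ne_bytes(), b.to_ne_bytes());
    }
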
+
+        black_box, (c a) {
+            // FIXME implement black_box semantics
+            ret.write_cvalue(fx, a);
+        };
     }
 
     if let Some((_, dest)) = destination {
-        let ret_ebb = fx.get_ebb(dest);
-        fx.bcx.ins().jump(ret_ebb, &[]);
+        let ret_block = fx.get_block(dest);
+        fx.bcx.ins().jump(ret_block, &[]);
     } else {
         trap_unreachable(fx, "[corruption] Diverging intrinsic returned.");
     }