Rustup to rustc 1.44.0-nightly (7ceebd98c 2020-03-17)
[rust.git] / src/intrinsics/mod.rs
index 7e4144d29e1017d05c3669e41b511eb597b54ae9..de96c4e6edec81a83b6114f996c082d706ba54f8 100644 (file)
@@ -1,4 +1,5 @@
 pub mod llvm;
+mod simd;
 
 use crate::prelude::*;
 
@@ -43,6 +44,7 @@
     $(
         $($($name:tt).*)|+ $(if $cond:expr)?, $(<$($subst:ident),*>)? ($($a:ident $arg:ident),*) $content:block;
     )*) => {
+        let _ = $substs; // Silence warning when substs is unused.
         match $intrinsic {
             $(
                 $(intrinsic_pat!($($name).*))|* $(if $cond)? => {
@@ -70,7 +72,7 @@
     }
 }
 
-macro_rules! call_intrinsic_match {
+macro call_intrinsic_match {
     ($fx:expr, $intrinsic:expr, $substs:expr, $ret:expr, $destination:expr, $args:expr, $(
         $name:ident($($arg:ident),*) -> $ty:ident => $func:ident,
     )*) => {
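
Side note on the `macro_rules!` → `macro` conversions throughout this commit: `macro` is the unstable "macros 2.0" declarative form, so the crate presumably enables `#![feature(decl_macro)]` at its root. A minimal sketch of the syntax, with made-up macro names (not taken from this file):

```rust
#![feature(decl_macro)] // nightly-only feature gate (assumed)

// Single-rule form: no outer braces and no `=>`.
macro square($x:expr) {
    $x * $x
}

// Multi-rule form: rules are separated by commas rather than semicolons,
// which is why `};` becomes `},` in several hunks below.
macro describe {
    () => { "nothing" },
    ($x:expr) => { "something" },
}

fn main() {
    assert_eq!(square!(3), 9); // still invoked with `!` like macro_rules! macros
    assert_eq!(describe!(42), "something");
}
```
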
@@ -86,8 +88,8 @@ macro_rules! call_intrinsic_match {
                         $ret.write_cvalue($fx, res);
 
                         if let Some((_, dest)) = $destination {
-                            let ret_ebb = $fx.get_ebb(dest);
-                            $fx.bcx.ins().jump(ret_ebb, &[]);
+                            let ret_block = $fx.get_block(dest);
+                            $fx.bcx.ins().jump(ret_block, &[]);
                             return;
                         } else {
                             unreachable!();
@@ -102,54 +104,83 @@ macro_rules! call_intrinsic_match {
     }
 }
 
-macro_rules! atomic_binop_return_old {
-    ($fx:expr, $op:ident<$T:ident>($ptr:ident, $src:ident) -> $ret:ident) => {
-        let clif_ty = $fx.clif_type($T).unwrap();
-        let old = $fx.bcx.ins().load(clif_ty, MemFlags::new(), $ptr, 0);
-        let new = $fx.bcx.ins().$op(old, $src);
-        $fx.bcx.ins().store(MemFlags::new(), new, $ptr, 0);
-        $ret.write_cvalue($fx, CValue::by_val(old, $fx.layout_of($T)));
-    };
+macro atomic_binop_return_old($fx:expr, $op:ident<$T:ident>($ptr:ident, $src:ident) -> $ret:ident)  {
+    crate::atomic_shim::lock_global_lock($fx);
+
+    let clif_ty = $fx.clif_type($T).unwrap();
+    let old = $fx.bcx.ins().load(clif_ty, MemFlags::new(), $ptr, 0);
+    let new = $fx.bcx.ins().$op(old, $src);
+    $fx.bcx.ins().store(MemFlags::new(), new, $ptr, 0);
+    $ret.write_cvalue($fx, CValue::by_val(old, $fx.layout_of($T)));
+
+    crate::atomic_shim::unlock_global_lock($fx);
 }
 
-macro_rules! atomic_minmax {
-    ($fx:expr, $cc:expr, <$T:ident> ($ptr:ident, $src:ident) -> $ret:ident) => {
-        // Read old
-        let clif_ty = $fx.clif_type($T).unwrap();
-        let old = $fx.bcx.ins().load(clif_ty, MemFlags::new(), $ptr, 0);
+macro atomic_minmax($fx:expr, $cc:expr, <$T:ident> ($ptr:ident, $src:ident) -> $ret:ident) {
+    crate::atomic_shim::lock_global_lock($fx);
 
-        // Compare
-        let is_eq = codegen_icmp($fx, IntCC::SignedGreaterThan, old, $src);
-        let new = $fx.bcx.ins().select(is_eq, old, $src);
+    // Read old
+    let clif_ty = $fx.clif_type($T).unwrap();
+    let old = $fx.bcx.ins().load(clif_ty, MemFlags::new(), $ptr, 0);
 
-        // Write new
-        $fx.bcx.ins().store(MemFlags::new(), new, $ptr, 0);
+    // Compare
+    let is_eq = codegen_icmp($fx, IntCC::SignedGreaterThan, old, $src);
+    let new = $fx.bcx.ins().select(is_eq, old, $src);
 
-        let ret_val = CValue::by_val(old, $ret.layout());
-        $ret.write_cvalue($fx, ret_val);
-    };
+    // Write new
+    $fx.bcx.ins().store(MemFlags::new(), new, $ptr, 0);
+
+    let ret_val = CValue::by_val(old, $ret.layout());
+    $ret.write_cvalue($fx, ret_val);
+
+    crate::atomic_shim::unlock_global_lock($fx);
 }
 
 fn lane_type_and_count<'tcx>(
-    fx: &FunctionCx<'_, 'tcx, impl Backend>,
+    tcx: TyCtxt<'tcx>,
     layout: TyLayout<'tcx>,
-    intrinsic: &str,
 ) -> (TyLayout<'tcx>, u32) {
     assert!(layout.ty.is_simd());
     let lane_count = match layout.fields {
         layout::FieldPlacement::Array { stride: _, count } => u32::try_from(count).unwrap(),
-        _ => panic!(
-            "Non vector type {:?} passed to or returned from simd_* intrinsic {}",
-            layout.ty, intrinsic
-        ),
+        _ => unreachable!("lane_type_and_count({:?})", layout),
     };
-    let lane_layout = layout.field(fx, 0);
+    let lane_layout = layout.field(&ty::layout::LayoutCx {
+        tcx,
+        param_env: ParamEnv::reveal_all(),
+    }, 0).unwrap();
     (lane_layout, lane_count)
 }
 
 fn simd_for_each_lane<'tcx, B: Backend>(
     fx: &mut FunctionCx<'_, 'tcx, B>,
-    intrinsic: &str,
+    val: CValue<'tcx>,
+    ret: CPlace<'tcx>,
+    f: impl Fn(
+        &mut FunctionCx<'_, 'tcx, B>,
+        TyLayout<'tcx>,
+        TyLayout<'tcx>,
+        Value,
+    ) -> CValue<'tcx>,
+) {
+    let layout = val.layout();
+
+    let (lane_layout, lane_count) = lane_type_and_count(fx.tcx, layout);
+    let (ret_lane_layout, ret_lane_count) = lane_type_and_count(fx.tcx, ret.layout());
+    assert_eq!(lane_count, ret_lane_count);
+
+    for lane_idx in 0..lane_count {
+        let lane_idx = mir::Field::new(lane_idx.try_into().unwrap());
+        let lane = val.value_field(fx, lane_idx).load_scalar(fx);
+
+        let res_lane = f(fx, lane_layout, ret_lane_layout, lane);
+
+        ret.place_field(fx, lane_idx).write_cvalue(fx, res_lane);
+    }
+}
+
+fn simd_pair_for_each_lane<'tcx, B: Backend>(
+    fx: &mut FunctionCx<'_, 'tcx, B>,
     x: CValue<'tcx>,
     y: CValue<'tcx>,
     ret: CPlace<'tcx>,
@@ -164,8 +195,8 @@ fn simd_for_each_lane<'tcx, B: Backend>(
     assert_eq!(x.layout(), y.layout());
     let layout = x.layout();
 
-    let (lane_layout, lane_count) = lane_type_and_count(fx, layout, intrinsic);
-    let (ret_lane_layout, ret_lane_count) = lane_type_and_count(fx, ret.layout(), intrinsic);
+    let (lane_layout, lane_count) = lane_type_and_count(fx.tcx, layout);
+    let (ret_lane_layout, ret_lane_count) = lane_type_and_count(fx.tcx, ret.layout());
     assert_eq!(lane_count, ret_lane_count);
 
     for lane in 0..lane_count {
@@ -206,11 +237,10 @@ fn bool_to_zero_or_max_uint<'tcx>(
     CValue::by_val(res, layout)
 }
 
-macro_rules! simd_cmp {
-    ($fx:expr, $intrinsic:expr, $cc:ident($x:ident, $y:ident) -> $ret:ident) => {
-        simd_for_each_lane(
+macro simd_cmp {
+    ($fx:expr, $cc:ident($x:ident, $y:ident) -> $ret:ident) => {
+        simd_pair_for_each_lane(
             $fx,
-            $intrinsic,
             $x,
             $y,
             $ret,
@@ -222,11 +252,10 @@ macro_rules! simd_cmp {
                 bool_to_zero_or_max_uint(fx, res_lane_layout, res_lane)
             },
         );
-    };
-    ($fx:expr, $intrinsic:expr, $cc_u:ident|$cc_s:ident($x:ident, $y:ident) -> $ret:ident) => {
-        simd_for_each_lane(
+    },
+    ($fx:expr, $cc_u:ident|$cc_s:ident($x:ident, $y:ident) -> $ret:ident) => {
+        simd_pair_for_each_lane(
             $fx,
-            $intrinsic,
             $x,
             $y,
             $ret,
@@ -239,30 +268,16 @@ macro_rules! simd_cmp {
                 bool_to_zero_or_max_uint(fx, res_lane_layout, res_lane)
             },
         );
-    };
+    },
 }
 
-macro_rules! simd_int_binop {
-    ($fx:expr, $intrinsic:expr, $op:ident($x:ident, $y:ident) -> $ret:ident) => {
-        simd_for_each_lane(
-            $fx,
-            $intrinsic,
-            $x,
-            $y,
-            $ret,
-            |fx, lane_layout, ret_lane_layout, x_lane, y_lane| {
-                let res_lane = match lane_layout.ty.kind {
-                    ty::Uint(_) | ty::Int(_) => fx.bcx.ins().$op(x_lane, y_lane),
-                    _ => unreachable!("{:?}", lane_layout.ty),
-                };
-                CValue::by_val(res_lane, ret_lane_layout)
-            },
-        );
-    };
-    ($fx:expr, $intrinsic:expr, $op_u:ident|$op_s:ident($x:ident, $y:ident) -> $ret:ident) => {
-        simd_for_each_lane(
+macro simd_int_binop {
+    ($fx:expr, $op:ident($x:ident, $y:ident) -> $ret:ident) => {
+        simd_int_binop!($fx, $op|$op($x, $y) -> $ret);
+    },
+    ($fx:expr, $op_u:ident|$op_s:ident($x:ident, $y:ident) -> $ret:ident) => {
+        simd_pair_for_each_lane(
             $fx,
-            $intrinsic,
             $x,
             $y,
             $ret,
@@ -275,31 +290,16 @@ macro_rules! simd_int_binop {
                 CValue::by_val(res_lane, ret_lane_layout)
             },
         );
-    };
+    },
 }
 
-macro_rules! simd_int_flt_binop {
-    ($fx:expr, $intrinsic:expr, $op:ident|$op_f:ident($x:ident, $y:ident) -> $ret:ident) => {
-        simd_for_each_lane(
-            $fx,
-            $intrinsic,
-            $x,
-            $y,
-            $ret,
-            |fx, lane_layout, ret_lane_layout, x_lane, y_lane| {
-                let res_lane = match lane_layout.ty.kind {
-                    ty::Uint(_) | ty::Int(_) => fx.bcx.ins().$op(x_lane, y_lane),
-                    ty::Float(_) => fx.bcx.ins().$op_f(x_lane, y_lane),
-                    _ => unreachable!("{:?}", lane_layout.ty),
-                };
-                CValue::by_val(res_lane, ret_lane_layout)
-            },
-        );
-    };
-    ($fx:expr, $intrinsic:expr, $op_u:ident|$op_s:ident|$op_f:ident($x:ident, $y:ident) -> $ret:ident) => {
-        simd_for_each_lane(
+macro simd_int_flt_binop {
+    ($fx:expr, $op:ident|$op_f:ident($x:ident, $y:ident) -> $ret:ident) => {
+        simd_int_flt_binop!($fx, $op|$op|$op_f($x, $y) -> $ret);
+    },
+    ($fx:expr, $op_u:ident|$op_s:ident|$op_f:ident($x:ident, $y:ident) -> $ret:ident) => {
+        simd_pair_for_each_lane(
             $fx,
-            $intrinsic,
             $x,
             $y,
             $ret,
@@ -313,26 +313,23 @@ macro_rules! simd_int_flt_binop {
                 CValue::by_val(res_lane, ret_lane_layout)
             },
         );
-    };
+    },
 }
 
-macro_rules! simd_flt_binop {
-    ($fx:expr, $intrinsic:expr, $op:ident($x:ident, $y:ident) -> $ret:ident) => {
-        simd_for_each_lane(
-            $fx,
-            $intrinsic,
-            $x,
-            $y,
-            $ret,
-            |fx, lane_layout, ret_lane_layout, x_lane, y_lane| {
-                let res_lane = match lane_layout.ty.kind {
-                    ty::Float(_) => fx.bcx.ins().$op(x_lane, y_lane),
-                    _ => unreachable!("{:?}", lane_layout.ty),
-                };
-                CValue::by_val(res_lane, ret_lane_layout)
-            },
-        );
-    };
+macro simd_flt_binop($fx:expr, $op:ident($x:ident, $y:ident) -> $ret:ident) {
+    simd_pair_for_each_lane(
+        $fx,
+        $x,
+        $y,
+        $ret,
+        |fx, lane_layout, ret_lane_layout, x_lane, y_lane| {
+            let res_lane = match lane_layout.ty.kind {
+                ty::Float(_) => fx.bcx.ins().$op(x_lane, y_lane),
+                _ => unreachable!("{:?}", lane_layout.ty),
+            };
+            CValue::by_val(res_lane, ret_lane_layout)
+        },
+    );
 }
 
 pub fn codegen_intrinsic_call<'tcx>(
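
All of the `simd_*` binop macros above now funnel into `simd_pair_for_each_lane`, which lowers a vector operation into one scalar Cranelift instruction per lane. A plain-Rust model of what that lane-by-lane lowering computes for a 4-lane `simd_add` (illustrative only; the real code emits one `iadd`/`fadd` per lane rather than using vector instructions):

```rust
// Each output lane depends only on the corresponding input lanes.
fn simd_add_model(x: [i32; 4], y: [i32; 4]) -> [i32; 4] {
    let mut ret = [0i32; 4];
    for lane in 0..4 {
        ret[lane] = x[lane].wrapping_add(y[lane]); // one `iadd` per lane
    }
    ret
}

fn main() {
    assert_eq!(simd_add_model([1, 2, 3, 4], [10, 20, 30, 40]), [11, 22, 33, 44]);
}
```
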
@@ -362,7 +359,7 @@ pub fn codegen_intrinsic_call<'tcx>(
                 "transmute" => {
                     trap_unreachable(
                         fx,
-                        "[corruption] Called intrinsic::transmute with uninhabited argument.",
+                        "[corruption] Transmuting to uninhabited type.",
                     );
                 }
                _ => unimplemented!("unsupported intrinsic {}", intrinsic),
@@ -371,6 +368,13 @@ pub fn codegen_intrinsic_call<'tcx>(
         }
     };
 
+    if intrinsic.starts_with("simd_") {
+        self::simd::codegen_simd_intrinsic_call(fx, instance, args, ret, span);
+        let ret_block = fx.get_block(destination.expect("SIMD intrinsics don't diverge").1);
+        fx.bcx.ins().jump(ret_block, &[]);
+        return;
+    }
+
     let usize_layout = fx.layout_of(fx.tcx.types.usize);
 
     call_intrinsic_match! {
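
The new early return above routes every `simd_*` intrinsic to the freshly split-out `src/intrinsics/simd.rs` before the generic matching below runs. For reference, these intrinsics are reached from user code through `extern "platform-intrinsic"` declarations on `#[repr(simd)]` types; a nightly-only sketch of such a caller (feature names as of this era, not part of this diff):

```rust
#![feature(repr_simd, platform_intrinsics)]

#[repr(simd)]
#[derive(Copy, Clone)]
struct I32x4(i32, i32, i32, i32);

extern "platform-intrinsic" {
    fn simd_add<T>(x: T, y: T) -> T; // lowered by codegen_simd_intrinsic_call
}

fn main() {
    let a = I32x4(1, 2, 3, 4);
    let b = I32x4(10, 20, 30, 40);
    let c = unsafe { simd_add(a, b) };
    assert_eq!((c.0, c.1, c.2, c.3), (11, 22, 33, 44));
}
```
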
@@ -421,7 +425,7 @@ pub fn codegen_intrinsic_call<'tcx>(
     intrinsic_match! {
         fx, intrinsic, substs, args,
         _ => {
-            unimpl!("unsupported intrinsic {}", intrinsic)
+            unimpl_fatal!(fx.tcx, span, "unsupported intrinsic {}", intrinsic);
         };
 
         assume, (c _a) {};
@@ -441,8 +445,10 @@ pub fn codegen_intrinsic_call<'tcx>(
             let byte_amount = fx.bcx.ins().imul(count, elem_size);
 
             if intrinsic.ends_with("_nonoverlapping") {
+                // FIXME emit_small_memcpy
                 fx.bcx.call_memcpy(fx.module.target_config(), dst, src, byte_amount);
             } else {
+                // FIXME emit_small_memmove
                 fx.bcx.call_memmove(fx.module.target_config(), dst, src, byte_amount);
             }
         };
@@ -456,7 +462,7 @@ pub fn codegen_intrinsic_call<'tcx>(
             let layout = fx.layout_of(T);
             let size = if layout.is_unsized() {
                 let (_ptr, info) = ptr.load_scalar_pair(fx);
-                let (size, _align) = crate::unsize::size_and_align_of_dst(fx, layout.ty, info);
+                let (size, _align) = crate::unsize::size_and_align_of_dst(fx, layout, info);
                 size
             } else {
                 fx
@@ -470,7 +476,7 @@ pub fn codegen_intrinsic_call<'tcx>(
             let layout = fx.layout_of(T);
             let align = if layout.is_unsized() {
                 let (_ptr, info) = ptr.load_scalar_pair(fx);
-                let (_size, align) = crate::unsize::size_and_align_of_dst(fx, layout.ty, info);
+                let (_size, align) = crate::unsize::size_and_align_of_dst(fx, layout, info);
                 align
             } else {
                 fx
@@ -489,7 +495,7 @@ pub fn codegen_intrinsic_call<'tcx>(
                 "unchecked_rem" => BinOp::Rem,
                 "unchecked_shl" => BinOp::Shl,
                 "unchecked_shr" => BinOp::Shr,
-                _ => unimplemented!("intrinsic {}", intrinsic),
+                _ => unreachable!("intrinsic {}", intrinsic),
             };
             let res = crate::num::trans_int_binop(fx, bin_op, x, y);
             ret.write_cvalue(fx, res);
@@ -500,7 +506,7 @@ pub fn codegen_intrinsic_call<'tcx>(
                 "add_with_overflow" => BinOp::Add,
                 "sub_with_overflow" => BinOp::Sub,
                 "mul_with_overflow" => BinOp::Mul,
-                _ => unimplemented!("intrinsic {}", intrinsic),
+                _ => unreachable!("intrinsic {}", intrinsic),
             };
 
             let res = crate::num::trans_checked_int_binop(
@@ -517,7 +523,7 @@ pub fn codegen_intrinsic_call<'tcx>(
                 "wrapping_add" => BinOp::Add,
                 "wrapping_sub" => BinOp::Sub,
                 "wrapping_mul" => BinOp::Mul,
-                _ => unimplemented!("intrinsic {}", intrinsic),
+                _ => unreachable!("intrinsic {}", intrinsic),
             };
             let res = crate::num::trans_int_binop(
                 fx,
@@ -532,7 +538,7 @@ pub fn codegen_intrinsic_call<'tcx>(
             let bin_op = match intrinsic {
                 "saturating_add" => BinOp::Add,
                 "saturating_sub" => BinOp::Sub,
-                _ => unimplemented!("intrinsic {}", intrinsic),
+                _ => unreachable!("intrinsic {}", intrinsic),
             };
 
             let signed = type_sign(T);
@@ -600,82 +606,11 @@ pub fn codegen_intrinsic_call<'tcx>(
 
         transmute, <src_ty, dst_ty> (c from) {
             assert_eq!(from.layout().ty, src_ty);
-            let addr = from.force_stack(fx);
+            let (addr, meta) = from.force_stack(fx);
+            assert!(meta.is_none());
             let dst_layout = fx.layout_of(dst_ty);
             ret.write_cvalue(fx, CValue::by_ref(addr, dst_layout))
         };
-        init, () {
-            let layout = ret.layout();
-            if layout.abi == Abi::Uninhabited {
-                crate::trap::trap_panic(fx, "[panic] Called intrinsic::init for uninhabited type.");
-                return;
-            }
-
-            match *ret.inner() {
-                CPlaceInner::NoPlace => {}
-                CPlaceInner::Var(var) => {
-                    let clif_ty = fx.clif_type(layout.ty).unwrap();
-                    let val = match clif_ty {
-                        types::I8 | types::I16 | types::I32 | types::I64 => fx.bcx.ins().iconst(clif_ty, 0),
-                        types::I128 => {
-                            let zero = fx.bcx.ins().iconst(types::I64, 0);
-                            fx.bcx.ins().iconcat(zero, zero)
-                        }
-                        types::F32 => {
-                            let zero = fx.bcx.ins().iconst(types::I32, 0);
-                            fx.bcx.ins().bitcast(types::F32, zero)
-                        }
-                        types::F64 => {
-                            let zero = fx.bcx.ins().iconst(types::I64, 0);
-                            fx.bcx.ins().bitcast(types::F64, zero)
-                        }
-                        _ => panic!("clif_type returned {}", clif_ty),
-                    };
-                    fx.bcx.set_val_label(val, cranelift::codegen::ir::ValueLabel::from_u32(var.as_u32()));
-                    fx.bcx.def_var(mir_var(var), val);
-                }
-                _ => {
-                    let addr = ret.to_ptr(fx).get_addr(fx);
-                    let layout = ret.layout();
-                    fx.bcx.emit_small_memset(fx.module.target_config(), addr, 0, layout.size.bytes(), 1);
-                }
-            }
-        };
-        uninit, () {
-            let layout = ret.layout();
-            if layout.abi == Abi::Uninhabited {
-                crate::trap::trap_panic(fx, "[panic] Called intrinsic::uninit for uninhabited type.");
-                return;
-            }
-            match *ret.inner() {
-                CPlaceInner::NoPlace => {},
-                CPlaceInner::Var(var) => {
-                    let clif_ty = fx.clif_type(layout.ty).unwrap();
-                    let val = match clif_ty {
-                        types::I8 | types::I16 | types::I32 | types::I64 => fx.bcx.ins().iconst(clif_ty, 42),
-                        types::I128 => {
-                            let zero = fx.bcx.ins().iconst(types::I64, 0);
-                            let fourty_two = fx.bcx.ins().iconst(types::I64, 42);
-                            fx.bcx.ins().iconcat(fourty_two, zero)
-                        }
-                        types::F32 => {
-                            let zero = fx.bcx.ins().iconst(types::I32, 0xdeadbeef);
-                            fx.bcx.ins().bitcast(types::F32, zero)
-                        }
-                        types::F64 => {
-                            let zero = fx.bcx.ins().iconst(types::I64, 0xcafebabedeadbeefu64 as i64);
-                            fx.bcx.ins().bitcast(types::F64, zero)
-                        }
-                        _ => panic!("clif_type returned {}", clif_ty),
-                    };
-                    fx.bcx.set_val_label(val, cranelift::codegen::ir::ValueLabel::from_u32(var.as_u32()));
-                    fx.bcx.def_var(mir_var(var), val);
-                }
-                CPlaceInner::Addr(_, _) => {
-                    // Don't write to `ret`, as the destination memory is already uninitialized.
-                }
-            }
-        };
         write_bytes, (c dst, v val, v count) {
             let pointee_ty = dst.layout().ty.builtin_deref(true).unwrap().ty;
             let pointee_size = fx.layout_of(pointee_ty).size.bytes();
@@ -800,15 +735,26 @@ fn swap(bcx: &mut FunctionBuilder, v: Value) -> Value {
                         let hi = swap(bcx, hi);
                         bcx.ins().iconcat(hi, lo)
                     }
-                    ty => unimplemented!("bswap {}", ty),
+                    ty => unreachable!("bswap {}", ty),
                 }
             };
             let res = CValue::by_val(swap(&mut fx.bcx, arg), fx.layout_of(T));
             ret.write_cvalue(fx, res);
         };
-        panic_if_uninhabited, <T> () {
-            if fx.layout_of(T).abi.is_uninhabited() {
-                crate::trap::trap_panic(fx, "[panic] Called intrinsic::panic_if_uninhabited for uninhabited type.");
+        assert_inhabited | assert_zero_valid | assert_uninit_valid, <T> () {
+            let layout = fx.layout_of(T);
+            if layout.abi.is_uninhabited() {
+                crate::trap::trap_panic(fx, &format!("attempted to instantiate uninhabited type `{}`", T));
+                return;
+            }
+
+            if intrinsic == "assert_zero_valid" && !layout.might_permit_raw_init(fx, /*zero:*/ true).unwrap() {
+                crate::trap::trap_panic(fx, &format!("attempted to zero-initialize type `{}`, which is invalid", T));
+                return;
+            }
+
+            if intrinsic == "assert_uninit_valid" && !layout.might_permit_raw_init(fx, /*zero:*/ false).unwrap() {
+                crate::trap::trap_panic(fx, &format!("attempted to leave type `{}` uninitialized, which is invalid", T));
                 return;
             }
         };
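
These three intrinsics are emitted by the standard library's `mem::zeroed`/`mem::uninitialized` wrappers before the value is produced, so an invalid request now traps with the messages above instead of silently handing back garbage. A hedged illustration of which check fires (the invalid case is left commented out on purpose):

```rust
use std::mem;

fn main() {
    // Fine: 0 is a valid bit pattern for u32, so assert_zero_valid passes.
    let zero: u32 = unsafe { mem::zeroed() };
    assert_eq!(zero, 0);

    // Invalid: references must be non-null, so this would hit assert_zero_valid
    // and trap with "attempted to zero-initialize type `&u8`, which is invalid".
    // let bad: &u8 = unsafe { mem::zeroed() };
}
```
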
@@ -829,7 +775,10 @@ fn swap(bcx: &mut FunctionBuilder, v: Value) -> Value {
         size_of | pref_align_of | min_align_of | needs_drop | type_id | type_name, () {
             let const_val =
                 fx.tcx.const_eval_instance(ParamEnv::reveal_all(), instance, None).unwrap();
-            let val = crate::constant::trans_const_value(fx, const_val);
+            let val = crate::constant::trans_const_value(
+                fx,
+                ty::Const::from_value(fx.tcx, const_val, ret.layout().ty),
+            );
             ret.write_cvalue(fx, val);
         };
 
@@ -848,19 +797,35 @@ fn swap(bcx: &mut FunctionBuilder, v: Value) -> Value {
             ret.write_cvalue(fx, caller_location);
         };
 
-        _ if intrinsic.starts_with("atomic_fence"), () {};
-        _ if intrinsic.starts_with("atomic_singlethreadfence"), () {};
+        _ if intrinsic.starts_with("atomic_fence"), () {
+            crate::atomic_shim::lock_global_lock(fx);
+            crate::atomic_shim::unlock_global_lock(fx);
+        };
+        _ if intrinsic.starts_with("atomic_singlethreadfence"), () {
+            crate::atomic_shim::lock_global_lock(fx);
+            crate::atomic_shim::unlock_global_lock(fx);
+        };
         _ if intrinsic.starts_with("atomic_load"), (c ptr) {
+            crate::atomic_shim::lock_global_lock(fx);
+
             let inner_layout =
                 fx.layout_of(ptr.layout().ty.builtin_deref(true).unwrap().ty);
             let val = CValue::by_ref(Pointer::new(ptr.load_scalar(fx)), inner_layout);
             ret.write_cvalue(fx, val);
+
+            crate::atomic_shim::unlock_global_lock(fx);
         };
         _ if intrinsic.starts_with("atomic_store"), (v ptr, c val) {
+            crate::atomic_shim::lock_global_lock(fx);
+
             let dest = CPlace::for_ptr(Pointer::new(ptr), val.layout());
             dest.write_cvalue(fx, val);
+
+            crate::atomic_shim::unlock_global_lock(fx);
         };
         _ if intrinsic.starts_with("atomic_xchg"), <T> (v ptr, c src) {
+            crate::atomic_shim::lock_global_lock(fx);
+
             // Read old
             let clif_ty = fx.clif_type(T).unwrap();
             let old = fx.bcx.ins().load(clif_ty, MemFlags::new(), ptr, 0);
@@ -869,8 +834,12 @@ fn swap(bcx: &mut FunctionBuilder, v: Value) -> Value {
             // Write new
             let dest = CPlace::for_ptr(Pointer::new(ptr), src.layout());
             dest.write_cvalue(fx, src);
+
+            crate::atomic_shim::unlock_global_lock(fx);
         };
         _ if intrinsic.starts_with("atomic_cxchg"), <T> (v ptr, v test_old, v new) { // both atomic_cxchg_* and atomic_cxchgweak_*
+            crate::atomic_shim::lock_global_lock(fx);
+
             // Read old
             let clif_ty = fx.clif_type(T).unwrap();
             let old = fx.bcx.ins().load(clif_ty, MemFlags::new(), ptr, 0);
@@ -884,6 +853,8 @@ fn swap(bcx: &mut FunctionBuilder, v: Value) -> Value {
 
             let ret_val = CValue::by_val_pair(old, fx.bcx.ins().bint(types::I8, is_eq), ret.layout());
             ret.write_cvalue(fx, ret_val);
+
+            crate::atomic_shim::unlock_global_lock(fx);
         };
 
         _ if intrinsic.starts_with("atomic_xadd"), <T> (v ptr, v amount) {
@@ -896,12 +867,16 @@ fn swap(bcx: &mut FunctionBuilder, v: Value) -> Value {
             atomic_binop_return_old! (fx, band<T>(ptr, src) -> ret);
         };
         _ if intrinsic.starts_with("atomic_nand"), <T> (v ptr, v src) {
+            crate::atomic_shim::lock_global_lock(fx);
+
             let clif_ty = fx.clif_type(T).unwrap();
             let old = fx.bcx.ins().load(clif_ty, MemFlags::new(), ptr, 0);
             let and = fx.bcx.ins().band(old, src);
             let new = fx.bcx.ins().bnot(and);
             fx.bcx.ins().store(MemFlags::new(), new, ptr, 0);
             ret.write_cvalue(fx, CValue::by_val(old, fx.layout_of(T)));
+
+            crate::atomic_shim::unlock_global_lock(fx);
         };
         _ if intrinsic.starts_with("atomic_or"), <T> (v ptr, v src) {
             atomic_binop_return_old! (fx, bor<T>(ptr, src) -> ret);
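
Cranelift had no native atomic instructions at this point, so every atomic intrinsic is now bracketed by `crate::atomic_shim::lock_global_lock`/`unlock_global_lock` with plain loads and stores in between. A conceptual model in ordinary Rust of what, say, `atomic_xadd` amounts to under this shim (a sketch only; the real code emits Cranelift IR that calls the lock/unlock helpers):

```rust
use std::sync::Mutex;

// Stand-in for the single global lock the shim serializes all atomics on.
static GLOBAL_ATOMIC_LOCK: Mutex<()> = Mutex::new(());

/// Model of `atomic_binop_return_old!(fx, iadd<T>(ptr, amount) -> ret)` for u32:
/// lock, load the old value, apply the op, store the result, unlock, return old.
unsafe fn atomic_xadd_u32(ptr: *mut u32, amount: u32) -> u32 {
    let _guard = GLOBAL_ATOMIC_LOCK.lock().unwrap();
    let old = ptr.read();
    ptr.write(old.wrapping_add(amount));
    old
}

fn main() {
    let mut x = 40u32;
    let old = unsafe { atomic_xadd_u32(&mut x, 2) };
    assert_eq!((old, x), (40, 42));
}
```
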
@@ -944,155 +919,7 @@ fn swap(bcx: &mut FunctionBuilder, v: Value) -> Value {
             ret.write_cvalue(fx, val);
         };
 
-        simd_cast, (c a) {
-            let (lane_layout, lane_count) = lane_type_and_count(fx, a.layout(), intrinsic);
-            let (ret_lane_layout, ret_lane_count) = lane_type_and_count(fx, ret.layout(), intrinsic);
-            assert_eq!(lane_count, ret_lane_count);
-
-            let ret_lane_ty = fx.clif_type(ret_lane_layout.ty).unwrap();
-
-            let from_signed = type_sign(lane_layout.ty);
-            let to_signed = type_sign(ret_lane_layout.ty);
-
-            for lane in 0..lane_count {
-                let lane = mir::Field::new(lane.try_into().unwrap());
-
-                let a_lane = a.value_field(fx, lane).load_scalar(fx);
-                let res = clif_int_or_float_cast(fx, a_lane, from_signed, ret_lane_ty, to_signed);
-                ret.place_field(fx, lane).write_cvalue(fx, CValue::by_val(res, ret_lane_layout));
-            }
-        };
-
-        simd_eq, (c x, c y) {
-            simd_cmp!(fx, intrinsic, Equal(x, y) -> ret);
-        };
-        simd_ne, (c x, c y) {
-            simd_cmp!(fx, intrinsic, NotEqual(x, y) -> ret);
-        };
-        simd_lt, (c x, c y) {
-            simd_cmp!(fx, intrinsic, UnsignedLessThan|SignedLessThan(x, y) -> ret);
-        };
-        simd_le, (c x, c y) {
-            simd_cmp!(fx, intrinsic, UnsignedLessThanOrEqual|SignedLessThanOrEqual(x, y) -> ret);
-        };
-        simd_gt, (c x, c y) {
-            simd_cmp!(fx, intrinsic, UnsignedGreaterThan|SignedGreaterThan(x, y) -> ret);
-        };
-        simd_ge, (c x, c y) {
-            simd_cmp!(fx, intrinsic, UnsignedGreaterThanOrEqual|SignedGreaterThanOrEqual(x, y) -> ret);
-        };
-
-        // simd_shuffle32<T, U>(x: T, y: T, idx: [u32; 32]) -> U
-        _ if intrinsic.starts_with("simd_shuffle"), (c x, c y, o idx) {
-            let n: u32 = intrinsic["simd_shuffle".len()..].parse().unwrap();
-
-            assert_eq!(x.layout(), y.layout());
-            let layout = x.layout();
-
-            let (lane_type, lane_count) = lane_type_and_count(fx, layout, intrinsic);
-            let (ret_lane_type, ret_lane_count) = lane_type_and_count(fx, ret.layout(), intrinsic);
-
-            assert_eq!(lane_type, ret_lane_type);
-            assert_eq!(n, ret_lane_count);
-
-            let total_len = lane_count * 2;
-
-            let indexes = {
-                use rustc::mir::interpret::*;
-                let idx_const = crate::constant::mir_operand_get_const_val(fx, idx).expect("simd_shuffle* idx not const");
-
-                let idx_bytes = match idx_const.val {
-                    ty::ConstKind::Value(ConstValue::ByRef { alloc, offset }) => {
-                        let ptr = Pointer::new(AllocId(0 /* dummy */), offset);
-                        let size = Size::from_bytes(4 * u64::from(ret_lane_count) /* size_of([u32; ret_lane_count]) */);
-                        alloc.get_bytes(fx, ptr, size).unwrap()
-                    }
-                    _ => unreachable!("{:?}", idx_const),
-                };
-
-                (0..ret_lane_count).map(|i| {
-                    let i = usize::try_from(i).unwrap();
-                    let idx = rustc::mir::interpret::read_target_uint(
-                        fx.tcx.data_layout.endian,
-                        &idx_bytes[4*i.. 4*i + 4],
-                    ).expect("read_target_uint");
-                    u32::try_from(idx).expect("try_from u32")
-                }).collect::<Vec<u32>>()
-            };
-
-            for &idx in &indexes {
-                assert!(idx < total_len, "idx {} out of range 0..{}", idx, total_len);
-            }
-
-            for (out_idx, in_idx) in indexes.into_iter().enumerate() {
-                let in_lane = if in_idx < lane_count {
-                    x.value_field(fx, mir::Field::new(in_idx.try_into().unwrap()))
-                } else {
-                    y.value_field(fx, mir::Field::new((in_idx - lane_count).try_into().unwrap()))
-                };
-                let out_lane = ret.place_field(fx, mir::Field::new(out_idx));
-                out_lane.write_cvalue(fx, in_lane);
-            }
-        };
-
-        simd_extract, (c v, o idx) {
-            let idx_const = if let Some(idx_const) = crate::constant::mir_operand_get_const_val(fx, idx) {
-                idx_const
-            } else {
-                fx.tcx.sess.span_warn(
-                    fx.mir.span,
-                    "`#[rustc_arg_required_const(..)]` is not yet supported. Calling this function will panic.",
-                );
-                crate::trap::trap_panic(fx, "`#[rustc_arg_required_const(..)]` is not yet supported.");
-                return;
-            };
-
-            let idx = idx_const.val.try_to_bits(Size::from_bytes(4 /* u32*/)).expect(&format!("kind not scalar: {:?}", idx_const));
-            let (_lane_type, lane_count) = lane_type_and_count(fx, v.layout(), intrinsic);
-            if idx >= lane_count.into() {
-                fx.tcx.sess.span_fatal(fx.mir.span, &format!("[simd_extract] idx {} >= lane_count {}", idx, lane_count));
-            }
-
-            let ret_lane = v.value_field(fx, mir::Field::new(idx.try_into().unwrap()));
-            ret.write_cvalue(fx, ret_lane);
-        };
-
-        simd_add, (c x, c y) {
-            simd_int_flt_binop!(fx, intrinsic, iadd|fadd(x, y) -> ret);
-        };
-        simd_sub, (c x, c y) {
-            simd_int_flt_binop!(fx, intrinsic, isub|fsub(x, y) -> ret);
-        };
-        simd_mul, (c x, c y) {
-            simd_int_flt_binop!(fx, intrinsic, imul|fmul(x, y) -> ret);
-        };
-        simd_div, (c x, c y) {
-            simd_int_flt_binop!(fx, intrinsic, udiv|sdiv|fdiv(x, y) -> ret);
-        };
-        simd_shl, (c x, c y) {
-            simd_int_binop!(fx, intrinsic, ishl(x, y) -> ret);
-        };
-        simd_shr, (c x, c y) {
-            simd_int_binop!(fx, intrinsic, ushr|sshr(x, y) -> ret);
-        };
-        simd_and, (c x, c y) {
-            simd_int_binop!(fx, intrinsic, band(x, y) -> ret);
-        };
-        simd_or, (c x, c y) {
-            simd_int_binop!(fx, intrinsic, bor(x, y) -> ret);
-        };
-        simd_xor, (c x, c y) {
-            simd_int_binop!(fx, intrinsic, bxor(x, y) -> ret);
-        };
-
-        simd_fmin, (c x, c y) {
-            simd_flt_binop!(fx, intrinsic, fmin(x, y) -> ret);
-        };
-        simd_fmax, (c x, c y) {
-            simd_flt_binop!(fx, intrinsic, fmax(x, y) -> ret);
-        };
-
-        try, (v f, v data, v _local_ptr) {
+        try, (v f, v data, v _catch_fn) {
             // FIXME once unwinding is supported, change this to actually catch panics
             let f_sig = fx.bcx.func.import_signature(Signature {
                 call_conv: CallConv::triple_default(fx.triple()),
@@ -1102,14 +929,14 @@ fn swap(bcx: &mut FunctionBuilder, v: Value) -> Value {
 
             fx.bcx.ins().call_indirect(f_sig, f, &[data]);
 
-            let ret_val = CValue::const_val(fx, ret.layout().ty, 0);
+            let ret_val = CValue::const_val(fx, ret.layout(), 0);
             ret.write_cvalue(fx, ret_val);
         };
     }
 
     if let Some((_, dest)) = destination {
-        let ret_ebb = fx.get_ebb(dest);
-        fx.bcx.ins().jump(ret_ebb, &[]);
+        let ret_block = fx.get_block(dest);
+        fx.bcx.ins().jump(ret_block, &[]);
     } else {
         trap_unreachable(fx, "[corruption] Diverging intrinsic returned.");
     }
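
On the `try` intrinsic rewritten near the end (its third argument is now a catch function pointer rather than a local pointer): this is what `std::panic::catch_unwind` bottoms out in. With the stub above, the callee is simply invoked and `0` ("no panic") is returned, so the non-panicking path behaves normally while an actual panic still aborts under this backend until unwinding is implemented (see the FIXME):

```rust
fn main() {
    // Works with the stub: the closure returns normally and `try` reports 0,
    // so catch_unwind yields Ok(2).
    let res = std::panic::catch_unwind(|| 1 + 1);
    assert_eq!(res.ok(), Some(2));
}
```
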