macro_rules! intrinsic_pat {
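// `intrinsic_arg!` unpacks a single argument for an arm of the match below. Roughly
// (judging from the uses further down): `c` leaves the argument as a `CValue`, while
// `v` loads it into a raw Cranelift scalar `Value`.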
macro_rules! intrinsic_arg {
    (c $fx:expr, $arg:ident) => {
    (v $fx:expr, $arg:ident) => {
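// `intrinsic_substs!` binds the requested generic arguments of the intrinsic to local
// names, pulling each type out of the substitution list by index.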
macro_rules! intrinsic_substs {
    ($substs:expr, $index:expr,) => {};
    ($substs:expr, $index:expr, $first:ident $(,$rest:ident)*) => {
        let $first = $substs.type_at($index);
        intrinsic_substs!($substs, $index+1, $($rest),*);
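// `intrinsic_match!` is the small DSL that drives the big match below: each arm names
// one or more intrinsics (optionally with an extra guard), the generic parameters it
// wants bound (`<T>`), and its arguments tagged with `c`/`v`, followed by the body that
// emits the corresponding Cranelift code.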
macro_rules! intrinsic_match {
    ($fx:expr, $intrinsic:expr, $substs:expr, $args:expr, $(
        $($name:tt)|+ $(if $cond:expr)?, $(<$($subst:ident),*>)? ($($a:ident $arg:ident),*) $content:block;
            $(intrinsic_pat!($name))|* $(if $cond)? => {
                #[allow(unused_parens, non_snake_case)]
                intrinsic_substs!($substs, 0, $($subst),*);
                if let [$($arg),*] = *$args {
                    $(intrinsic_arg!($a $fx, $arg)),*
                    #[warn(unused_parens, non_snake_case)]
                    bug!("wrong number of args for intrinsic {:?}", $intrinsic);
            _ => unimpl!("unsupported intrinsic {}", $intrinsic),
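// `atomic_binop_return_old!` emits a read-modify-write sequence for the `atomic_*`
// binop intrinsics and hands the previous value back to the caller. Note that it is
// lowered as a plain load / op / store, not as a real atomic operation.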
macro_rules! atomic_binop_return_old {
    ($fx:expr, $op:ident<$T:ident>($ptr:ident, $src:ident) -> $ret:ident) => {
        let clif_ty = $fx.clif_type($T).unwrap();
        let old = $fx.bcx.ins().load(clif_ty, MemFlags::new(), $ptr, 0);
        let new = $fx.bcx.ins().$op(old, $src);
        $fx.bcx.ins().store(MemFlags::new(), new, $ptr, 0);
        $ret.write_cvalue($fx, CValue::ByVal(old, $fx.layout_of($T)));
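// `atomic_minmax!` does the same load/select/store dance for the `atomic_{min,max}`
// family: the comparison condition decides which value is written back, and the old
// value is returned.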
macro_rules! atomic_minmax {
    ($fx:expr, $cc:expr, <$T:ident> ($ptr:ident, $src:ident) -> $ret:ident) => {
        let clif_ty = $fx.clif_type($T).unwrap();
        let old = $fx.bcx.ins().load(clif_ty, MemFlags::new(), $ptr, 0);
        let cmp = $fx.bcx.ins().icmp($cc, old, $src);
        let new = crate::common::codegen_select(&mut $fx.bcx, cmp, old, $src);
        $fx.bcx.ins().store(MemFlags::new(), new, $ptr, 0);
        let ret_val = CValue::ByVal(old, $ret.layout());
        $ret.write_cvalue($fx, ret_val);
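/// Translates a call to a compiler intrinsic into Cranelift IR, writing the result
/// (if any) into the call's destination place and jumping to its return block.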
pub fn codegen_intrinsic_call<'a, 'tcx: 'a>(
    fx: &mut FunctionCx<'a, 'tcx, impl Backend>,
    args: Vec<CValue<'tcx>>,
    destination: Option<(CPlace<'tcx>, BasicBlock)>,
    let intrinsic = fx.tcx.item_name(def_id).as_str();
    let intrinsic = &intrinsic[..];

    let ret = match destination {
        Some((place, _)) => place,
            // Insert non-returning intrinsics here
                trap_panic(&mut fx.bcx);
                trap_unreachable(&mut fx.bcx);
            _ => unimplemented!("unsupported intrinsic {}", intrinsic),
    let u64_layout = fx.layout_of(fx.tcx.types.u64);
    let usize_layout = fx.layout_of(fx.tcx.types.usize);
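    // The bulk of the intrinsics are handled by the intrinsic_match! DSL defined above:
    // each arm picks its intrinsics, binds generic parameters and arguments, and emits
    // the Cranelift instructions directly.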
        fx, intrinsic, substs, args,

        arith_offset, (v base, v offset) {
            let res = fx.bcx.ins().iadd(base, offset);
            let res = CValue::ByVal(res, ret.layout());
            ret.write_cvalue(fx, res);
        likely | unlikely, (c a) {
            ret.write_cvalue(fx, a);
            fx.bcx.ins().debugtrap();
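        // copy/copy_nonoverlapping take an element count, so the byte amount is
        // count * size_of::<elem_ty>(); nonoverlapping maps to memcpy, overlapping to memmove.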
        copy | copy_nonoverlapping, <elem_ty> (v src, v dst, v count) {
            let elem_size: u64 = fx.layout_of(elem_ty).size.bytes();
                .iconst(fx.pointer_type, elem_size as i64);
            assert_eq!(args.len(), 3);
            let byte_amount = fx.bcx.ins().imul(count, elem_size);

            if intrinsic.ends_with("_nonoverlapping") {
                fx.bcx.call_memcpy(fx.module.target_config(), dst, src, byte_amount);
                fx.bcx.call_memmove(fx.module.target_config(), dst, src, byte_amount);
        discriminant_value, (c val) {
            let discr = crate::base::trans_get_discriminant(fx, val, ret.layout());
            ret.write_cvalue(fx, discr);
            let size_of = fx.layout_of(T).size.bytes();
            let size_of = CValue::const_val(fx, usize_layout.ty, size_of as i64);
            ret.write_cvalue(fx, size_of);
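        // size_of_val/min_align_of_val have to handle unsized tails: for unsized types
        // the size/alignment is computed at runtime from the pointer's metadata,
        // otherwise it is a compile-time constant taken from the layout.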
        size_of_val, <T> (c ptr) {
            let layout = fx.layout_of(T);
            let size = if layout.is_unsized() {
                let (_ptr, info) = ptr.load_value_pair(fx);
                let (size, _align) = crate::unsize::size_and_align_of_dst(fx, layout.ty, info);
                    .iconst(fx.pointer_type, layout.size.bytes() as i64)
            ret.write_cvalue(fx, CValue::ByVal(size, usize_layout));
        min_align_of, <T> () {
            let min_align = fx.layout_of(T).align.abi.bytes();
            let min_align = CValue::const_val(fx, usize_layout.ty, min_align as i64);
            ret.write_cvalue(fx, min_align);
        min_align_of_val, <T> (c ptr) {
            let layout = fx.layout_of(T);
            let align = if layout.is_unsized() {
                let (_ptr, info) = ptr.load_value_pair(fx);
                let (_size, align) = crate::unsize::size_and_align_of_dst(fx, layout.ty, info);
                    .iconst(fx.pointer_type, layout.align.abi.bytes() as i64)
            ret.write_cvalue(fx, CValue::ByVal(align, usize_layout));
            let type_id = fx.tcx.type_id_hash(T);
            let type_id = CValue::const_val(fx, u64_layout.ty, type_id as i64);
            ret.write_cvalue(fx, type_id);
        _ if intrinsic.starts_with("unchecked_") || intrinsic == "exact_div", (c x, c y) {
            let bin_op = match intrinsic {
                "unchecked_div" | "exact_div" => BinOp::Div,
                "unchecked_rem" => BinOp::Rem,
                "unchecked_shl" => BinOp::Shl,
                "unchecked_shr" => BinOp::Shr,
                _ => unimplemented!("intrinsic {}", intrinsic),
            let res = match ret.layout().ty.sty {
                ty::Uint(_) => crate::base::trans_int_binop(
                ty::Int(_) => crate::base::trans_int_binop(
            ret.write_cvalue(fx, res);
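        // The *_with_overflow intrinsics return a (result, overflowed) pair via
        // trans_checked_int_binop, while the overflowing_* intrinsics wrap silently and
        // only return the result.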
        _ if intrinsic.ends_with("_with_overflow"), <T> (c x, c y) {
            assert_eq!(x.layout().ty, y.layout().ty);
            let bin_op = match intrinsic {
                "add_with_overflow" => BinOp::Add,
                "sub_with_overflow" => BinOp::Sub,
                "mul_with_overflow" => BinOp::Mul,
                _ => unimplemented!("intrinsic {}", intrinsic),
            let res = match T.sty {
                ty::Uint(_) => crate::base::trans_checked_int_binop(
                ty::Int(_) => crate::base::trans_checked_int_binop(
            ret.write_cvalue(fx, res);
        _ if intrinsic.starts_with("overflowing_"), <T> (c x, c y) {
            assert_eq!(x.layout().ty, y.layout().ty);
            let bin_op = match intrinsic {
                "overflowing_add" => BinOp::Add,
                "overflowing_sub" => BinOp::Sub,
                "overflowing_mul" => BinOp::Mul,
                _ => unimplemented!("intrinsic {}", intrinsic),
            let res = match T.sty {
                ty::Uint(_) => crate::base::trans_int_binop(
                ty::Int(_) => crate::base::trans_int_binop(
            ret.write_cvalue(fx, res);
        rotate_left, <T> (v x, v y) {
            let layout = fx.layout_of(T);
            let res = fx.bcx.ins().rotl(x, y);
            ret.write_cvalue(fx, CValue::ByVal(res, layout));
        rotate_right, <T> (v x, v y) {
            let layout = fx.layout_of(T);
            let res = fx.bcx.ins().rotr(x, y);
            ret.write_cvalue(fx, CValue::ByVal(res, layout));
        offset, (c base, v offset) {
            let pointee_ty = base.layout().ty.builtin_deref(true).unwrap().ty;
            let pointee_size = fx.layout_of(pointee_ty).size.bytes();
            let ptr_diff = fx.bcx.ins().imul_imm(offset, pointee_size as i64);
            let base_val = base.load_scalar(fx);
            let res = fx.bcx.ins().iadd(base_val, ptr_diff);
            ret.write_cvalue(fx, CValue::ByVal(res, args[0].layout()));
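        // transmute is lowered by spilling the source to the stack and reinterpreting
        // that memory with the destination layout; no bytes are changed.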
        transmute, <src_ty, dst_ty> (c from) {
            assert_eq!(from.layout().ty, src_ty);
            let addr = from.force_stack(fx);
            let dst_layout = fx.layout_of(dst_ty);
            ret.write_cvalue(fx, CValue::ByRef(addr, dst_layout))
            let layout = fx.layout_of(T);
            let inited_place = CPlace::new_stack_slot(fx, T);
            let addr = inited_place.to_addr(fx);
            let zero_val = fx.bcx.ins().iconst(types::I8, 0);
            let len_val = fx.bcx.ins().iconst(pointer_ty(fx.tcx), layout.size.bytes() as i64);
            fx.bcx.call_memset(fx.module.target_config(), addr, zero_val, len_val);

            let inited_val = inited_place.to_cvalue(fx);
            ret.write_cvalue(fx, inited_val);
        write_bytes, (v dst, v val, v count) {
            fx.bcx.call_memset(fx.module.target_config(), dst, val, count);
            let uninit_place = CPlace::new_stack_slot(fx, T);
            let uninit_val = uninit_place.to_cvalue(fx);
            ret.write_cvalue(fx, uninit_val);
        ctlz | ctlz_nonzero, <T> (v arg) {
            let res = CValue::ByVal(fx.bcx.ins().clz(arg), fx.layout_of(T));
            ret.write_cvalue(fx, res);
        cttz | cttz_nonzero, <T> (v arg) {
            let res = CValue::ByVal(fx.bcx.ins().ctz(arg), fx.layout_of(T));
            ret.write_cvalue(fx, res);
            let res = CValue::ByVal(fx.bcx.ins().popcnt(arg), fx.layout_of(T));
            ret.write_cvalue(fx, res);
        bitreverse, <T> (v arg) {
            let res = CValue::ByVal(fx.bcx.ins().bitrev(arg), fx.layout_of(T));
            ret.write_cvalue(fx, res);
            let needs_drop = if T.needs_drop(fx.tcx, ParamEnv::reveal_all()) {
            let needs_drop = CValue::const_val(fx, fx.tcx.types.bool, needs_drop);
            ret.write_cvalue(fx, needs_drop);
        panic_if_uninhabited, <T> () {
            if fx.layout_of(T).abi.is_uninhabited() {
                crate::trap::trap_panic(&mut fx.bcx);
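        // The atomic_* intrinsics below are currently lowered as ordinary loads and
        // stores rather than true atomic operations, and the fence variants are no-ops.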
        _ if intrinsic.starts_with("atomic_fence"), () {};
        _ if intrinsic.starts_with("atomic_singlethreadfence"), () {};
        _ if intrinsic.starts_with("atomic_load"), (c ptr) {
                fx.layout_of(ptr.layout().ty.builtin_deref(true).unwrap().ty);
            let val = CValue::ByRef(ptr.load_scalar(fx), inner_layout);
            ret.write_cvalue(fx, val);
        _ if intrinsic.starts_with("atomic_store"), (v ptr, c val) {
            let dest = CPlace::Addr(ptr, None, val.layout());
            dest.write_cvalue(fx, val);
        _ if intrinsic.starts_with("atomic_xchg"), <T> (v ptr, c src) {
            let clif_ty = fx.clif_type(T).unwrap();
            let old = fx.bcx.ins().load(clif_ty, MemFlags::new(), ptr, 0);
            ret.write_cvalue(fx, CValue::ByVal(old, fx.layout_of(T)));
            let dest = CPlace::Addr(ptr, None, src.layout());
            dest.write_cvalue(fx, src);
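        // atomic_cxchg/atomic_cxchgweak: compare the current value with the expected
        // one, conditionally store the replacement, and return the
        // (old value, success flag) pair.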
        _ if intrinsic.starts_with("atomic_cxchg"), <T> (v ptr, v test_old, v new) { // both atomic_cxchg_* and atomic_cxchgweak_*
            let clif_ty = fx.clif_type(T).unwrap();
            let old = fx.bcx.ins().load(clif_ty, MemFlags::new(), ptr, 0);
            let is_eq = fx.bcx.ins().icmp(IntCC::Equal, old, test_old);
            let new = crate::common::codegen_select(&mut fx.bcx, is_eq, old, new); // Keep old if not equal to test_old
            fx.bcx.ins().store(MemFlags::new(), new, ptr, 0);
            let ret_val = CValue::ByValPair(old, fx.bcx.ins().bint(types::I8, is_eq), ret.layout());
            ret.write_cvalue(fx, ret_val);
        _ if intrinsic.starts_with("atomic_xadd"), <T> (v ptr, v amount) {
            atomic_binop_return_old!(fx, iadd<T>(ptr, amount) -> ret);
        _ if intrinsic.starts_with("atomic_xsub"), <T> (v ptr, v amount) {
            atomic_binop_return_old!(fx, isub<T>(ptr, amount) -> ret);
        _ if intrinsic.starts_with("atomic_and"), <T> (v ptr, v src) {
            atomic_binop_return_old!(fx, band<T>(ptr, src) -> ret);
        _ if intrinsic.starts_with("atomic_nand"), <T> (v ptr, v src) {
            atomic_binop_return_old!(fx, bnand<T>(ptr, src) -> ret);
        _ if intrinsic.starts_with("atomic_or"), <T> (v ptr, v src) {
            atomic_binop_return_old!(fx, bor<T>(ptr, src) -> ret);
        _ if intrinsic.starts_with("atomic_xor"), <T> (v ptr, v src) {
            atomic_binop_return_old!(fx, bxor<T>(ptr, src) -> ret);

        _ if intrinsic.starts_with("atomic_max"), <T> (v ptr, v src) {
            atomic_minmax!(fx, IntCC::SignedGreaterThan, <T> (ptr, src) -> ret);
        _ if intrinsic.starts_with("atomic_umax"), <T> (v ptr, v src) {
            atomic_minmax!(fx, IntCC::UnsignedGreaterThan, <T> (ptr, src) -> ret);
        _ if intrinsic.starts_with("atomic_min"), <T> (v ptr, v src) {
            atomic_minmax!(fx, IntCC::SignedLessThan, <T> (ptr, src) -> ret);
        _ if intrinsic.starts_with("atomic_umin"), <T> (v ptr, v src) {
            atomic_minmax!(fx, IntCC::UnsignedLessThan, <T> (ptr, src) -> ret);
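    // If the intrinsic has a return block, jump to it; otherwise it diverges and the
    // current block is terminated with a trap.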
    if let Some((_, dest)) = destination {
        let ret_ebb = fx.get_ebb(dest);
        fx.bcx.ins().jump(ret_ebb, &[]);
        trap_unreachable(&mut fx.bcx);