use crate::prelude::*;

macro_rules! intrinsic_pat {
    (_) => {
        _
    };
    ($name:ident) => {
        stringify!($name)
    };
}

macro_rules! intrinsic_arg {
    (c $fx:expr, $arg:ident) => {
        $arg
    };
    (v $fx:expr, $arg:ident) => {
        $arg.load_value($fx)
    };
}
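
// Example: for an arm declared as `(v base, v offset)`, `intrinsic_arg!(v fx, base)`
// expands to `base.load_value(fx)`, handing the arm body a raw Cranelift `Value`;
// a `c` argument is passed through unchanged as a `CValue`.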

macro_rules! intrinsic_substs {
    ($substs:expr, $index:expr,) => {};
    ($substs:expr, $index:expr, $first:ident $(,$rest:ident)*) => {
        let $first = $substs.type_at($index);
        intrinsic_substs!($substs, $index + 1, $($rest),*);
    };
}
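
// Example: `intrinsic_substs!(substs, 0, src_ty, dst_ty)` expands to
//     let src_ty = substs.type_at(0);
//     let dst_ty = substs.type_at(1);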

macro_rules! intrinsic_match {
    ($fx:expr, $intrinsic:expr, $substs:expr, $args:expr, $(
        $($name:tt)|+ $(if $cond:expr)?, $(<$($subst:ident),*>)? ($($a:ident $arg:ident),*) $content:block;
    )*) => {
        match $intrinsic {
            $(
                $(intrinsic_pat!($name))|* $(if $cond)? => {
                    #[allow(unused_parens, non_snake_case)]
                    {
                        $(intrinsic_substs!($substs, 0, $($subst),*);)?
                        if let [$($arg),*] = *$args {
                            let ($($arg),*) = ($(intrinsic_arg!($a $fx, $arg)),*);
                            #[warn(unused_parens, non_snake_case)]
                            { $content }
                        } else {
                            bug!("wrong number of args for intrinsic {:?}", $intrinsic);
                        }
                    }
                }
            )*
            _ => unimpl!("unsupported intrinsic {}", $intrinsic),
        }
    };
}
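
// Example: an arm written as
//     size_of, <T> () { /* body */ };
// matches the intrinsic name "size_of", binds `T` to `substs.type_at(0)`,
// checks that the argument slice is empty, and then runs the body.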

macro_rules! atomic_binop_return_old {
    ($fx:expr, $op:ident<$T:ident>($ptr:ident, $src:ident) -> $ret:ident) => {
        let clif_ty = $fx.cton_type($T).unwrap();
        let old = $fx.bcx.ins().load(clif_ty, MemFlags::new(), $ptr, 0);
        let new = $fx.bcx.ins().$op(old, $src);
        $fx.bcx.ins().store(MemFlags::new(), new, $ptr, 0);
        $ret.write_cvalue($fx, CValue::ByVal(old, $fx.layout_of($T)));
    };
}
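
// Example: `atomic_binop_return_old!(fx, iadd<T>(ptr, amount) -> ret);` loads
// `*ptr`, stores `*ptr + amount` back, and writes the old value to `ret`. Note
// that the load and store are plain memory accesses, so the sequence is only
// correct in the absence of concurrent access to `*ptr`.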

macro_rules! atomic_minmax {
    ($fx:expr, $cc:expr, <$T:ident> ($ptr:ident, $src:ident) -> $ret:ident) => {
        // Read old
        let clif_ty = $fx.cton_type($T).unwrap();
        let old = $fx.bcx.ins().load(clif_ty, MemFlags::new(), $ptr, 0);

        // Compare with the caller-supplied condition; keep `old` when it wins
        let keep_old = $fx.bcx.ins().icmp($cc, old, $src);
        let new = crate::common::codegen_select(&mut $fx.bcx, keep_old, old, $src);

        // Write new
        $fx.bcx.ins().store(MemFlags::new(), new, $ptr, 0);

        // Return old
        let ret_val = CValue::ByVal(old, $ret.layout());
        $ret.write_cvalue($fx, ret_val);
    };
}
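
// Example: `atomic_minmax!(fx, IntCC::UnsignedGreaterThan, <T> (ptr, src) -> ret);`
// stores `max(*ptr, src)` (unsigned) back to `*ptr` and returns the old value.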

pub fn codegen_intrinsic_call<'a, 'tcx: 'a>(
    fx: &mut FunctionCx<'a, 'tcx, impl Backend>,
    def_id: DefId,
    substs: &'tcx Substs,
    args: Vec<CValue<'tcx>>,
    destination: Option<(CPlace<'tcx>, BasicBlock)>,
) {
    let intrinsic = fx.tcx.item_name(def_id).as_str();
    let intrinsic = &intrinsic[..];

    let ret = match destination {
        Some((place, _)) => place,
        None => {
            // Insert non returning intrinsics here
            match intrinsic {
                "abort" | "unreachable" => fx.bcx.ins().trap(TrapCode::User(!0 - 1)),
                _ => unimplemented!("unsupported intrinsic {}", intrinsic),
            };
            return;
        }
    };

    let u64_layout = fx.layout_of(fx.tcx.types.u64);
    let usize_layout = fx.layout_of(fx.tcx.types.usize);

    intrinsic_match! {
        fx, intrinsic, substs, args,

        assume, (c _a) {};
        arith_offset, (v base, v offset) {
            let res = fx.bcx.ins().iadd(base, offset);
            let res = CValue::ByVal(res, ret.layout());
            ret.write_cvalue(fx, res);
        };
        likely | unlikely, (c a) {
            ret.write_cvalue(fx, a);
        };
        copy | copy_nonoverlapping, <elem_ty> (v src, v dst, v count) {
            let elem_size: u64 = fx.layout_of(elem_ty).size.bytes();
            let elem_size = fx
                .bcx
                .ins()
                .iconst(fx.module.pointer_type(), elem_size as i64);
            assert_eq!(args.len(), 3);
            let byte_amount = fx.bcx.ins().imul(count, elem_size);

            if intrinsic.ends_with("_nonoverlapping") {
                fx.bcx.call_memcpy(fx.isa, dst, src, byte_amount);
            } else {
                fx.bcx.call_memmove(fx.isa, dst, src, byte_amount);
            }
        };
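        // In the copy arm above, `count` is an element count rather than a byte
        // count, hence the multiplication by `elem_size`; only the
        // non-overlapping variant may be lowered to memcpy.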
        discriminant_value, (c val) {
            let discr = crate::base::trans_get_discriminant(fx, val, ret.layout());
            ret.write_cvalue(fx, discr);
        };
        size_of, <T> () {
            let size_of = fx.layout_of(T).size.bytes();
            let size_of = CValue::const_val(fx, usize_layout.ty, size_of as i64);
            ret.write_cvalue(fx, size_of);
        };
        size_of_val, <T> (c ptr) {
            let layout = fx.layout_of(T);
            let size = match &layout.ty.sty {
                _ if !layout.is_unsized() => fx
                    .bcx
                    .ins()
                    .iconst(fx.module.pointer_type(), layout.size.bytes() as i64),
                ty::Slice(elem) => {
                    let len = ptr.load_value_pair(fx).1;
                    let elem_size = fx.layout_of(elem).size.bytes();
                    fx.bcx.ins().imul_imm(len, elem_size as i64)
                }
                ty::Dynamic(..) => crate::vtable::size_of_obj(fx, ptr),
                ty => bug!("size_of_val for unknown unsized type {:?}", ty),
            };
            ret.write_cvalue(fx, CValue::ByVal(size, usize_layout));
        };
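        // In the unsized cases above, the dynamic size comes from the
        // fat-pointer metadata: slice length times element size for `[T]`, a
        // vtable lookup for `dyn Trait`.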
        min_align_of, <T> () {
            let min_align = fx.layout_of(T).align.abi();
            let min_align = CValue::const_val(fx, usize_layout.ty, min_align as i64);
            ret.write_cvalue(fx, min_align);
        };
        min_align_of_val, <T> (c ptr) {
            let layout = fx.layout_of(T);
            let align = match &layout.ty.sty {
                _ if !layout.is_unsized() => fx
                    .bcx
                    .ins()
                    .iconst(fx.module.pointer_type(), layout.align.abi() as i64),
                ty::Slice(elem) => {
                    let align = fx.layout_of(elem).align.abi() as i64;
                    fx.bcx.ins().iconst(fx.module.pointer_type(), align)
                }
                ty::Dynamic(..) => crate::vtable::min_align_of_obj(fx, ptr),
                ty => unimplemented!("min_align_of_val for {:?}", ty),
            };
            ret.write_cvalue(fx, CValue::ByVal(align, usize_layout));
        };
        type_id, <T> () {
            let type_id = fx.tcx.type_id_hash(T);
            let type_id = CValue::const_val(fx, u64_layout.ty, type_id as i64);
            ret.write_cvalue(fx, type_id);
        };
        _ if intrinsic.starts_with("unchecked_"), (c x, c y) {
            let bin_op = match intrinsic {
                "unchecked_div" => BinOp::Div,
                "unchecked_rem" => BinOp::Rem,
                "unchecked_shl" => BinOp::Shl,
                "unchecked_shr" => BinOp::Shr,
                _ => unimplemented!("intrinsic {}", intrinsic),
            };
            let res = match ret.layout().ty.sty {
                ty::Uint(_) => crate::base::trans_int_binop(fx, bin_op, x, y, ret.layout().ty, false),
                ty::Int(_) => crate::base::trans_int_binop(fx, bin_op, x, y, ret.layout().ty, true),
                _ => panic!(),
            };
            ret.write_cvalue(fx, res);
        };
        _ if intrinsic.ends_with("_with_overflow"), <T> (c x, c y) {
            assert_eq!(x.layout().ty, y.layout().ty);
            let bin_op = match intrinsic {
                "add_with_overflow" => BinOp::Add,
                "sub_with_overflow" => BinOp::Sub,
                "mul_with_overflow" => BinOp::Mul,
                _ => unimplemented!("intrinsic {}", intrinsic),
            };
            let res = match T.sty {
                ty::Uint(_) => crate::base::trans_checked_int_binop(fx, bin_op, x, y, ret.layout().ty, false),
                ty::Int(_) => crate::base::trans_checked_int_binop(fx, bin_op, x, y, ret.layout().ty, true),
                _ => panic!(),
            };
            ret.write_cvalue(fx, res);
        };
        _ if intrinsic.starts_with("overflowing_"), <T> (c x, c y) {
            assert_eq!(x.layout().ty, y.layout().ty);
            let bin_op = match intrinsic {
                "overflowing_add" => BinOp::Add,
                "overflowing_sub" => BinOp::Sub,
                "overflowing_mul" => BinOp::Mul,
                _ => unimplemented!("intrinsic {}", intrinsic),
            };
            let res = match T.sty {
                ty::Uint(_) => crate::base::trans_int_binop(fx, bin_op, x, y, ret.layout().ty, false),
                ty::Int(_) => crate::base::trans_int_binop(fx, bin_op, x, y, ret.layout().ty, true),
                _ => panic!(),
            };
            ret.write_cvalue(fx, res);
        };
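        // The three integer families above share one shape: derive a `BinOp`
        // from the intrinsic name, then dispatch on the signedness of the
        // operand type. Only the `*_with_overflow` family uses the checked
        // variant, whose result also carries an overflow flag.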
        offset, (v base, v offset) {
            let res = fx.bcx.ins().iadd(base, offset);
            ret.write_cvalue(fx, CValue::ByVal(res, args[0].layout()));
        };
        transmute, <src_ty, dst_ty> (c from) {
            assert_eq!(from.layout().ty, src_ty);
            let addr = from.force_stack(fx);
            let dst_layout = fx.layout_of(dst_ty);
            ret.write_cvalue(fx, CValue::ByRef(addr, dst_layout))
        };
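        // The transmute arm above works by spilling the source to a stack slot
        // and re-reading the same bytes at the destination layout.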
        init, <T> () {
            let layout = fx.layout_of(T);
            let stack_slot = fx.bcx.create_stack_slot(StackSlotData {
                kind: StackSlotKind::ExplicitSlot,
                size: layout.size.bytes() as u32,
                offset: None,
            });

            let addr = fx.bcx.ins().stack_addr(pointer_ty(fx.tcx), stack_slot, 0);
            let zero_val = fx.bcx.ins().iconst(types::I8, 0);
            let len_val = fx.bcx.ins().iconst(pointer_ty(fx.tcx), layout.size.bytes() as i64);
            fx.bcx.call_memset(fx.isa, addr, zero_val, len_val);

            let zeroed_place = CPlace::from_stack_slot(fx, stack_slot, T);
            let zeroed_val = zeroed_place.to_cvalue(fx);
            ret.write_cvalue(fx, zeroed_val);
        };
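        // The init arm above thus yields a fully zeroed value: a fresh stack
        // slot is memset to zero and then read back at the layout of `T`.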
        write_bytes, (v dst, v val, v count) {
            fx.bcx.call_memset(fx.isa, dst, val, count);
        };
        uninit, <T> () {
            let layout = fx.layout_of(T);
            let stack_slot = fx.bcx.create_stack_slot(StackSlotData {
                kind: StackSlotKind::ExplicitSlot,
                size: layout.size.bytes() as u32,
                offset: None,
            });

            let uninit_place = CPlace::from_stack_slot(fx, stack_slot, T);
            let uninit_val = uninit_place.to_cvalue(fx);
            ret.write_cvalue(fx, uninit_val);
        };
        ctlz | ctlz_nonzero, <T> (v arg) {
            let res = CValue::ByVal(fx.bcx.ins().clz(arg), fx.layout_of(T));
            ret.write_cvalue(fx, res);
        };
        cttz | cttz_nonzero, <T> (v arg) {
            // Count *trailing* zeros, so this must be ctz, not clz.
            let res = CValue::ByVal(fx.bcx.ins().ctz(arg), fx.layout_of(T));
            ret.write_cvalue(fx, res);
        };
        ctpop, <T> (v arg) {
            let res = CValue::ByVal(fx.bcx.ins().popcnt(arg), fx.layout_of(T));
            ret.write_cvalue(fx, res);
        };
        bitreverse, <T> (v arg) {
            let res = CValue::ByVal(fx.bcx.ins().bitrev(arg), fx.layout_of(T));
            ret.write_cvalue(fx, res);
        };
        needs_drop, <T> () {
            let needs_drop = if T.needs_drop(fx.tcx, ParamEnv::reveal_all()) { 1 } else { 0 };
            let needs_drop = CValue::const_val(fx, fx.tcx.types.bool, needs_drop);
            ret.write_cvalue(fx, needs_drop);
        };

        _ if intrinsic.starts_with("atomic_fence"), () {};
        _ if intrinsic.starts_with("atomic_singlethreadfence"), () {};
        _ if intrinsic.starts_with("atomic_load"), (c ptr) {
            let inner_layout =
                fx.layout_of(ptr.layout().ty.builtin_deref(true).unwrap().ty);
            let val = CValue::ByRef(ptr.load_value(fx), inner_layout);
            ret.write_cvalue(fx, val);
        };
        _ if intrinsic.starts_with("atomic_store"), (v ptr, c val) {
            let dest = CPlace::Addr(ptr, None, val.layout());
            dest.write_cvalue(fx, val);
        };
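        // Both atomic_load and atomic_store above lower to ordinary reads and
        // writes, i.e. without any atomic ordering guarantees.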
        _ if intrinsic.starts_with("atomic_xchg"), <T> (v ptr, c src) {
            // Read old
            let clif_ty = fx.cton_type(T).unwrap();
            let old = fx.bcx.ins().load(clif_ty, MemFlags::new(), ptr, 0);
            ret.write_cvalue(fx, CValue::ByVal(old, fx.layout_of(T)));

            // Write new
            let dest = CPlace::Addr(ptr, None, src.layout());
            dest.write_cvalue(fx, src);
        };
        _ if intrinsic.starts_with("atomic_cxchg"), <T> (v ptr, v test_old, v new) { // both atomic_cxchg_* and atomic_cxchgweak_*
            // Read old
            let clif_ty = fx.cton_type(T).unwrap();
            let old = fx.bcx.ins().load(clif_ty, MemFlags::new(), ptr, 0);

            // Compare
            let is_eq = fx.bcx.ins().icmp(IntCC::Equal, old, test_old);
            let new = crate::common::codegen_select(&mut fx.bcx, is_eq, old, new); // Keep old if not equal to test_old

            // Write new
            fx.bcx.ins().store(MemFlags::new(), new, ptr, 0);

            let ret_val = CValue::ByValPair(old, fx.bcx.ins().bint(types::I8, is_eq), ret.layout());
            ret.write_cvalue(fx, ret_val);
        };
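        // The cxchg arms above return an `(old_value, success)` pair, hence the
        // `ByValPair`; `bint` widens the comparison flag to an integer boolean.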
        _ if intrinsic.starts_with("atomic_xadd"), <T> (v ptr, v amount) {
            atomic_binop_return_old! (fx, iadd<T>(ptr, amount) -> ret);
        };
        _ if intrinsic.starts_with("atomic_xsub"), <T> (v ptr, v amount) {
            atomic_binop_return_old! (fx, isub<T>(ptr, amount) -> ret);
        };
        _ if intrinsic.starts_with("atomic_and"), <T> (v ptr, v src) {
            atomic_binop_return_old! (fx, band<T>(ptr, src) -> ret);
        };
        _ if intrinsic.starts_with("atomic_nand"), <T> (v ptr, v src) {
            atomic_binop_return_old! (fx, bnand<T>(ptr, src) -> ret);
        };
        _ if intrinsic.starts_with("atomic_or"), <T> (v ptr, v src) {
            atomic_binop_return_old! (fx, bor<T>(ptr, src) -> ret);
        };
        _ if intrinsic.starts_with("atomic_xor"), <T> (v ptr, v src) {
            atomic_binop_return_old! (fx, bxor<T>(ptr, src) -> ret);
        };

        _ if intrinsic.starts_with("atomic_max"), <T> (v ptr, v src) {
            atomic_minmax!(fx, IntCC::SignedGreaterThan, <T> (ptr, src) -> ret);
        };
        _ if intrinsic.starts_with("atomic_umax"), <T> (v ptr, v src) {
            atomic_minmax!(fx, IntCC::UnsignedGreaterThan, <T> (ptr, src) -> ret);
        };
        _ if intrinsic.starts_with("atomic_min"), <T> (v ptr, v src) {
            atomic_minmax!(fx, IntCC::SignedLessThan, <T> (ptr, src) -> ret);
        };
        _ if intrinsic.starts_with("atomic_umin"), <T> (v ptr, v src) {
            atomic_minmax!(fx, IntCC::UnsignedLessThan, <T> (ptr, src) -> ret);
        };
    }

    if let Some((_, dest)) = destination {
        let ret_ebb = fx.get_ebb(dest);
        fx.bcx.ins().jump(ret_ebb, &[]);
    } else {
        fx.bcx.ins().trap(TrapCode::User(!0));
    }
}