//! src/intrinsics.rs -- codegen of Rust intrinsic calls for the Cranelift backend.
//! (State as of the rustup to rustc 1.34.0-nightly (7e001e5c6 2019-02-27).)
use crate::prelude::*;

use rustc::ty::subst::SubstsRef;

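// Helper for `intrinsic_match!` below: an identifier arm name is turned into the
// string literal it must be compared against (`item_name` yields a string), while
// `_` stays a wildcard pattern. As an illustrative sketch (mine, not part of the
// original source), `intrinsic_pat!(size_of)` expands to `"size_of"` and
// `intrinsic_pat!(_)` expands to `_`.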
macro_rules! intrinsic_pat {
    (_) => {
        _
    };
    ($name:ident) => {
        stringify!($name)
    }
}

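// Helper for `intrinsic_match!`: the `c`/`v` markers in an arm's argument list
// choose how each argument is bound. `c` keeps the `CValue` as-is, `v` loads it
// into a plain Cranelift `Value` via `load_scalar`. As a rough sketch (not taken
// from the original source), `intrinsic_arg!(v fx, x)` expands to
// `x.load_scalar(fx)`, while `intrinsic_arg!(c fx, x)` is just `x`.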
macro_rules! intrinsic_arg {
    (c $fx:expr, $arg:ident) => {
        $arg
    };
    (v $fx:expr, $arg:ident) => {
        $arg.load_scalar($fx)
    };
}

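// Helper for `intrinsic_match!`: binds each name listed in an arm's `<...>` to
// the corresponding type parameter of the intrinsic. An illustrative expansion
// (mine, not in the original): `intrinsic_substs!(substs, 0, T, U)` effectively
// becomes `let T = substs.type_at(0); let U = substs.type_at(1);`.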
macro_rules! intrinsic_substs {
    ($substs:expr, $index:expr,) => {};
    ($substs:expr, $index:expr, $first:ident $(,$rest:ident)*) => {
        let $first = $substs.type_at($index);
        intrinsic_substs!($substs, $index+1, $($rest),*);
    };
}

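// The central dispatch macro: each arm names one or more intrinsics (or uses a
// `_ if ...` guard), optionally pulls type parameters out of the substs,
// destructures the argument slice according to the `c`/`v` markers and then runs
// the arm body. A wrong argument count is a compiler bug (`bug!`); an unknown
// intrinsic hits the catch-all `unimpl!`. A rough sketch of how a single arm
// such as
//
//     rotate_left, <T>(v x, v y) { ... };
//
// expands (hand-written for illustration, not the exact expansion):
//
//     "rotate_left" => {
//         let T = substs.type_at(0);
//         if let [x, y] = *args {
//             let (x, y) = (x.load_scalar(fx), y.load_scalar(fx));
//             { ... }
//         } else {
//             bug!("wrong number of args for intrinsic {:?}", intrinsic);
//         }
//     }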
macro_rules! intrinsic_match {
    ($fx:expr, $intrinsic:expr, $substs:expr, $args:expr, $(
        $($name:tt)|+ $(if $cond:expr)?, $(<$($subst:ident),*>)? ($($a:ident $arg:ident),*) $content:block;
    )*) => {
        match $intrinsic {
            $(
                $(intrinsic_pat!($name))|* $(if $cond)? => {
                    #[allow(unused_parens, non_snake_case)]
                    {
                        $(
                            intrinsic_substs!($substs, 0, $($subst),*);
                        )?
                        if let [$($arg),*] = *$args {
                            let ($($arg),*) = (
                                $(intrinsic_arg!($a $fx, $arg)),*
                            );
                            #[warn(unused_parens, non_snake_case)]
                            {
                                $content
                            }
                        } else {
                            bug!("wrong number of args for intrinsic {:?}", $intrinsic);
                        }
                    }
                }
            )*
            _ => unimpl!("unsupported intrinsic {}", $intrinsic),
        }
    };
}

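// Lowers a read-modify-write atomic intrinsic: load the old value, apply the
// given Cranelift instruction, store the result back and return the old value.
// Note that this is a plain (non-atomic) load/op/store sequence. As an
// illustrative sketch (not the literal expansion),
// `atomic_binop_return_old!(fx, iadd<T>(ptr, amount) -> ret)` amounts to:
//
//     let old = load T from ptr;
//     store (old + amount) to ptr;
//     ret = old;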
macro_rules! atomic_binop_return_old {
    ($fx:expr, $op:ident<$T:ident>($ptr:ident, $src:ident) -> $ret:ident) => {
        let clif_ty = $fx.clif_type($T).unwrap();
        let old = $fx.bcx.ins().load(clif_ty, MemFlags::new(), $ptr, 0);
        let new = $fx.bcx.ins().$op(old, $src);
        $fx.bcx.ins().store(MemFlags::new(), new, $ptr, 0);
        $ret.write_cvalue($fx, CValue::ByVal(old, $fx.layout_of($T)));
    };
}

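// Lowers the atomic min/max family: load the old value, compare it against the
// operand with the given condition code and store back whichever value wins,
// returning the old value. Like the macro above, this is not actually atomic.
// Hypothetical sketch of
// `atomic_minmax!(fx, IntCC::SignedGreaterThan, <T> (ptr, src) -> ret)`:
//
//     let old = load T from ptr;
//     store (if old > src { old } else { src }) to ptr;
//     ret = old;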
macro_rules! atomic_minmax {
    ($fx:expr, $cc:expr, <$T:ident> ($ptr:ident, $src:ident) -> $ret:ident) => {
        // Read old
        let clif_ty = $fx.clif_type($T).unwrap();
        let old = $fx.bcx.ins().load(clif_ty, MemFlags::new(), $ptr, 0);

        // Compare
        let is_eq = $fx.bcx.ins().icmp($cc, old, $src);
        let new = crate::common::codegen_select(&mut $fx.bcx, is_eq, old, $src);

        // Write new
        $fx.bcx.ins().store(MemFlags::new(), new, $ptr, 0);

        let ret_val = CValue::ByVal(old, $ret.layout());
        $ret.write_cvalue($fx, ret_val);
    };
}

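/// Codegens a call to a Rust intrinsic. Diverging intrinsics (`abort`,
/// `unreachable`) are handled up front; every other intrinsic writes its result
/// into the destination place and then jumps to the destination block, mirroring
/// a normal call terminator.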
pub fn codegen_intrinsic_call<'a, 'tcx: 'a>(
    fx: &mut FunctionCx<'a, 'tcx, impl Backend>,
    def_id: DefId,
    substs: SubstsRef<'tcx>,
    args: Vec<CValue<'tcx>>,
    destination: Option<(CPlace<'tcx>, BasicBlock)>,
) {
    let intrinsic = fx.tcx.item_name(def_id).as_str();
    let intrinsic = &intrinsic[..];

    let ret = match destination {
        Some((place, _)) => place,
        None => {
            // Insert non-returning intrinsics here
            match intrinsic {
                "abort" => {
                    trap_panic(&mut fx.bcx);
                }
                "unreachable" => {
                    trap_unreachable(&mut fx.bcx);
                }
                _ => unimplemented!("unsupported intrinsic {}", intrinsic),
            }
            return;
        }
    };

    let u64_layout = fx.layout_of(fx.tcx.types.u64);
    let usize_layout = fx.layout_of(fx.tcx.types.usize);

    intrinsic_match! {
        fx, intrinsic, substs, args,

        assume, (c _a) {};
        likely | unlikely, (c a) {
            ret.write_cvalue(fx, a);
        };
        breakpoint, () {
            fx.bcx.ins().debugtrap();
        };
        copy | copy_nonoverlapping, <elem_ty> (v src, v dst, v count) {
            let elem_size: u64 = fx.layout_of(elem_ty).size.bytes();
            let elem_size = fx
                .bcx
                .ins()
                .iconst(fx.pointer_type, elem_size as i64);
            assert_eq!(args.len(), 3);
            let byte_amount = fx.bcx.ins().imul(count, elem_size);

            if intrinsic.ends_with("_nonoverlapping") {
                fx.bcx.call_memcpy(fx.module.target_config(), dst, src, byte_amount);
            } else {
                fx.bcx.call_memmove(fx.module.target_config(), dst, src, byte_amount);
            }
        };
        discriminant_value, (c val) {
            let pointee_layout = fx.layout_of(val.layout().ty.builtin_deref(true).unwrap().ty);
            let place = CPlace::Addr(val.load_scalar(fx), None, pointee_layout);
            let discr = crate::base::trans_get_discriminant(fx, place, ret.layout());
            ret.write_cvalue(fx, discr);
        };
        size_of, <T> () {
            let size_of = fx.layout_of(T).size.bytes();
            let size_of = CValue::const_val(fx, usize_layout.ty, size_of as i64);
            ret.write_cvalue(fx, size_of);
        };
        size_of_val, <T> (c ptr) {
            let layout = fx.layout_of(T);
            let size = if layout.is_unsized() {
                let (_ptr, info) = ptr.load_value_pair(fx);
                let (size, _align) = crate::unsize::size_and_align_of_dst(fx, layout.ty, info);
                size
            } else {
                fx
                    .bcx
                    .ins()
                    .iconst(fx.pointer_type, layout.size.bytes() as i64)
            };
            ret.write_cvalue(fx, CValue::ByVal(size, usize_layout));
        };
        min_align_of, <T> () {
            let min_align = fx.layout_of(T).align.abi.bytes();
            let min_align = CValue::const_val(fx, usize_layout.ty, min_align as i64);
            ret.write_cvalue(fx, min_align);
        };
        min_align_of_val, <T> (c ptr) {
            let layout = fx.layout_of(T);
            let align = if layout.is_unsized() {
                let (_ptr, info) = ptr.load_value_pair(fx);
                let (_size, align) = crate::unsize::size_and_align_of_dst(fx, layout.ty, info);
                align
            } else {
                fx
                    .bcx
                    .ins()
                    .iconst(fx.pointer_type, layout.align.abi.bytes() as i64)
            };
            ret.write_cvalue(fx, CValue::ByVal(align, usize_layout));
        };
        type_id, <T> () {
            let type_id = fx.tcx.type_id_hash(T);
            let type_id = CValue::const_val(fx, u64_layout.ty, type_id as i64);
            ret.write_cvalue(fx, type_id);
        };
        _ if intrinsic.starts_with("unchecked_") || intrinsic == "exact_div", (c x, c y) {
            let bin_op = match intrinsic {
                "unchecked_div" | "exact_div" => BinOp::Div,
                "unchecked_rem" => BinOp::Rem,
                "unchecked_shl" => BinOp::Shl,
                "unchecked_shr" => BinOp::Shr,
                _ => unimplemented!("intrinsic {}", intrinsic),
            };
            let res = match ret.layout().ty.sty {
                ty::Uint(_) => crate::base::trans_int_binop(
                    fx,
                    bin_op,
                    x,
                    y,
                    ret.layout().ty,
                    false,
                ),
                ty::Int(_) => crate::base::trans_int_binop(
                    fx,
                    bin_op,
                    x,
                    y,
                    ret.layout().ty,
                    true,
                ),
                _ => panic!(),
            };
            ret.write_cvalue(fx, res);
        };
        _ if intrinsic.ends_with("_with_overflow"), <T> (c x, c y) {
            assert_eq!(x.layout().ty, y.layout().ty);
            let bin_op = match intrinsic {
                "add_with_overflow" => BinOp::Add,
                "sub_with_overflow" => BinOp::Sub,
                "mul_with_overflow" => BinOp::Mul,
                _ => unimplemented!("intrinsic {}", intrinsic),
            };
            let res = match T.sty {
                ty::Uint(_) => crate::base::trans_checked_int_binop(
                    fx,
                    bin_op,
                    x,
                    y,
                    ret.layout().ty,
                    false,
                ),
                ty::Int(_) => crate::base::trans_checked_int_binop(
                    fx,
                    bin_op,
                    x,
                    y,
                    ret.layout().ty,
                    true,
                ),
                _ => panic!(),
            };
            ret.write_cvalue(fx, res);
        };
        _ if intrinsic.starts_with("overflowing_"), <T> (c x, c y) {
            assert_eq!(x.layout().ty, y.layout().ty);
            let bin_op = match intrinsic {
                "overflowing_add" => BinOp::Add,
                "overflowing_sub" => BinOp::Sub,
                "overflowing_mul" => BinOp::Mul,
                _ => unimplemented!("intrinsic {}", intrinsic),
            };
            let res = match T.sty {
                ty::Uint(_) => crate::base::trans_int_binop(
                    fx,
                    bin_op,
                    x,
                    y,
                    ret.layout().ty,
                    false,
                ),
                ty::Int(_) => crate::base::trans_int_binop(
                    fx,
                    bin_op,
                    x,
                    y,
                    ret.layout().ty,
                    true,
                ),
                _ => panic!(),
            };
            ret.write_cvalue(fx, res);
        };
        rotate_left, <T>(v x, v y) {
            let layout = fx.layout_of(T);
            let res = fx.bcx.ins().rotl(x, y);
            ret.write_cvalue(fx, CValue::ByVal(res, layout));
        };
        rotate_right, <T>(v x, v y) {
            let layout = fx.layout_of(T);
            let res = fx.bcx.ins().rotr(x, y);
            ret.write_cvalue(fx, CValue::ByVal(res, layout));
        };

        // The only difference between offset and arith_offset is regarding UB.
        // Because Cranelift doesn't have UB, both are codegen'ed the same way.
        offset | arith_offset, (c base, v offset) {
            let pointee_ty = base.layout().ty.builtin_deref(true).unwrap().ty;
            let pointee_size = fx.layout_of(pointee_ty).size.bytes();
            let ptr_diff = fx.bcx.ins().imul_imm(offset, pointee_size as i64);
            let base_val = base.load_scalar(fx);
            let res = fx.bcx.ins().iadd(base_val, ptr_diff);
            ret.write_cvalue(fx, CValue::ByVal(res, args[0].layout()));
        };

        transmute, <src_ty, dst_ty> (c from) {
            assert_eq!(from.layout().ty, src_ty);
            let addr = from.force_stack(fx);
            let dst_layout = fx.layout_of(dst_ty);
            ret.write_cvalue(fx, CValue::ByRef(addr, dst_layout))
        };
        init, <T> () {
            let layout = fx.layout_of(T);
            let inited_place = CPlace::new_stack_slot(fx, T);
            let addr = inited_place.to_addr(fx);
            let zero_val = fx.bcx.ins().iconst(types::I8, 0);
            let len_val = fx.bcx.ins().iconst(pointer_ty(fx.tcx), layout.size.bytes() as i64);
            fx.bcx.call_memset(fx.module.target_config(), addr, zero_val, len_val);

            let inited_val = inited_place.to_cvalue(fx);
            ret.write_cvalue(fx, inited_val);
        };
        write_bytes, (c dst, v val, v count) {
            let pointee_ty = dst.layout().ty.builtin_deref(true).unwrap().ty;
            let pointee_size = fx.layout_of(pointee_ty).size.bytes();
            let count = fx.bcx.ins().imul_imm(count, pointee_size as i64);
            let dst_ptr = dst.load_scalar(fx);
            fx.bcx.call_memset(fx.module.target_config(), dst_ptr, val, count);
        };
        uninit, <T> () {
            let uninit_place = CPlace::new_stack_slot(fx, T);
            let uninit_val = uninit_place.to_cvalue(fx);
            ret.write_cvalue(fx, uninit_val);
        };
        ctlz | ctlz_nonzero, <T> (v arg) {
            let res = CValue::ByVal(fx.bcx.ins().clz(arg), fx.layout_of(T));
            ret.write_cvalue(fx, res);
        };
        cttz | cttz_nonzero, <T> (v arg) {
            let res = CValue::ByVal(fx.bcx.ins().ctz(arg), fx.layout_of(T));
            ret.write_cvalue(fx, res);
        };
        ctpop, <T> (v arg) {
            let res = CValue::ByVal(fx.bcx.ins().popcnt(arg), fx.layout_of(T));
            ret.write_cvalue(fx, res);
        };
        bitreverse, <T> (v arg) {
            let res = CValue::ByVal(fx.bcx.ins().bitrev(arg), fx.layout_of(T));
            ret.write_cvalue(fx, res);
        };
        needs_drop, <T> () {
            let needs_drop = if T.needs_drop(fx.tcx, ParamEnv::reveal_all()) {
                1
            } else {
                0
            };
            let needs_drop = CValue::const_val(fx, fx.tcx.types.bool, needs_drop);
            ret.write_cvalue(fx, needs_drop);
        };
        panic_if_uninhabited, <T> () {
            if fx.layout_of(T).abi.is_uninhabited() {
                crate::trap::trap_panic(&mut fx.bcx);
                return;
            }
        };

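        // Atomic intrinsics. These are lowered as plain (non-atomic) loads and
        // stores below; presumably this is only sound as long as no truly
        // concurrent access occurs (an inference from the code, not a claim made
        // in the original source).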
        _ if intrinsic.starts_with("atomic_fence"), () {};
        _ if intrinsic.starts_with("atomic_singlethreadfence"), () {};
        _ if intrinsic.starts_with("atomic_load"), (c ptr) {
            let inner_layout =
                fx.layout_of(ptr.layout().ty.builtin_deref(true).unwrap().ty);
            let val = CValue::ByRef(ptr.load_scalar(fx), inner_layout);
            ret.write_cvalue(fx, val);
        };
        _ if intrinsic.starts_with("atomic_store"), (v ptr, c val) {
            let dest = CPlace::Addr(ptr, None, val.layout());
            dest.write_cvalue(fx, val);
        };
        _ if intrinsic.starts_with("atomic_xchg"), <T> (v ptr, c src) {
            // Read old
            let clif_ty = fx.clif_type(T).unwrap();
            let old = fx.bcx.ins().load(clif_ty, MemFlags::new(), ptr, 0);
            ret.write_cvalue(fx, CValue::ByVal(old, fx.layout_of(T)));

            // Write new
            let dest = CPlace::Addr(ptr, None, src.layout());
            dest.write_cvalue(fx, src);
        };
        _ if intrinsic.starts_with("atomic_cxchg"), <T> (v ptr, v test_old, v new) { // both atomic_cxchg_* and atomic_cxchgweak_*
            // Read old
            let clif_ty = fx.clif_type(T).unwrap();
            let old = fx.bcx.ins().load(clif_ty, MemFlags::new(), ptr, 0);

            // Compare
            let is_eq = fx.bcx.ins().icmp(IntCC::Equal, old, test_old);
            let new = crate::common::codegen_select(&mut fx.bcx, is_eq, new, old); // Keep old if not equal to test_old

            // Write new
            fx.bcx.ins().store(MemFlags::new(), new, ptr, 0);

            let ret_val = CValue::ByValPair(old, fx.bcx.ins().bint(types::I8, is_eq), ret.layout());
            ret.write_cvalue(fx, ret_val);
        };

        _ if intrinsic.starts_with("atomic_xadd"), <T> (v ptr, v amount) {
            atomic_binop_return_old! (fx, iadd<T>(ptr, amount) -> ret);
        };
        _ if intrinsic.starts_with("atomic_xsub"), <T> (v ptr, v amount) {
            atomic_binop_return_old! (fx, isub<T>(ptr, amount) -> ret);
        };
        _ if intrinsic.starts_with("atomic_and"), <T> (v ptr, v src) {
            atomic_binop_return_old! (fx, band<T>(ptr, src) -> ret);
        };
        _ if intrinsic.starts_with("atomic_nand"), <T> (v ptr, v src) {
            atomic_binop_return_old! (fx, bnand<T>(ptr, src) -> ret);
        };
        _ if intrinsic.starts_with("atomic_or"), <T> (v ptr, v src) {
            atomic_binop_return_old! (fx, bor<T>(ptr, src) -> ret);
        };
        _ if intrinsic.starts_with("atomic_xor"), <T> (v ptr, v src) {
            atomic_binop_return_old! (fx, bxor<T>(ptr, src) -> ret);
        };

        _ if intrinsic.starts_with("atomic_max"), <T> (v ptr, v src) {
            atomic_minmax!(fx, IntCC::SignedGreaterThan, <T> (ptr, src) -> ret);
        };
        _ if intrinsic.starts_with("atomic_umax"), <T> (v ptr, v src) {
            atomic_minmax!(fx, IntCC::UnsignedGreaterThan, <T> (ptr, src) -> ret);
        };
        _ if intrinsic.starts_with("atomic_min"), <T> (v ptr, v src) {
            atomic_minmax!(fx, IntCC::SignedLessThan, <T> (ptr, src) -> ret);
        };
        _ if intrinsic.starts_with("atomic_umin"), <T> (v ptr, v src) {
            atomic_minmax!(fx, IntCC::UnsignedLessThan, <T> (ptr, src) -> ret);
        };
    }

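    // Non-diverging intrinsics end by jumping to the destination block, just
    // like a normal call terminator; the trap is a safety net for the case
    // where no destination exists.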
    if let Some((_, dest)) = destination {
        let ret_ebb = fx.get_ebb(dest);
        fx.bcx.ins().jump(ret_ebb, &[]);
    } else {
        trap_unreachable(&mut fx.bcx);
    }
}