]> git.lizzy.rs Git - rust.git/blob - compiler/rustc_codegen_ssa/src/mir/intrinsic.rs
Rollup merge of #93757 - jackh726:gat-bug-tests, r=nikomatsakis
[rust.git] / compiler / rustc_codegen_ssa / src / mir / intrinsic.rs
1 use super::operand::{OperandRef, OperandValue};
2 use super::place::PlaceRef;
3 use super::FunctionCx;
4 use crate::common::{span_invalid_monomorphization_error, IntPredicate};
5 use crate::glue;
6 use crate::traits::*;
7 use crate::MemFlags;
8
9 use rustc_middle::ty::{self, Ty, TyCtxt};
10 use rustc_span::{sym, Span};
11 use rustc_target::abi::call::{FnAbi, PassMode};
12
13 fn copy_intrinsic<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
14     bx: &mut Bx,
15     allow_overlap: bool,
16     volatile: bool,
17     ty: Ty<'tcx>,
18     dst: Bx::Value,
19     src: Bx::Value,
20     count: Bx::Value,
21 ) {
22     let layout = bx.layout_of(ty);
23     let size = layout.size;
24     let align = layout.align.abi;
25     let size = bx.mul(bx.const_usize(size.bytes()), count);
26     let flags = if volatile { MemFlags::VOLATILE } else { MemFlags::empty() };
27     if allow_overlap {
28         bx.memmove(dst, align, src, align, size, flags);
29     } else {
30         bx.memcpy(dst, align, src, align, size, flags);
31     }
32 }
33
34 fn memset_intrinsic<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
35     bx: &mut Bx,
36     volatile: bool,
37     ty: Ty<'tcx>,
38     dst: Bx::Value,
39     val: Bx::Value,
40     count: Bx::Value,
41 ) {
42     let layout = bx.layout_of(ty);
43     let size = layout.size;
44     let align = layout.align.abi;
45     let size = bx.mul(bx.const_usize(size.bytes()), count);
46     let flags = if volatile { MemFlags::VOLATILE } else { MemFlags::empty() };
47     bx.memset(dst, val, size, align, flags);
48 }
49
impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
    /// Generates code for a call to a Rust intrinsic that can be lowered in a
    /// backend-independent way. Intrinsics not handled here fall through to
    /// the backend-specific `bx.codegen_intrinsic_call` at the bottom.
    ///
    /// The result value (if any) is written through `llresult`; `span` is the
    /// call site, used for monomorphization-error diagnostics.
    pub fn codegen_intrinsic_call(
        bx: &mut Bx,
        instance: ty::Instance<'tcx>,
        fn_abi: &FnAbi<'tcx, Ty<'tcx>>,
        args: &[OperandRef<'tcx, Bx::Value>],
        llresult: Bx::Value,
        span: Span,
    ) {
        let callee_ty = instance.ty(bx.tcx(), ty::ParamEnv::reveal_all());

        // Intrinsics are always fn items; anything else is a compiler bug.
        let (def_id, substs) = match *callee_ty.kind() {
            ty::FnDef(def_id, substs) => (def_id, substs),
            _ => bug!("expected fn item type, found {}", callee_ty),
        };

        let sig = callee_ty.fn_sig(bx.tcx());
        let sig = bx.tcx().normalize_erasing_late_bound_regions(ty::ParamEnv::reveal_all(), sig);
        let arg_tys = sig.inputs();
        let ret_ty = sig.output();
        let name = bx.tcx().item_name(def_id);
        let name_str = name.as_str();

        let llret_ty = bx.backend_type(bx.layout_of(ret_ty));
        let result = PlaceRef::new_sized(llresult, fn_abi.ret.layout);

        // Each arm either yields a value (stored into `result` after the
        // match) or performs its own stores and returns early.
        let llval = match name {
            sym::assume => {
                bx.assume(args[0].immediate());
                return;
            }
            sym::abort => {
                bx.abort();
                return;
            }

            sym::va_start => bx.va_start(args[0].immediate()),
            sym::va_end => bx.va_end(args[0].immediate()),
            sym::size_of_val => {
                let tp_ty = substs.type_at(0);
                // A `Pair` operand is a fat pointer: compute the dynamic size
                // from its metadata. Thin pointers use the static layout size.
                if let OperandValue::Pair(_, meta) = args[0].val {
                    let (llsize, _) = glue::size_and_align_of_dst(bx, tp_ty, Some(meta));
                    llsize
                } else {
                    bx.const_usize(bx.layout_of(tp_ty).size.bytes())
                }
            }
            sym::min_align_of_val => {
                let tp_ty = substs.type_at(0);
                // Same fat- vs thin-pointer split as `size_of_val`, but for
                // alignment.
                if let OperandValue::Pair(_, meta) = args[0].val {
                    let (_, llalign) = glue::size_and_align_of_dst(bx, tp_ty, Some(meta));
                    llalign
                } else {
                    bx.const_usize(bx.layout_of(tp_ty).align.abi.bytes())
                }
            }
            // These intrinsics are compile-time constants: evaluate them via
            // the const evaluator and splat the result in.
            sym::pref_align_of
            | sym::needs_drop
            | sym::type_id
            | sym::type_name
            | sym::variant_count => {
                let value = bx
                    .tcx()
                    .const_eval_instance(ty::ParamEnv::reveal_all(), instance, None)
                    .unwrap();
                OperandRef::from_const(bx, value, ret_ty).immediate_or_packed_pair(bx)
            }
            sym::offset => {
                let ty = substs.type_at(0);
                let layout = bx.layout_of(ty);
                let ptr = args[0].immediate();
                let offset = args[1].immediate();
                // `offset` requires the result to stay in bounds, hence the
                // `inbounds` GEP.
                bx.inbounds_gep(bx.backend_type(layout), ptr, &[offset])
            }
            sym::arith_offset => {
                let ty = substs.type_at(0);
                let layout = bx.layout_of(ty);
                let ptr = args[0].immediate();
                let offset = args[1].immediate();
                // Plain (wrapping) GEP: `arith_offset` has no in-bounds
                // requirement.
                bx.gep(bx.backend_type(layout), ptr, &[offset])
            }
            sym::copy => {
                // Note: `copy(src, dst, count)` — args[1] is the destination,
                // and overlap is allowed (memmove).
                copy_intrinsic(
                    bx,
                    true,
                    false,
                    substs.type_at(0),
                    args[1].immediate(),
                    args[0].immediate(),
                    args[2].immediate(),
                );
                return;
            }
            sym::write_bytes => {
                memset_intrinsic(
                    bx,
                    false,
                    substs.type_at(0),
                    args[0].immediate(),
                    args[1].immediate(),
                    args[2].immediate(),
                );
                return;
            }

            sym::volatile_copy_nonoverlapping_memory => {
                copy_intrinsic(
                    bx,
                    false,
                    true,
                    substs.type_at(0),
                    args[0].immediate(),
                    args[1].immediate(),
                    args[2].immediate(),
                );
                return;
            }
            sym::volatile_copy_memory => {
                copy_intrinsic(
                    bx,
                    true,
                    true,
                    substs.type_at(0),
                    args[0].immediate(),
                    args[1].immediate(),
                    args[2].immediate(),
                );
                return;
            }
            sym::volatile_set_memory => {
                memset_intrinsic(
                    bx,
                    true,
                    substs.type_at(0),
                    args[0].immediate(),
                    args[1].immediate(),
                    args[2].immediate(),
                );
                return;
            }
            sym::volatile_store => {
                let dst = args[0].deref(bx.cx());
                args[1].val.volatile_store(bx, dst);
                return;
            }
            sym::unaligned_volatile_store => {
                let dst = args[0].deref(bx.cx());
                args[1].val.unaligned_volatile_store(bx, dst);
                return;
            }
            // Integer arithmetic intrinsics: all require a basic integer type
            // argument, checked via `int_type_width_signed` below.
            sym::add_with_overflow
            | sym::sub_with_overflow
            | sym::mul_with_overflow
            | sym::unchecked_div
            | sym::unchecked_rem
            | sym::unchecked_shl
            | sym::unchecked_shr
            | sym::unchecked_add
            | sym::unchecked_sub
            | sym::unchecked_mul
            | sym::exact_div => {
                let ty = arg_tys[0];
                match int_type_width_signed(ty, bx.tcx()) {
                    Some((_width, signed)) => match name {
                        sym::add_with_overflow
                        | sym::sub_with_overflow
                        | sym::mul_with_overflow => {
                            let op = match name {
                                sym::add_with_overflow => OverflowOp::Add,
                                sym::sub_with_overflow => OverflowOp::Sub,
                                sym::mul_with_overflow => OverflowOp::Mul,
                                _ => bug!(),
                            };
                            let (val, overflow) =
                                bx.checked_binop(op, ty, args[0].immediate(), args[1].immediate());
                            // Convert `i1` to a `bool`, and write it to the out parameter
                            let val = bx.from_immediate(val);
                            let overflow = bx.from_immediate(overflow);

                            // Result is a `(value, overflowed)` pair: store
                            // each field of the return place separately.
                            let dest = result.project_field(bx, 0);
                            bx.store(val, dest.llval, dest.align);
                            let dest = result.project_field(bx, 1);
                            bx.store(overflow, dest.llval, dest.align);

                            return;
                        }
                        sym::exact_div => {
                            if signed {
                                bx.exactsdiv(args[0].immediate(), args[1].immediate())
                            } else {
                                bx.exactudiv(args[0].immediate(), args[1].immediate())
                            }
                        }
                        sym::unchecked_div => {
                            if signed {
                                bx.sdiv(args[0].immediate(), args[1].immediate())
                            } else {
                                bx.udiv(args[0].immediate(), args[1].immediate())
                            }
                        }
                        sym::unchecked_rem => {
                            if signed {
                                bx.srem(args[0].immediate(), args[1].immediate())
                            } else {
                                bx.urem(args[0].immediate(), args[1].immediate())
                            }
                        }
                        sym::unchecked_shl => bx.shl(args[0].immediate(), args[1].immediate()),
                        sym::unchecked_shr => {
                            // Arithmetic vs logical shift depends on signedness.
                            if signed {
                                bx.ashr(args[0].immediate(), args[1].immediate())
                            } else {
                                bx.lshr(args[0].immediate(), args[1].immediate())
                            }
                        }
                        sym::unchecked_add => {
                            if signed {
                                bx.unchecked_sadd(args[0].immediate(), args[1].immediate())
                            } else {
                                bx.unchecked_uadd(args[0].immediate(), args[1].immediate())
                            }
                        }
                        sym::unchecked_sub => {
                            if signed {
                                bx.unchecked_ssub(args[0].immediate(), args[1].immediate())
                            } else {
                                bx.unchecked_usub(args[0].immediate(), args[1].immediate())
                            }
                        }
                        sym::unchecked_mul => {
                            if signed {
                                bx.unchecked_smul(args[0].immediate(), args[1].immediate())
                            } else {
                                bx.unchecked_umul(args[0].immediate(), args[1].immediate())
                            }
                        }
                        _ => bug!(),
                    },
                    None => {
                        span_invalid_monomorphization_error(
                            bx.tcx().sess,
                            span,
                            &format!(
                                "invalid monomorphization of `{}` intrinsic: \
                                      expected basic integer type, found `{}`",
                                name, ty
                            ),
                        );
                        return;
                    }
                }
            }
            // Fast-math float intrinsics: require a basic float type argument.
            sym::fadd_fast | sym::fsub_fast | sym::fmul_fast | sym::fdiv_fast | sym::frem_fast => {
                match float_type_width(arg_tys[0]) {
                    Some(_width) => match name {
                        sym::fadd_fast => bx.fadd_fast(args[0].immediate(), args[1].immediate()),
                        sym::fsub_fast => bx.fsub_fast(args[0].immediate(), args[1].immediate()),
                        sym::fmul_fast => bx.fmul_fast(args[0].immediate(), args[1].immediate()),
                        sym::fdiv_fast => bx.fdiv_fast(args[0].immediate(), args[1].immediate()),
                        sym::frem_fast => bx.frem_fast(args[0].immediate(), args[1].immediate()),
                        _ => bug!(),
                    },
                    None => {
                        span_invalid_monomorphization_error(
                            bx.tcx().sess,
                            span,
                            &format!(
                                "invalid monomorphization of `{}` intrinsic: \
                                      expected basic float type, found `{}`",
                                name, arg_tys[0]
                            ),
                        );
                        return;
                    }
                }
            }

            sym::float_to_int_unchecked => {
                // The argument must be a float and the return type an integer;
                // both are validated before any code is emitted.
                if float_type_width(arg_tys[0]).is_none() {
                    span_invalid_monomorphization_error(
                        bx.tcx().sess,
                        span,
                        &format!(
                            "invalid monomorphization of `float_to_int_unchecked` \
                                  intrinsic: expected basic float type, \
                                  found `{}`",
                            arg_tys[0]
                        ),
                    );
                    return;
                }
                let (_width, signed) = match int_type_width_signed(ret_ty, bx.tcx()) {
                    Some(pair) => pair,
                    None => {
                        span_invalid_monomorphization_error(
                            bx.tcx().sess,
                            span,
                            &format!(
                                "invalid monomorphization of `float_to_int_unchecked` \
                                      intrinsic:  expected basic integer type, \
                                      found `{}`",
                                ret_ty
                            ),
                        );
                        return;
                    }
                };
                if signed {
                    bx.fptosi(args[0].immediate(), llret_ty)
                } else {
                    bx.fptoui(args[0].immediate(), llret_ty)
                }
            }

            sym::discriminant_value => {
                if ret_ty.is_integral() {
                    args[0].deref(bx.cx()).codegen_get_discr(bx, ret_ty)
                } else {
                    span_bug!(span, "Invalid discriminant type for `{:?}`", arg_tys[0])
                }
            }

            sym::const_allocate => {
                // returns a null pointer at runtime.
                bx.const_null(bx.type_i8p())
            }

            sym::const_deallocate => {
                // nop at runtime.
                return;
            }

            // This requires that atomic intrinsics follow a specific naming pattern:
            // "atomic_<operation>[_<ordering>]", and no ordering means SeqCst
            name if name_str.starts_with("atomic_") => {
                use crate::common::AtomicOrdering::*;
                use crate::common::{AtomicRmwBinOp, SynchronizationScope};

                let split: Vec<_> = name_str.split('_').collect();

                // cmpxchg takes a separate failure ordering; the `fail*`
                // suffixes are only legal for it.
                let is_cxchg = split[1] == "cxchg" || split[1] == "cxchgweak";
                let (order, failorder) = match split.len() {
                    2 => (SequentiallyConsistent, SequentiallyConsistent),
                    3 => match split[2] {
                        "unordered" => (Unordered, Unordered),
                        "relaxed" => (Monotonic, Monotonic),
                        "acq" => (Acquire, Acquire),
                        "rel" => (Release, Monotonic),
                        "acqrel" => (AcquireRelease, Acquire),
                        "failrelaxed" if is_cxchg => (SequentiallyConsistent, Monotonic),
                        "failacq" if is_cxchg => (SequentiallyConsistent, Acquire),
                        _ => bx.sess().fatal("unknown ordering in atomic intrinsic"),
                    },
                    4 => match (split[2], split[3]) {
                        ("acq", "failrelaxed") if is_cxchg => (Acquire, Monotonic),
                        ("acqrel", "failrelaxed") if is_cxchg => (AcquireRelease, Monotonic),
                        _ => bx.sess().fatal("unknown ordering in atomic intrinsic"),
                    },
                    _ => bx.sess().fatal("Atomic intrinsic not in correct format"),
                };

                let invalid_monomorphization = |ty| {
                    span_invalid_monomorphization_error(
                        bx.tcx().sess,
                        span,
                        &format!(
                            "invalid monomorphization of `{}` intrinsic: \
                                  expected basic integer type, found `{}`",
                            name, ty
                        ),
                    );
                };

                match split[1] {
                    "cxchg" | "cxchgweak" => {
                        let ty = substs.type_at(0);
                        if int_type_width_signed(ty, bx.tcx()).is_some() || ty.is_unsafe_ptr() {
                            let weak = split[1] == "cxchgweak";
                            let mut dst = args[0].immediate();
                            let mut cmp = args[1].immediate();
                            let mut src = args[2].immediate();
                            if ty.is_unsafe_ptr() {
                                // Some platforms do not support atomic operations on pointers,
                                // so we cast to integer first.
                                let ptr_llty = bx.type_ptr_to(bx.type_isize());
                                dst = bx.pointercast(dst, ptr_llty);
                                cmp = bx.ptrtoint(cmp, bx.type_isize());
                                src = bx.ptrtoint(src, bx.type_isize());
                            }
                            let pair = bx.atomic_cmpxchg(dst, cmp, src, order, failorder, weak);
                            let val = bx.extract_value(pair, 0);
                            let success = bx.extract_value(pair, 1);
                            let val = bx.from_immediate(val);
                            let success = bx.from_immediate(success);

                            // cmpxchg returns a `(value, success)` pair: store
                            // both fields of the return place and return early.
                            let dest = result.project_field(bx, 0);
                            bx.store(val, dest.llval, dest.align);
                            let dest = result.project_field(bx, 1);
                            bx.store(success, dest.llval, dest.align);
                            return;
                        } else {
                            return invalid_monomorphization(ty);
                        }
                    }

                    "load" => {
                        let ty = substs.type_at(0);
                        if int_type_width_signed(ty, bx.tcx()).is_some() || ty.is_unsafe_ptr() {
                            let layout = bx.layout_of(ty);
                            let size = layout.size;
                            let mut source = args[0].immediate();
                            if ty.is_unsafe_ptr() {
                                // Some platforms do not support atomic operations on pointers,
                                // so we cast to integer first...
                                let llty = bx.type_isize();
                                let ptr_llty = bx.type_ptr_to(llty);
                                source = bx.pointercast(source, ptr_llty);
                                let result = bx.atomic_load(llty, source, order, size);
                                // ... and then cast the result back to a pointer
                                bx.inttoptr(result, bx.backend_type(layout))
                            } else {
                                bx.atomic_load(bx.backend_type(layout), source, order, size)
                            }
                        } else {
                            return invalid_monomorphization(ty);
                        }
                    }

                    "store" => {
                        let ty = substs.type_at(0);
                        if int_type_width_signed(ty, bx.tcx()).is_some() || ty.is_unsafe_ptr() {
                            let size = bx.layout_of(ty).size;
                            let mut val = args[1].immediate();
                            let mut ptr = args[0].immediate();
                            if ty.is_unsafe_ptr() {
                                // Some platforms do not support atomic operations on pointers,
                                // so we cast to integer first.
                                let ptr_llty = bx.type_ptr_to(bx.type_isize());
                                ptr = bx.pointercast(ptr, ptr_llty);
                                val = bx.ptrtoint(val, bx.type_isize());
                            }
                            bx.atomic_store(val, ptr, order, size);
                            return;
                        } else {
                            return invalid_monomorphization(ty);
                        }
                    }

                    "fence" => {
                        bx.atomic_fence(order, SynchronizationScope::CrossThread);
                        return;
                    }

                    "singlethreadfence" => {
                        bx.atomic_fence(order, SynchronizationScope::SingleThread);
                        return;
                    }

                    // These are all AtomicRMW ops
                    op => {
                        let atom_op = match op {
                            "xchg" => AtomicRmwBinOp::AtomicXchg,
                            "xadd" => AtomicRmwBinOp::AtomicAdd,
                            "xsub" => AtomicRmwBinOp::AtomicSub,
                            "and" => AtomicRmwBinOp::AtomicAnd,
                            "nand" => AtomicRmwBinOp::AtomicNand,
                            "or" => AtomicRmwBinOp::AtomicOr,
                            "xor" => AtomicRmwBinOp::AtomicXor,
                            "max" => AtomicRmwBinOp::AtomicMax,
                            "min" => AtomicRmwBinOp::AtomicMin,
                            "umax" => AtomicRmwBinOp::AtomicUMax,
                            "umin" => AtomicRmwBinOp::AtomicUMin,
                            _ => bx.sess().fatal("unknown atomic operation"),
                        };

                        let ty = substs.type_at(0);
                        // Pointers are only accepted for `xchg`; everything
                        // else requires a basic integer type.
                        if int_type_width_signed(ty, bx.tcx()).is_some()
                            || (ty.is_unsafe_ptr() && op == "xchg")
                        {
                            let mut ptr = args[0].immediate();
                            let mut val = args[1].immediate();
                            if ty.is_unsafe_ptr() {
                                // Some platforms do not support atomic operations on pointers,
                                // so we cast to integer first.
                                let ptr_llty = bx.type_ptr_to(bx.type_isize());
                                ptr = bx.pointercast(ptr, ptr_llty);
                                val = bx.ptrtoint(val, bx.type_isize());
                            }
                            bx.atomic_rmw(atom_op, ptr, val, order)
                        } else {
                            return invalid_monomorphization(ty);
                        }
                    }
                }
            }

            sym::nontemporal_store => {
                let dst = args[0].deref(bx.cx());
                args[1].val.nontemporal_store(bx, dst);
                return;
            }

            sym::ptr_guaranteed_eq | sym::ptr_guaranteed_ne => {
                let a = args[0].immediate();
                let b = args[1].immediate();
                if name == sym::ptr_guaranteed_eq {
                    bx.icmp(IntPredicate::IntEQ, a, b)
                } else {
                    bx.icmp(IntPredicate::IntNE, a, b)
                }
            }

            sym::ptr_offset_from => {
                let ty = substs.type_at(0);
                let pointee_size = bx.layout_of(ty).size;

                // This is the same sequence that Clang emits for pointer subtraction.
                // It can be neither `nsw` nor `nuw` because the input is treated as
                // unsigned but then the output is treated as signed, so neither works.
                let a = args[0].immediate();
                let b = args[1].immediate();
                let a = bx.ptrtoint(a, bx.type_isize());
                let b = bx.ptrtoint(b, bx.type_isize());
                let d = bx.sub(a, b);
                let pointee_size = bx.const_usize(pointee_size.bytes());
                // this is where the signed magic happens (notice the `s` in `exactsdiv`)
                bx.exactsdiv(d, pointee_size)
            }

            _ => {
                // Need to use backend-specific things in the implementation.
                bx.codegen_intrinsic_call(instance, fn_abi, args, llresult, span);
                return;
            }
        };

        // Store the produced value into the return place, honoring a `Cast`
        // pass mode if the ABI requires one.
        if !fn_abi.ret.is_ignore() {
            if let PassMode::Cast(ty) = fn_abi.ret.mode {
                let ptr_llty = bx.type_ptr_to(bx.cast_backend_type(&ty));
                let ptr = bx.pointercast(result.llval, ptr_llty);
                bx.store(llval, ptr, result.align);
            } else {
                OperandRef::from_immediate_or_packed_pair(bx, llval, result.layout)
                    .val
                    .store(bx, result);
            }
        }
    }
}
599
// Returns the bit width of an integer `Ty` and whether it is signed,
// or `None` if the type is not a basic integer type.
// FIXME: there are multiple functions like this one; investigate reusing
// some of the already existing helpers.
604 fn int_type_width_signed(ty: Ty<'_>, tcx: TyCtxt<'_>) -> Option<(u64, bool)> {
605     match ty.kind() {
606         ty::Int(t) => {
607             Some((t.bit_width().unwrap_or(u64::from(tcx.sess.target.pointer_width)), true))
608         }
609         ty::Uint(t) => {
610             Some((t.bit_width().unwrap_or(u64::from(tcx.sess.target.pointer_width)), false))
611         }
612         _ => None,
613     }
614 }
615
// Returns the bit width of a float `Ty`,
// or `None` if the type is not a float.
618 fn float_type_width(ty: Ty<'_>) -> Option<u64> {
619     match ty.kind() {
620         ty::Float(t) => Some(t.bit_width()),
621         _ => None,
622     }
623 }