// compiler/rustc_codegen_ssa/src/mir/intrinsic.rs
use super::operand::{OperandRef, OperandValue};
use super::place::PlaceRef;
use super::FunctionCx;
use crate::common::{span_invalid_monomorphization_error, IntPredicate};
use crate::glue;
use crate::meth;
use crate::traits::*;
use crate::MemFlags;

use rustc_middle::ty::{self, Ty, TyCtxt};
use rustc_span::{sym, Span};
use rustc_target::abi::{
    call::{FnAbi, PassMode},
    WrappingRange,
};

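/// Shared lowering for the `copy` and `volatile_copy_*_memory` intrinsics:
/// the element count is scaled by the type's size, and the copy is emitted as
/// a single `memmove` (overlap allowed) or `memcpy` (no overlap). As a
/// sketch, for a 4-byte `T` such as `u32`, `copy(src, dst, 8)` becomes
/// `memmove(dst, src, 32)` at `T`'s ABI alignment.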
fn copy_intrinsic<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
    bx: &mut Bx,
    allow_overlap: bool,
    volatile: bool,
    ty: Ty<'tcx>,
    dst: Bx::Value,
    src: Bx::Value,
    count: Bx::Value,
) {
    let layout = bx.layout_of(ty);
    let size = layout.size;
    let align = layout.align.abi;
    let size = bx.mul(bx.const_usize(size.bytes()), count);
    let flags = if volatile { MemFlags::VOLATILE } else { MemFlags::empty() };
    if allow_overlap {
        bx.memmove(dst, align, src, align, size, flags);
    } else {
        bx.memcpy(dst, align, src, align, size, flags);
    }
}

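/// Shared lowering for `write_bytes` and `volatile_set_memory`: fills
/// `size_of::<T>() * count` bytes starting at `dst` with the byte `val` via a
/// single `memset`. As a sketch, `write_bytes::<u64>(dst, 0, 4)` becomes
/// `memset(dst, 0, 32)`.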
fn memset_intrinsic<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
    bx: &mut Bx,
    volatile: bool,
    ty: Ty<'tcx>,
    dst: Bx::Value,
    val: Bx::Value,
    count: Bx::Value,
) {
    let layout = bx.layout_of(ty);
    let size = layout.size;
    let align = layout.align.abi;
    let size = bx.mul(bx.const_usize(size.bytes()), count);
    let flags = if volatile { MemFlags::VOLATILE } else { MemFlags::empty() };
    bx.memset(dst, val, size, align, flags);
}

impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
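    /// Lowers a call to a Rust intrinsic that has a backend-independent
    /// implementation; anything that needs target- or backend-specific
    /// handling falls through to `Bx::codegen_intrinsic_call` in the
    /// catch-all arm of the `match` below.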
    pub fn codegen_intrinsic_call(
        bx: &mut Bx,
        instance: ty::Instance<'tcx>,
        fn_abi: &FnAbi<'tcx, Ty<'tcx>>,
        args: &[OperandRef<'tcx, Bx::Value>],
        llresult: Bx::Value,
        span: Span,
    ) {
        let callee_ty = instance.ty(bx.tcx(), ty::ParamEnv::reveal_all());

        let ty::FnDef(def_id, substs) = *callee_ty.kind() else {
            bug!("expected fn item type, found {}", callee_ty);
        };

        let sig = callee_ty.fn_sig(bx.tcx());
        let sig = bx.tcx().normalize_erasing_late_bound_regions(ty::ParamEnv::reveal_all(), sig);
        let arg_tys = sig.inputs();
        let ret_ty = sig.output();
        let name = bx.tcx().item_name(def_id);
        let name_str = name.as_str();

        let llret_ty = bx.backend_type(bx.layout_of(ret_ty));
        let result = PlaceRef::new_sized(llresult, fn_abi.ret.layout);

        let llval = match name {
            sym::abort => {
                bx.abort();
                return;
            }

            sym::va_start => bx.va_start(args[0].immediate()),
            sym::va_end => bx.va_end(args[0].immediate()),
            sym::size_of_val => {
                let tp_ty = substs.type_at(0);
                if let OperandValue::Pair(_, meta) = args[0].val {
                    let (llsize, _) = glue::size_and_align_of_dst(bx, tp_ty, Some(meta));
                    llsize
                } else {
                    bx.const_usize(bx.layout_of(tp_ty).size.bytes())
                }
            }
            sym::min_align_of_val => {
                let tp_ty = substs.type_at(0);
                if let OperandValue::Pair(_, meta) = args[0].val {
                    let (_, llalign) = glue::size_and_align_of_dst(bx, tp_ty, Some(meta));
                    llalign
                } else {
                    bx.const_usize(bx.layout_of(tp_ty).align.abi.bytes())
                }
            }
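            // The vtable begins with the common entries (drop-in-place, size,
            // align), so these intrinsics just load the size or align word
            // and annotate it with its statically known valid range.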
            sym::vtable_size | sym::vtable_align => {
                let vtable = args[0].immediate();
                let idx = match name {
                    sym::vtable_size => ty::COMMON_VTABLE_ENTRIES_SIZE,
                    sym::vtable_align => ty::COMMON_VTABLE_ENTRIES_ALIGN,
                    _ => bug!(),
                };
                let value = meth::VirtualIndex::from_index(idx).get_usize(bx, vtable);
                match name {
                    // Size is always <= isize::MAX.
                    sym::vtable_size => {
                        let size_bound = bx.data_layout().ptr_sized_integer().signed_max() as u128;
                        bx.range_metadata(value, WrappingRange { start: 0, end: size_bound });
                    }
                    // Alignment is always nonzero.
                    sym::vtable_align => {
                        bx.range_metadata(value, WrappingRange { start: 1, end: !0 })
                    }
                    _ => {}
                }
                value
            }
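            // These "type information" intrinsics are fully determined at
            // monomorphization time, so they are simply const-evaluated.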
            sym::pref_align_of
            | sym::needs_drop
            | sym::type_id
            | sym::type_name
            | sym::variant_count => {
                let value = bx
                    .tcx()
                    .const_eval_instance(ty::ParamEnv::reveal_all(), instance, None)
                    .unwrap();
                OperandRef::from_const(bx, value, ret_ty).immediate_or_packed_pair(bx)
            }
            sym::offset => {
                let ty = substs.type_at(0);
                let layout = bx.layout_of(ty);
                let ptr = args[0].immediate();
                let offset = args[1].immediate();
                bx.inbounds_gep(bx.backend_type(layout), ptr, &[offset])
            }
            sym::arith_offset => {
                let ty = substs.type_at(0);
                let layout = bx.layout_of(ty);
                let ptr = args[0].immediate();
                let offset = args[1].immediate();
                bx.gep(bx.backend_type(layout), ptr, &[offset])
            }
            sym::copy => {
                copy_intrinsic(
                    bx,
                    true,
                    false,
                    substs.type_at(0),
                    args[1].immediate(),
                    args[0].immediate(),
                    args[2].immediate(),
                );
                return;
            }
            sym::write_bytes => {
                memset_intrinsic(
                    bx,
                    false,
                    substs.type_at(0),
                    args[0].immediate(),
                    args[1].immediate(),
                    args[2].immediate(),
                );
                return;
            }

            sym::volatile_copy_nonoverlapping_memory => {
                copy_intrinsic(
                    bx,
                    false,
                    true,
                    substs.type_at(0),
                    args[0].immediate(),
                    args[1].immediate(),
                    args[2].immediate(),
                );
                return;
            }
            sym::volatile_copy_memory => {
                copy_intrinsic(
                    bx,
                    true,
                    true,
                    substs.type_at(0),
                    args[0].immediate(),
                    args[1].immediate(),
                    args[2].immediate(),
                );
                return;
            }
            sym::volatile_set_memory => {
                memset_intrinsic(
                    bx,
                    true,
                    substs.type_at(0),
                    args[0].immediate(),
                    args[1].immediate(),
                    args[2].immediate(),
                );
                return;
            }
            sym::volatile_store => {
                let dst = args[0].deref(bx.cx());
                args[1].val.volatile_store(bx, dst);
                return;
            }
            sym::unaligned_volatile_store => {
                let dst = args[0].deref(bx.cx());
                args[1].val.unaligned_volatile_store(bx, dst);
                return;
            }
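            // Integer intrinsics whose lowering depends on the signedness of
            // the operand type. The `*_with_overflow` variants return the
            // wrapped result together with an overflow flag, while the
            // `unchecked_*` variants emit no checks (overflowing them is UB).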
            sym::add_with_overflow
            | sym::sub_with_overflow
            | sym::mul_with_overflow
            | sym::unchecked_div
            | sym::unchecked_rem
            | sym::unchecked_shl
            | sym::unchecked_shr
            | sym::unchecked_add
            | sym::unchecked_sub
            | sym::unchecked_mul
            | sym::exact_div => {
                let ty = arg_tys[0];
                match int_type_width_signed(ty, bx.tcx()) {
                    Some((_width, signed)) => match name {
                        sym::add_with_overflow
                        | sym::sub_with_overflow
                        | sym::mul_with_overflow => {
                            let op = match name {
                                sym::add_with_overflow => OverflowOp::Add,
                                sym::sub_with_overflow => OverflowOp::Sub,
                                sym::mul_with_overflow => OverflowOp::Mul,
                                _ => bug!(),
                            };
                            let (val, overflow) =
                                bx.checked_binop(op, ty, args[0].immediate(), args[1].immediate());
                            // Convert `i1` to a `bool`, and write it to the out parameter
                            let val = bx.from_immediate(val);
                            let overflow = bx.from_immediate(overflow);

                            let dest = result.project_field(bx, 0);
                            bx.store(val, dest.llval, dest.align);
                            let dest = result.project_field(bx, 1);
                            bx.store(overflow, dest.llval, dest.align);

                            return;
                        }
                        sym::exact_div => {
                            if signed {
                                bx.exactsdiv(args[0].immediate(), args[1].immediate())
                            } else {
                                bx.exactudiv(args[0].immediate(), args[1].immediate())
                            }
                        }
                        sym::unchecked_div => {
                            if signed {
                                bx.sdiv(args[0].immediate(), args[1].immediate())
                            } else {
                                bx.udiv(args[0].immediate(), args[1].immediate())
                            }
                        }
                        sym::unchecked_rem => {
                            if signed {
                                bx.srem(args[0].immediate(), args[1].immediate())
                            } else {
                                bx.urem(args[0].immediate(), args[1].immediate())
                            }
                        }
                        sym::unchecked_shl => bx.shl(args[0].immediate(), args[1].immediate()),
                        sym::unchecked_shr => {
                            if signed {
                                bx.ashr(args[0].immediate(), args[1].immediate())
                            } else {
                                bx.lshr(args[0].immediate(), args[1].immediate())
                            }
                        }
                        sym::unchecked_add => {
                            if signed {
                                bx.unchecked_sadd(args[0].immediate(), args[1].immediate())
                            } else {
                                bx.unchecked_uadd(args[0].immediate(), args[1].immediate())
                            }
                        }
                        sym::unchecked_sub => {
                            if signed {
                                bx.unchecked_ssub(args[0].immediate(), args[1].immediate())
                            } else {
                                bx.unchecked_usub(args[0].immediate(), args[1].immediate())
                            }
                        }
                        sym::unchecked_mul => {
                            if signed {
                                bx.unchecked_smul(args[0].immediate(), args[1].immediate())
                            } else {
                                bx.unchecked_umul(args[0].immediate(), args[1].immediate())
                            }
                        }
                        _ => bug!(),
                    },
                    None => {
                        span_invalid_monomorphization_error(
                            bx.tcx().sess,
                            span,
                            &format!(
                                "invalid monomorphization of `{}` intrinsic: \
                                 expected basic integer type, found `{}`",
                                name, ty
                            ),
                        );
                        return;
                    }
                }
            }
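            // Float arithmetic lowered with fast-math-style flags in the
            // backend; only valid on basic float types.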
            sym::fadd_fast | sym::fsub_fast | sym::fmul_fast | sym::fdiv_fast | sym::frem_fast => {
                match float_type_width(arg_tys[0]) {
                    Some(_width) => match name {
                        sym::fadd_fast => bx.fadd_fast(args[0].immediate(), args[1].immediate()),
                        sym::fsub_fast => bx.fsub_fast(args[0].immediate(), args[1].immediate()),
                        sym::fmul_fast => bx.fmul_fast(args[0].immediate(), args[1].immediate()),
                        sym::fdiv_fast => bx.fdiv_fast(args[0].immediate(), args[1].immediate()),
                        sym::frem_fast => bx.frem_fast(args[0].immediate(), args[1].immediate()),
                        _ => bug!(),
                    },
                    None => {
                        span_invalid_monomorphization_error(
                            bx.tcx().sess,
                            span,
                            &format!(
                                "invalid monomorphization of `{}` intrinsic: \
                                 expected basic float type, found `{}`",
                                name, arg_tys[0]
                            ),
                        );
                        return;
                    }
                }
            }

            sym::float_to_int_unchecked => {
                if float_type_width(arg_tys[0]).is_none() {
                    span_invalid_monomorphization_error(
                        bx.tcx().sess,
                        span,
                        &format!(
                            "invalid monomorphization of `float_to_int_unchecked` \
                             intrinsic: expected basic float type, \
                             found `{}`",
                            arg_tys[0]
                        ),
                    );
                    return;
                }
                let Some((_width, signed)) = int_type_width_signed(ret_ty, bx.tcx()) else {
                    span_invalid_monomorphization_error(
                        bx.tcx().sess,
                        span,
                        &format!(
                            "invalid monomorphization of `float_to_int_unchecked` \
                             intrinsic: expected basic integer type, \
                             found `{}`",
                            ret_ty
                        ),
                    );
                    return;
                };
                if signed {
                    bx.fptosi(args[0].immediate(), llret_ty)
                } else {
                    bx.fptoui(args[0].immediate(), llret_ty)
                }
            }

            sym::discriminant_value => {
                if ret_ty.is_integral() {
                    args[0].deref(bx.cx()).codegen_get_discr(bx, ret_ty)
                } else {
                    span_bug!(span, "invalid discriminant type for `{:?}`", arg_tys[0])
                }
            }

            sym::const_allocate => {
                // Returns a null pointer at runtime.
                bx.const_null(bx.type_i8p())
            }

            sym::const_deallocate => {
                // A no-op at runtime.
                return;
            }

            // This requires that atomic intrinsics follow a specific naming pattern:
            // "atomic_<operation>[_<ordering>]"
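            // For example, `atomic_load_seqcst` or `atomic_xadd_relaxed`;
            // compare-exchange encodes two orderings, as in
            // `atomic_cxchg_acqrel_relaxed` (success ordering, then failure).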
            name if let Some(atomic) = name_str.strip_prefix("atomic_") => {
                use crate::common::AtomicOrdering::*;
                use crate::common::{AtomicRmwBinOp, SynchronizationScope};

                let Some((instruction, ordering)) = atomic.split_once('_') else {
                    bx.sess().fatal("Atomic intrinsic missing memory ordering");
                };

                let parse_ordering = |bx: &Bx, s| match s {
                    "unordered" => Unordered,
                    "relaxed" => Relaxed,
                    "acquire" => Acquire,
                    "release" => Release,
                    "acqrel" => AcquireRelease,
                    "seqcst" => SequentiallyConsistent,
                    _ => bx.sess().fatal("unknown ordering in atomic intrinsic"),
                };

                let invalid_monomorphization = |ty| {
                    span_invalid_monomorphization_error(
                        bx.tcx().sess,
                        span,
                        &format!(
                            "invalid monomorphization of `{}` intrinsic: \
                             expected basic integer type, found `{}`",
                            name, ty
                        ),
                    );
                };

                match instruction {
                    "cxchg" | "cxchgweak" => {
                        let Some((success, failure)) = ordering.split_once('_') else {
                            bx.sess().fatal(
                                "Atomic compare-exchange intrinsic missing failure memory ordering",
                            );
                        };
                        let ty = substs.type_at(0);
                        if int_type_width_signed(ty, bx.tcx()).is_some() || ty.is_unsafe_ptr() {
                            let weak = instruction == "cxchgweak";
                            let mut dst = args[0].immediate();
                            let mut cmp = args[1].immediate();
                            let mut src = args[2].immediate();
                            if ty.is_unsafe_ptr() {
                                // Some platforms do not support atomic operations on pointers,
                                // so we cast to integer first.
                                let ptr_llty = bx.type_ptr_to(bx.type_isize());
                                dst = bx.pointercast(dst, ptr_llty);
                                cmp = bx.ptrtoint(cmp, bx.type_isize());
                                src = bx.ptrtoint(src, bx.type_isize());
                            }
                            let pair = bx.atomic_cmpxchg(
                                dst,
                                cmp,
                                src,
                                parse_ordering(bx, success),
                                parse_ordering(bx, failure),
                                weak,
                            );
                            let val = bx.extract_value(pair, 0);
                            let success = bx.extract_value(pair, 1);
                            let val = bx.from_immediate(val);
                            let success = bx.from_immediate(success);

                            let dest = result.project_field(bx, 0);
                            bx.store(val, dest.llval, dest.align);
                            let dest = result.project_field(bx, 1);
                            bx.store(success, dest.llval, dest.align);
                            return;
                        } else {
                            return invalid_monomorphization(ty);
                        }
                    }

                    "load" => {
                        let ty = substs.type_at(0);
                        if int_type_width_signed(ty, bx.tcx()).is_some() || ty.is_unsafe_ptr() {
                            let layout = bx.layout_of(ty);
                            let size = layout.size;
                            let mut source = args[0].immediate();
                            if ty.is_unsafe_ptr() {
                                // Some platforms do not support atomic operations on pointers,
                                // so we cast to integer first...
                                let llty = bx.type_isize();
                                let ptr_llty = bx.type_ptr_to(llty);
                                source = bx.pointercast(source, ptr_llty);
                                let result = bx.atomic_load(
                                    llty,
                                    source,
                                    parse_ordering(bx, ordering),
                                    size,
                                );
                                // ... and then cast the result back to a pointer
                                bx.inttoptr(result, bx.backend_type(layout))
                            } else {
                                bx.atomic_load(
                                    bx.backend_type(layout),
                                    source,
                                    parse_ordering(bx, ordering),
                                    size,
                                )
                            }
                        } else {
                            return invalid_monomorphization(ty);
                        }
                    }

                    "store" => {
                        let ty = substs.type_at(0);
                        if int_type_width_signed(ty, bx.tcx()).is_some() || ty.is_unsafe_ptr() {
                            let size = bx.layout_of(ty).size;
                            let mut val = args[1].immediate();
                            let mut ptr = args[0].immediate();
                            if ty.is_unsafe_ptr() {
                                // Some platforms do not support atomic operations on pointers,
                                // so we cast to integer first.
                                let ptr_llty = bx.type_ptr_to(bx.type_isize());
                                ptr = bx.pointercast(ptr, ptr_llty);
                                val = bx.ptrtoint(val, bx.type_isize());
                            }
                            bx.atomic_store(val, ptr, parse_ordering(bx, ordering), size);
                            return;
                        } else {
                            return invalid_monomorphization(ty);
                        }
                    }

                    "fence" => {
                        bx.atomic_fence(
                            parse_ordering(bx, ordering),
                            SynchronizationScope::CrossThread,
                        );
                        return;
                    }

                    "singlethreadfence" => {
                        bx.atomic_fence(
                            parse_ordering(bx, ordering),
                            SynchronizationScope::SingleThread,
                        );
                        return;
                    }

                    // These are all AtomicRMW ops
                    op => {
                        let atom_op = match op {
                            "xchg" => AtomicRmwBinOp::AtomicXchg,
                            "xadd" => AtomicRmwBinOp::AtomicAdd,
                            "xsub" => AtomicRmwBinOp::AtomicSub,
                            "and" => AtomicRmwBinOp::AtomicAnd,
                            "nand" => AtomicRmwBinOp::AtomicNand,
                            "or" => AtomicRmwBinOp::AtomicOr,
                            "xor" => AtomicRmwBinOp::AtomicXor,
                            "max" => AtomicRmwBinOp::AtomicMax,
                            "min" => AtomicRmwBinOp::AtomicMin,
                            "umax" => AtomicRmwBinOp::AtomicUMax,
                            "umin" => AtomicRmwBinOp::AtomicUMin,
                            _ => bx.sess().fatal("unknown atomic operation"),
                        };

                        let ty = substs.type_at(0);
                        if int_type_width_signed(ty, bx.tcx()).is_some() || ty.is_unsafe_ptr() {
                            let mut ptr = args[0].immediate();
                            let mut val = args[1].immediate();
                            if ty.is_unsafe_ptr() {
                                // Some platforms do not support atomic operations on pointers,
                                // so we cast to integer first.
                                let ptr_llty = bx.type_ptr_to(bx.type_isize());
                                ptr = bx.pointercast(ptr, ptr_llty);
                                val = bx.ptrtoint(val, bx.type_isize());
                            }
                            bx.atomic_rmw(atom_op, ptr, val, parse_ordering(bx, ordering))
                        } else {
                            return invalid_monomorphization(ty);
                        }
                    }
                }
            }

            sym::nontemporal_store => {
                let dst = args[0].deref(bx.cx());
                args[1].val.nontemporal_store(bx, dst);
                return;
            }

            sym::ptr_guaranteed_cmp => {
                let a = args[0].immediate();
                let b = args[1].immediate();
                bx.icmp(IntPredicate::IntEQ, a, b)
            }

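            // `ptr_offset_from{,_unsigned}` compute the distance between two
            // pointers in units of `T`. As a sketch, for `*const u32` with
            // `a` 12 bytes past `b`, `ptr_offset_from(a, b)` yields `3`.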
            sym::ptr_offset_from | sym::ptr_offset_from_unsigned => {
                let ty = substs.type_at(0);
                let pointee_size = bx.layout_of(ty).size;

                let a = args[0].immediate();
                let b = args[1].immediate();
                let a = bx.ptrtoint(a, bx.type_isize());
                let b = bx.ptrtoint(b, bx.type_isize());
                let pointee_size = bx.const_usize(pointee_size.bytes());
                if name == sym::ptr_offset_from {
                    // This is the same sequence that Clang emits for pointer subtraction.
                    // It can be neither `nsw` nor `nuw` because the input is treated as
                    // unsigned but then the output is treated as signed, so neither works.
                    let d = bx.sub(a, b);
                    // This is where the signed magic happens (notice the `s` in `exactsdiv`).
                    bx.exactsdiv(d, pointee_size)
                } else {
                    // The `_unsigned` version knows the relative ordering of the pointers,
                    // so it can use `sub nuw` and `udiv exact` instead of dealing in signed.
                    let d = bx.unchecked_usub(a, b);
                    bx.exactudiv(d, pointee_size)
                }
            }

            _ => {
                // Need to use backend-specific things in the implementation.
                bx.codegen_intrinsic_call(instance, fn_abi, args, llresult, span);
                return;
            }
        };

        if !fn_abi.ret.is_ignore() {
            if let PassMode::Cast(ty, _) = &fn_abi.ret.mode {
                let ptr_llty = bx.type_ptr_to(bx.cast_backend_type(ty));
                let ptr = bx.pointercast(result.llval, ptr_llty);
                bx.store(llval, ptr, result.align);
            } else {
                OperandRef::from_immediate_or_packed_pair(bx, llval, result.layout)
                    .val
                    .store(bx, result);
            }
        }
    }
}

// Returns the width of an int `Ty`, and whether it is signed or not.
// Returns `None` if the type is not an integer.
// FIXME: there are multiple functions like this one; investigate reusing some of the
// existing ones.
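// As a sketch: `i32` yields `Some((32, true))`, `u8` yields `Some((8, false))`,
// and `usize` falls back to the target pointer width, e.g. `Some((64, false))`
// on a 64-bit target.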
fn int_type_width_signed(ty: Ty<'_>, tcx: TyCtxt<'_>) -> Option<(u64, bool)> {
    match ty.kind() {
        ty::Int(t) => {
            Some((t.bit_width().unwrap_or(u64::from(tcx.sess.target.pointer_width)), true))
        }
        ty::Uint(t) => {
            Some((t.bit_width().unwrap_or(u64::from(tcx.sess.target.pointer_width)), false))
        }
        _ => None,
    }
}

// Returns the width of a float `Ty`.
// Returns `None` if the type is not a float.
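// As a sketch: `f32` yields `Some(32)` and `f64` yields `Some(64)`.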
fn float_type_width(ty: Ty<'_>) -> Option<u64> {
    match ty.kind() {
        ty::Float(t) => Some(t.bit_width()),
        _ => None,
    }
}